Dataset schema (one row per fill-in-the-middle example):

- file_name: large_string, lengths 4–69
- prefix:    large_string, lengths 0–26.7k
- suffix:    large_string, lengths 0–24.8k
- middle:    large_string, lengths 0–2.12k
- fim_type:  large_string, 4 classes (random_line_split, conditional_block, identifier_body, identifier_name)
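Each row splits one source file into a prefix, a middle span to be predicted, and a suffix; concatenating the three fields reproduces the original file. Below is a minimal sketch of that reassembly — the Row struct and the example values are hypothetical, only the field names come from the schema above:

// Hypothetical in-memory representation of one dataset row.
struct Row {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String,
}

impl Row {
    // Reassemble the original file: prefix + middle + suffix.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}

fn main() {
    let row = Row {
        file_name: "stack.rs".into(),
        prefix: "let x = ".into(),
        middle: "1 + ".into(),
        suffix: "2;".into(),
        fim_type: "random_line_split".into(),
    };
    assert_eq!(row.reconstruct(), "let x = 1 + 2;");
    println!("{} ({})", row.file_name, row.fim_type);
}

The two source files below are shown once each, reassembled in prefix–middle–suffix order.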
stack.rs
// Copyright 2017-2018 the authors. See the 'Copyright and license' section of the
// README.md file at the top-level directory of this repository.
//
// Licensed under the Apache License, Version 2.0 (the LICENSE-APACHE file) or
// the MIT license (the LICENSE-MIT file) at your option. This file may not be
// copied, modified, or distributed except according to those terms.

// So clippy doesn't complain that IllumOS isn't in tick marks
#![cfg_attr(feature = "cargo-clippy", allow(doc_markdown))]

//! A generic stack-based slab.
//!
//! This module implements a generic slab which uses an inline header and a stack of object
//! pointers in place of a free list. It differs from both the large and small slab algorithms
//! introduced by Bonwick. Bonwick's large slabs used a separately-allocated header and a free list
//! of separately-allocated control objects (`kmem_bufctl`s in the IllumOS implementation).
//! Bonwick's small slabs used an inline header like the present implementation, but used a free
//! list constructed from headers on each object instead of a stack of pointers.
//!
//! This implementation is generic in that it does not prescribe a method for mapping objects to
//! their containing slabs, but instead requires that an implementation of this functionality be
//! provided (see the `ConfigData` trait). The `aligned` module implements this functionality by
//! ensuring that slabs have an alignment equal to their size, and using this to compute a bit mask
//! for objects in the slab. The `large` module implements this functionality by storing object
//! pointers or page addresses in an allocator-global hash table.
//!
//! # Layout
//! The layout of stack-based slabs is somewhat confusing, and not readily obvious from the code.
//! This is due largely to the fact that slab size cannot be known at compile time, and must
//! instead be computed at runtime. Why this is a problem will become apparent shortly.
//!
//! The layout in memory of stack-based slabs is as follows:
//!
//! ```text
//! <header> <pre-stack padding> <stack> <post-stack padding> <array of objects>
//! ```
//!
//! The following requirements must be met with respect to memory layout:
//!
//! * The stack - which is an array of `usize` - must be aligned according to the alignment
//!   required by `usize`
//! * The array of objects must be aligned according to the alignment requested by the user.
//!
//! The first requirement implies that there may need to be some padding between the header and the
//! stack. The second requirement implies that there may need to be some padding between the stack
//! and the array of objects.
//!
//! If the number of objects in a slab could be known statically, the stack could simply be an
//! array in the header. Instead, its size has to be computed dynamically, and thus cannot be a
//! field in the header (it could technically be `[*mut T]`, but this would make querying the
//! header's size more difficult).
//!
//! Instead, we use the `util::stack` module to implement a dynamically-sized stack, and to
//! dynamically compute the proper pre-stack padding required in order to give the stack the proper
//! alignment. We do the same for the post-stack padding in order to give the array of objects the
//! proper alignment.
use alloc::alloc;
use core::ptr::NonNull;
use core::{mem, ptr};

use init::InitSystem;
use object_alloc::UntypedObjectAlloc;
use util::color::{Color, ColorSettings};
use util::list::*;
use util::stack::Stack;
use SlabSystem;

/// Configuration to customize a stack-based slab implementation.
///
/// `ConfigData` completes the stack-based slab implementation by providing post-alloc and
/// pre-dealloc hooks and by providing a mechanism to look up an object's containing slab.
pub trait ConfigData
where
    Self: Sized,
{
    /// Perform per-slab post-allocation work.
    ///
    /// `post_alloc` is called after a newly-allocated slab has been initialized. It is optional,
    /// and defaults to a no-op.
    #[allow(unused)]
    fn post_alloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull<SlabHeader>) {}

    /// Perform per-slab pre-deallocation work.
    ///
    /// `pre_dealloc` is called before a slab is uninitialized and deallocated. It is optional, and
    /// defaults to a no-op.
    #[allow(unused)]
    fn pre_dealloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull<SlabHeader>) {}

    /// Look up an object's slab.
    ///
    /// Given an object, `ptr_to_slab` locates the slab containing that object.
    fn ptr_to_slab(&self, slab_size: usize, ptr: NonNull<u8>) -> NonNull<SlabHeader>;
}

pub struct System<A: UntypedObjectAlloc, C: ConfigData> {
    pub data: C,
    layout: Layout,
    alloc: A,
}

impl<A: UntypedObjectAlloc, C: ConfigData> System<A, C> {
    pub fn from_config_data(data: C, layout: Layout, alloc: A) -> System<A, C> {
        System {
            data,
            layout,
            alloc,
        }
    }
}

impl<I: InitSystem, A: UntypedObjectAlloc, C: ConfigData> SlabSystem<I> for System<A, C> {
    type Slab = SlabHeader;

    fn alloc_slab(&mut self) -> Option<NonNull<SlabHeader>> {
        unsafe {
            let color = self
                .layout
                .color_settings
                .next_color(self.layout.layout.align());
            let slab = self.alloc.alloc()?.cast();
            ptr::write(
                slab.as_ptr(),
                SlabHeader {
                    stack: Stack::new(),
                    color,
                    next: None,
                    prev: None,
                },
            );

            let stack_data_ptr = self.layout.stack_begin(slab);
            for i in 0..self.layout.num_obj {
                let ptr = self.layout.nth_obj(slab, color, i);
                (*slab.as_ptr())
                    .stack
                    .push(stack_data_ptr, I::pack(ptr, I::status_uninitialized()));
            }
            self.data
                .post_alloc(&self.layout, self.alloc.layout().size(), slab);
            Some(slab)
        }
    }

    fn dealloc_slab(&mut self, slab: NonNull<SlabHeader>) {
        unsafe {
            debug_assert_eq!((*slab.as_ptr()).stack.size(), self.layout.num_obj);
            self.data
                .pre_dealloc(&self.layout, self.alloc.layout().size(), slab);

            let stack_data_ptr = self.layout.stack_begin(slab);
            for _ in 0..self.layout.num_obj {
                let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr);
                I::drop(I::unpack_ptr(packed), I::unpack_status(packed));
            }
            self.alloc.dealloc(slab.cast());
        }
    }

    fn is_full(&self, slab: NonNull<SlabHeader>) -> bool {
        unsafe { (*slab.as_ptr()).stack.size() == self.layout.num_obj }
    }

    fn is_empty(&self, slab: NonNull<SlabHeader>) -> bool {
        unsafe { (*slab.as_ptr()).stack.size() == 0 }
    }

    fn alloc(&self, slab: NonNull<SlabHeader>) -> (NonNull<u8>, I::Status) {
        unsafe {
            let stack_data_ptr = self.layout.stack_begin(slab);
            let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr);
            (I::unpack_ptr(packed), I::unpack_status(packed))
        }
    }

    fn dealloc(&self, obj: NonNull<u8>, init_status: I::Status) -> (NonNull<SlabHeader>, bool) {
        unsafe {
            let slab = self.data.ptr_to_slab(self.alloc.layout().size(), obj);
            let was_empty = (*slab.as_ptr()).stack.size() == 0;
            let stack_data_ptr = self.layout.stack_begin(slab);
            (*slab.as_ptr())
                .stack
                .push(stack_data_ptr, I::pack(obj, init_status));
            (slab, was_empty)
        }
    }
}

pub struct SlabHeader {
    stack: Stack<usize>, // note: this is only the metadata; the real stack comes after this header
    color: Color,        // extra padding added before array beginning
    next: Option<NonNull<SlabHeader>>,
    prev: Option<NonNull<SlabHeader>>,
}

impl Linkable for SlabHeader {
    fn next(&self) -> Option<NonNull<SlabHeader>> {
        self.next
    }

    fn prev(&self) -> Option<NonNull<SlabHeader>> {
        self.prev
    }

    fn set_next(&mut self, next: Option<NonNull<SlabHeader>>) {
        self.next = next;
    }

    fn set_prev(&mut self, prev: Option<NonNull<SlabHeader>>) {
        self.prev = prev;
    }
}

impl SlabHeader {
    pub fn get_color(&self) -> Color {
        self.color
    }
}

#[derive(Clone)]
pub struct Layout {
    pub num_obj: usize,
    pub layout: alloc::Layout,
    pub stack_begin_offset: usize,
    pub array_begin_offset: usize,
    pub color_settings: ColorSettings,
}

impl Layout {
    /// Determines whether an allocator can be constructed for T using the given slab size. If so,
    /// it returns a constructed Layout for T using that slab size and the amount of unused space
    /// left at the end of the slab (when no coloring is used).
    pub fn for_slab_size(layout: alloc::Layout, slab_size: usize) -> Option<(Layout, usize)> {
        let obj_size = layout.size();
        let obj_align = layout.align();

        let hdr_size = mem::size_of::<SlabHeader>();
        // padding between the SlabHeader and the base of the pointer stack
        let pre_stack_padding = Stack::<usize>::padding_after(hdr_size);
        let stack_begin_offset = hdr_size + pre_stack_padding;

        // Find the largest number of objects we can fit in the slab. array_begin_offset is the
        // offset from the beginning of the slab of the array of objects.
        let (mut num_obj, mut array_begin_offset) = (0, 0);
        loop {
            let candidate = num_obj + 1;
            // total_hdr_size = size of header, post-header padding, and stack
            let total_hdr_size = stack_begin_offset + Stack::<usize>::bytes_for(candidate);
            // Padding between the pointer stack and the array of objects. NOTE:
            // The Layout alignment isn't used here, so we use 1 because it's
            // guaranteed not to cause from_size_align to return None.
            let post_stack_padding = alloc::Layout::from_size_align(total_hdr_size, 1)
                .unwrap()
                .padding_needed_for(obj_align);

            if total_hdr_size + post_stack_padding + (candidate * obj_size) <= slab_size {
                num_obj = candidate;
                array_begin_offset = total_hdr_size + post_stack_padding;
            } else {
                break;
            }
        }
        if num_obj == 0 {
            return None;
        }
        assert!(array_begin_offset > 0);

        let unused_space = slab_size - array_begin_offset - (num_obj * obj_size);
        let l = Layout {
            num_obj,
            layout,
            stack_begin_offset,
            array_begin_offset,
            color_settings: ColorSettings::new(obj_align, unused_space),
        };
        // assert that the objects fit within the slab
        assert!(
            slab_size
                >= l.array_begin_offset
                    + l.color_settings.max_color().as_usize()
                    + (l.num_obj * obj_size)
        );
        Some((l, unused_space))
    }

    fn array_begin(&self, slab: NonNull<SlabHeader>, color: Color) -> NonNull<u8> {
        debug_assert!(color.as_usize() <= self.color_settings.max_color().as_usize());
        unsafe {
            NonNull::new_unchecked(
                ((slab.as_ptr() as usize) + self.array_begin_offset + color.as_usize()) as *mut u8,
            )
        }
    }

    fn stack_begin(&self, slab: NonNull<SlabHeader>) -> NonNull<usize> {
        unsafe {
            NonNull::new_unchecked(
                ((slab.as_ptr() as usize) + self.stack_begin_offset) as *mut usize,
            )
        }
    }

    pub fn nth_obj(&self, slab: NonNull<SlabHeader>, color: Color, n: usize) -> NonNull<u8> {
        debug_assert!(n < self.num_obj);
        unsafe {
            NonNull::new_unchecked(
                (self.array_begin(slab, color).as_ptr() as usize + n * self.layout.size())
                    as *mut u8,
            )
        }
    }
}
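To make the layout search in `for_slab_size` concrete, here is a standalone sketch of the same loop with the crate-internal helpers replaced by assumptions: `HDR_SIZE` stands in for `mem::size_of::<SlabHeader>()` (assumed 32 bytes), and each stack slot is taken to be one `usize`, mirroring what `Stack::<usize>::bytes_for` is described as computing. It illustrates the arithmetic only; it is not the crate's API:

use std::mem;

// Assumed header size for illustration; the real value is size_of::<SlabHeader>().
const HDR_SIZE: usize = 32;

/// Returns (num_obj, array_begin_offset) for a slab of `slab_size` bytes holding
/// objects of `obj_size` bytes aligned to `obj_align`, or None if nothing fits.
fn fit_objects(slab_size: usize, obj_size: usize, obj_align: usize) -> Option<(usize, usize)> {
    let word = mem::size_of::<usize>();
    // Align the stack base up to the alignment of usize (the pre-stack padding).
    let stack_begin = (HDR_SIZE + word - 1) / word * word;
    let (mut num_obj, mut array_begin) = (0, 0);
    loop {
        let candidate = num_obj + 1;
        // Header + pre-stack padding + one usize stack slot per object.
        let total_hdr = stack_begin + candidate * word;
        // Post-stack padding up to the object alignment.
        let post_pad = (obj_align - total_hdr % obj_align) % obj_align;
        if total_hdr + post_pad + candidate * obj_size <= slab_size {
            num_obj = candidate;
            array_begin = total_hdr + post_pad;
        } else {
            break;
        }
    }
    if num_obj == 0 {
        None
    } else {
        Some((num_obj, array_begin))
    }
}

fn main() {
    // A 4096-byte slab of 64-byte objects aligned to 64 bytes.
    assert_eq!(fit_objects(4096, 64, 64), Some((56, 512)));
}

With these assumed sizes the loop settles on 56 objects and an object array beginning at offset 512: 32 header bytes, 448 bytes of pointer stack, and 32 bytes of post-stack padding, leaving 512 + 56 * 64 = 4096 bytes exactly.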
text.rs
use std::cmp::max;
use std::collections::HashMap;
use std::string::ToString;

use sdl2::pixels::Color;
use sdl2::rect::Rect;
use sdl2::render::{BlendMode, Canvas, Texture, TextureCreator, WindowCanvas};
use sdl2::surface::Surface;
use sdl2::ttf::Font as Sdl2Font;
use sdl2::video::WindowContext;

use crate::ui::types::{Align, ScreenPos, ScreenText};

// All printable ASCII from space (0x20) through '~' (0x7E). find_glyph_region below
// indexes this table by (char code - 32), so the leading space is required.
const ASCII: &str =
    " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~";

pub struct Font<'a> {
    font_canvas: Canvas<Surface<'a>>,
    glyphs: Vec<GlyphRegion>,
    line_height: u32,
    space_advance: i32,
    texture_creator: &'a TextureCreator<WindowContext>,
    cached_texts: HashMap<String, (Texture<'a>, u32, u32)>,
}

struct GlyphRegion {
    start: i32,
    advance: i32,
    width: u32,
    height: u32,
}

impl<'a> Font<'a> {
    pub fn from_font(
        texture_creator: &'a TextureCreator<sdl2::video::WindowContext>,
        font: Sdl2Font,
    ) -> Result<Self, String> {
        let mut total_width = 0;
        let mut total_height = 0;
        let mut glyphs: Vec<GlyphRegion> = Vec::new();
        let mut space_advance = 0;

        for c in ASCII.chars() {
            if let Some(metric) = font.find_glyph_metrics(c) {
                let (w, h) = font.size_of_char(c).map_err(to_string)?;
                glyphs.push(GlyphRegion {
                    start: total_width as i32,
                    width: w,
                    height: h,
                    advance: metric.advance,
                });
                if c == ' ' {
                    space_advance = metric.advance;
                }
                total_width += w;
                total_height = h;
            } else {
                return Err(format!("Unsupported character: {}", c));
            }
        }

        let mut font_canvas = Surface::new(
            total_width,
            total_height,
            texture_creator.default_pixel_format(),
        )?
        .into_canvas()?;

        let font_texture_creator = font_canvas.texture_creator();
        let mut x = 0;
        for (i, c) in ASCII.char_indices() {
            let GlyphRegion { width, .. } = glyphs[i];
            let char_surface = font
                .render(&c.to_string())
                .blended(Color::RGBA(255, 255, 255, 255))
                .map_err(to_string)?;
            let char_tex = font_texture_creator
                .create_texture_from_surface(&char_surface)
                .map_err(to_string)?;
            let target = Rect::new(x, 0, width, total_height);
            font_canvas.copy(&char_tex, None, Some(target))?;
            x += width as i32;
        }

        Ok(Font {
            font_canvas,
            glyphs,
            line_height: total_height,
            space_advance,
            texture_creator,
            cached_texts: HashMap::new(),
        })
    }

    pub fn draw(&mut self, screen_txt: ScreenText, cvs: &mut WindowCanvas) -> Result<(), String> {
        let cache_key = screen_txt.text.to_string();
        if let Some((ref mut tex, w, h)) = self.cached_texts.get_mut(&cache_key) {
            let (tw, th) = scale_dim(screen_txt.scale, *w, *h);
            let ScreenPos(x, y) = screen_txt.pos.align(screen_txt.align, tw, th);

            tex.set_alpha_mod(screen_txt.alpha);
            cvs.copy(tex, Rect::new(0, 0, *w, *h), Rect::new(x, y, tw, th))?;
            return Ok(());
        }

        let pos = screen_txt.pos;
        let align = screen_txt.align;
        let prepared_text = prepare(screen_txt, self);
        let (w, h) = prepared_text.dim;
        let ScreenPos(x, y) = pos.align(align, w, h);
        let pixel_format = self.texture_creator.default_pixel_format();
        // let pixel_format = sdl2::pixels::PixelFormatEnum::ARGB32;

        // draw the text to the temporary image
        let mut text_cvs = Surface::new(w, h, pixel_format)?.into_canvas()?;
        let font_texture_creator = text_cvs.texture_creator();
        let mut font_texture = font_texture_creator
            .create_texture_from_surface(self.font_canvas.surface())
            .map_err(to_string)?;
        draw_text(prepared_text, &mut text_cvs, &mut font_texture, (w, h))?;

        // create a texture for the correct render target and for caching
        let target_tex = self
            .texture_creator
            .create_texture_from_surface(text_cvs.surface())
            .map_err(to_string)?;

        // actually draw the text texture
        cvs.copy(&target_tex, Rect::new(0, 0, w, h), Rect::new(x, y, w, h))?;

        // cache the created texture for future frames
        self.cached_texts.insert(cache_key, (target_tex, w, h));
        Ok(())
    }
}

struct PreparedWord {
    chars: Vec<(i32, i32, u32, u32)>,
    width: u32,
}

impl PreparedWord {
    fn prepare(glyphs: &Vec<GlyphRegion>, txt: &str) -> Self {
        let mut x = 0;
        let mut chars = Vec::new();

        for c in txt.chars() {
            if let Some(r) = find_glyph_region(c, glyphs) {
                chars.push((r.start, r.advance, r.width, r.height));
                x = x + r.advance;
            }
        }

        PreparedWord {
            chars,
            width: x as u32,
        }
    }

    fn draw(
        self: &Self,
        texture: &Texture,
        cvs: &mut Canvas<Surface>,
        pos: (i32, i32),
    ) -> Result<(), String> {
        let (mut x, y) = pos;
        for (start, advance, width, height) in self.chars.iter() {
            let from = Rect::new(*start, 0, *width, *height);
            let to = Rect::new(x, y, *width, *height);
            cvs.copy(&texture, Some(from), Some(to))?;
            x = x + advance;
        }
        Ok(())
    }
}

struct PreparedText {
    lines: Vec<(i32, u32, Vec<(i32, PreparedWord)>)>,
    dim: (u32, u32),
    text_dim: (u32, u32),
    align: Align,
    color: (u8, u8, u8, u8),
    background: Option<Color>,
    padding: u32,
    border: Option<(u32, Color)>,
}

fn prepare<'a>(text: ScreenText, font: &'a Font) -> PreparedText {
    let (mut x, mut y) = (0, 0);
    let mut lines = Vec::new();
    let mut text_width: u32 = 0;
    let border_width = text.border.map(|(w, _)| w).unwrap_or(0);
    let spacing = 2 * text.padding + 2 * border_width;
    let max_width = text.max_width - spacing;

    for line in text.text.into_string().lines() {
        let mut words = Vec::new();
        let mut line_width: u32 = 0;

        for t in line.split_whitespace() {
            let word = PreparedWord::prepare(&font.glyphs, t);
            let text_width = word.width;
            let advance = font.space_advance + text_width as i32;

            if x > 0 && (x + advance) as u32 > max_width {
                // text does not fit in current line
                // => wrap text (no wrap if first word in line)
                lines.push((y, max_width, words));
                words = Vec::new();
                x = 0;
                y += font.line_height as i32;
                line_width = max_width;
            }

            words.push((x, word));
            x += advance;
            if x as u32 > line_width {
                line_width = x as u32;
            }
        }

        lines.push((y, line_width, words));
        x = 0;
        y += font.line_height as i32;
        text_width = max(text_width, line_width);
    }

    let w = text_width + spacing;
    let h = y as u32 + spacing;

    PreparedText {
        lines,
        dim: (max(text.min_width, w), max(text.min_height, h)),
        text_dim: (w, h),
        align: text.text_align,
        color: text.color,
        background: text.background.map(|(r, g, b, a)| Color::RGBA(r, g, b, a)),
        padding: text.padding,
        border: text
            .border
            .map(|(w, (r, g, b, a))| (w, Color::RGBA(r, g, b, a))),
    }
}

fn find_glyph_region(c: char, metrics: &Vec<GlyphRegion>) -> Option<&GlyphRegion> {
    let ascii_index = c as usize;
    if ascii_index >= 32 && ascii_index <= 126 {
        metrics.get(ascii_index - 32)
    } else {
        None
    }
}

fn to_string(s: impl ToString) -> String {
    s.to_string()
}

fn draw_background(
    cvs: &mut Canvas<Surface>,
    color: Color,
    x: i32,
    y: i32,
    w: u32,
    h: u32,
) -> Result<(), String> {
    if color.a < 255 {
        // set the background to transparent white
        // (the blending with the default black bg is too dark)
        cvs.set_draw_color(Color::RGBA(255, 255, 255, 0));
        cvs.clear();
        cvs.set_blend_mode(BlendMode::Blend);
    } else {
        cvs.set_blend_mode(BlendMode::None);
    }

    cvs.set_draw_color(color);
    cvs.fill_rect(Rect::new(x, y, w, h))
}

fn draw_border(
    cvs: &mut Canvas<Surface>,
    color: Color,
    bw: u32,
    x: i32,
    y: i32,
    w: u32,
    h: u32,
) -> Result<(), String> {
    let xl = x;
    let xr = x + w as i32 - bw as i32;
    let yt = y;
    let yb = y + h as i32 - bw as i32;

    cvs.set_draw_color(color);
    cvs.fill_rect(Rect::new(xl, yt, w, bw))?; // top
    cvs.fill_rect(Rect::new(xl, yt, bw, h))?; // left
    cvs.fill_rect(Rect::new(xr, yt, bw, h))?; // right
    cvs.fill_rect(Rect::new(xl, yb, w, bw))?; // bottom
    Ok(())
}

fn draw_text(
    text: PreparedText,
    cvs: &mut Canvas<Surface>,
    texture: &mut Texture,
    (w, h): (u32, u32),
) -> Result<(), String> {
    if let Some(bg_color) = text.background {
        draw_background(cvs, bg_color, 0, 0, w, h)?;
    }
    if let Some((bw, border_color)) = text.border {
        draw_border(cvs, border_color, bw, 0, 0, w, h)?;
    }

    texture.set_alpha_mod(text.color.3);
    texture.set_color_mod(text.color.0, text.color.1, text.color.2);

    let shift = text.border.map(|(val, _)| val).unwrap_or(0) as i32 + text.padding as i32;
    let shift_y = align_line_vertical(text.align, text.text_dim.1, h) + shift;

    for (offset_y, line_width, line) in text.lines.iter() {
        let shift_x = align_line_horizontal(text.align, *line_width, w) + shift;
        for (offset_x, word) in line {
            word.draw(texture, cvs, (shift_x + offset_x, shift_y + offset_y))?;
        }
    }

    texture.set_alpha_mod(255);
    texture.set_color_mod(0, 0, 0);
    Ok(())
}

fn scale_dim(scale_factor: f32, w: u32, h: u32) -> (u32, u32) {
    (
        (w as f32 * scale_factor).round() as u32,
        (h as f32 * scale_factor).round() as u32,
    )
}

fn align_line_horizontal(a: Align, line_width: u32, text_width: u32) -> i32 {
    match a {
        Align::TopLeft => 0,
        Align::MidCenter => (text_width - line_width) as i32 / 2,
    }
}

fn align_line_vertical(a: Align, text_height: u32, max_height: u32) -> i32 {
    match a {
        Align::TopLeft => 0,
        Align::MidCenter => (max_height - text_height) as i32 / 2,
    }
}
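The glyph lookup relies on the atlas covering exactly the printable ASCII range: `find_glyph_region` maps a character code in 32..=126 to vector index `code - 32`, which is why the `ASCII` constant must begin with the space character (index 0). A standalone sketch of that index math, with a hypothetical helper name:

/// Maps a character to its index in a space-through-'~' glyph table,
/// mirroring the bounds check in find_glyph_region above.
fn ascii_atlas_index(c: char) -> Option<usize> {
    let code = c as usize;
    // Printable ASCII: space (32) through '~' (126) map to indices 0..=94.
    if (32..=126).contains(&code) {
        Some(code - 32)
    } else {
        None
    }
}

fn main() {
    assert_eq!(ascii_atlas_index(' '), Some(0));
    assert_eq!(ascii_atlas_index('!'), Some(1));
    assert_eq!(ascii_atlas_index('~'), Some(94));
    assert_eq!(ascii_atlas_index('€'), None); // outside the atlas
}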
text.rs
use std::cmp::max; use std::collections::HashMap; use std::string::ToString; use sdl2::pixels::Color; use sdl2::rect::Rect; use sdl2::render::{BlendMode, Canvas, Texture, TextureCreator, WindowCanvas}; use sdl2::surface::Surface; use sdl2::ttf::Font as Sdl2Font; use sdl2::video::WindowContext; use crate::ui::types::{Align, ScreenPos, ScreenText}; const ASCII: &str = "!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"; pub struct Font<'a> { font_canvas: Canvas<Surface<'a>>, glyphs: Vec<GlyphRegion>, line_height: u32, space_advance: i32, texture_creator: &'a TextureCreator<WindowContext>, cached_texts: HashMap<String, (Texture<'a>, u32, u32)>, } struct GlyphRegion { start: i32, advance: i32, width: u32, height: u32, } impl<'a> Font<'a> { pub fn from_font( texture_creator: &'a TextureCreator<sdl2::video::WindowContext>, font: Sdl2Font, ) -> Result<Self, String> { let mut total_width = 0; let mut total_height = 0; let mut glyphs: Vec<GlyphRegion> = Vec::new(); let mut space_advance = 0; for c in ASCII.chars() { if let Some(metric) = font.find_glyph_metrics(c) { let (w, h) = font.size_of_char(c).map_err(to_string)?; glyphs.push(GlyphRegion { start: total_width as i32, width: w, height: h, advance: metric.advance, }); if c =='' { space_advance = metric.advance; } total_width += w; total_height = h; } else { return Err(format!("Unsupported character: {}", c)); } } let mut font_canvas = Surface::new( total_width, total_height, texture_creator.default_pixel_format(), )? .into_canvas()?; let font_texture_creator = font_canvas.texture_creator(); let mut x = 0; for (i, c) in ASCII.char_indices() { let GlyphRegion { width,.. } = glyphs[i]; let char_surface = font .render(&c.to_string()) .blended(Color::RGBA(255, 255, 255, 255)) .map_err(to_string)?; let char_tex = font_texture_creator .create_texture_from_surface(&char_surface) .map_err(to_string)?; let target = Rect::new(x, 0, width, total_height); font_canvas.copy(&char_tex, None, Some(target))?; x += width as i32; } Ok(Font {
line_height: total_height, space_advance, texture_creator, cached_texts: HashMap::new(), }) } pub fn draw(&mut self, screen_txt: ScreenText, cvs: &mut WindowCanvas) -> Result<(), String> { let cache_key = screen_txt.text.to_string(); if let Some((ref mut tex, w, h)) = self.cached_texts.get_mut(&cache_key) { let (tw, th) = scale_dim(screen_txt.scale, *w, *h); let ScreenPos(x, y) = screen_txt.pos.align(screen_txt.align, tw, th); tex.set_alpha_mod(screen_txt.alpha); cvs.copy(tex, Rect::new(0, 0, *w, *h), Rect::new(x, y, tw, th))?; return Ok(()); } let pos = screen_txt.pos; let align = screen_txt.align; let prepared_text = prepare(screen_txt, self); let (w, h) = prepared_text.dim; let ScreenPos(x, y) = pos.align(align, w, h); let pixel_format = self.texture_creator.default_pixel_format(); // let pixel_format = sdl2::pixels::PixelFormatEnum::ARGB32; // draw the text to the temporay image let mut text_cvs = Surface::new(w, h, pixel_format)?.into_canvas()?; let font_texture_creator = text_cvs.texture_creator(); let mut font_texture = font_texture_creator .create_texture_from_surface(self.font_canvas.surface()) .map_err(to_string)?; draw_text(prepared_text, &mut text_cvs, &mut font_texture, (w, h))?; // create a texture for the correct render target and for caching let target_tex = self .texture_creator .create_texture_from_surface(text_cvs.surface()) .map_err(to_string)?; // actually draw the text texture cvs.copy(&target_tex, Rect::new(0, 0, w, h), Rect::new(x, y, w, h))?; // cache the created texture for future frames self.cached_texts.insert(cache_key, (target_tex, w, h)); Ok(()) } } struct PreparedWord { chars: Vec<(i32, i32, u32, u32)>, width: u32, } impl PreparedWord { fn prepare(glyphs: &Vec<GlyphRegion>, txt: &str) -> Self { let mut x = 0; let mut chars = Vec::new(); for c in txt.chars() { if let Some(r) = find_glyph_region(c, glyphs) { chars.push((r.start, r.advance, r.width, r.height)); x = x + r.advance; } } PreparedWord { chars, width: x as u32, } } fn draw( self: &Self, texture: &Texture, cvs: &mut Canvas<Surface>, pos: (i32, i32), ) -> Result<(), String> { let (mut x, y) = pos; for (start, advance, width, height) in self.chars.iter() { let from = Rect::new(*start, 0, *width, *height); let to = Rect::new(x, y, *width, *height); cvs.copy(&texture, Some(from), Some(to))?; x = x + advance; } Ok(()) } } struct PreparedText { lines: Vec<(i32, u32, Vec<(i32, PreparedWord)>)>, dim: (u32, u32), text_dim: (u32, u32), align: Align, color: (u8, u8, u8, u8), background: Option<Color>, padding: u32, border: Option<(u32, Color)>, } fn prepare<'a>(text: ScreenText, font: &'a Font) -> PreparedText { let (mut x, mut y) = (0, 0); let mut lines = Vec::new(); let mut text_width: u32 = 0; let border_width = text.border.map(|(w, _)| w).unwrap_or(0); let spacing = 2 * text.padding + 2 * border_width; let max_width = text.max_width - spacing; for line in text.text.into_string().lines() { let mut words = Vec::new(); let mut line_width: u32 = 0; for t in line.split_whitespace() { let word = PreparedWord::prepare(&font.glyphs, t); let text_width = word.width; let advance = font.space_advance + text_width as i32; if x > 0 && (x + advance) as u32 > max_width { // text does not fit in current line // => wrap text (no wrap if first word in line) lines.push((y, max_width, words)); words = Vec::new(); x = 0; y += font.line_height as i32; line_width = max_width; } words.push((x, word)); x += advance; if x as u32 > line_width { line_width = x as u32; } } lines.push((y, line_width, words)); x = 0; y += font.line_height 
as i32; text_width = max(text_width, line_width); } let w = text_width + spacing; let h = y as u32 + spacing; PreparedText { lines, dim: (max(text.min_width, w), max(text.min_height, h)), text_dim: (w, h), align: text.text_align, color: text.color, background: text.background.map(|(r, g, b, a)| Color::RGBA(r, g, b, a)), padding: text.padding, border: text .border .map(|(w, (r, g, b, a))| (w, Color::RGBA(r, g, b, a))), } } fn find_glyph_region(c: char, metrics: &Vec<GlyphRegion>) -> Option<&GlyphRegion> { let ascii_index = c as usize; if ascii_index >= 32 && ascii_index <= 126 { metrics.get(ascii_index - 32) } else { None } } fn to_string(s: impl ToString) -> String { s.to_string() } fn draw_background( cvs: &mut Canvas<Surface>, color: Color, x: i32, y: i32, w: u32, h: u32, ) -> Result<(), String> { if color.a < 255 { // set the background to transparent white // (the blending with default black bg is to dark) cvs.set_draw_color(Color::RGBA(255, 255, 255, 0)); cvs.clear(); cvs.set_blend_mode(BlendMode::Blend); } else { cvs.set_blend_mode(BlendMode::None); } cvs.set_draw_color(color); cvs.fill_rect(Rect::new(x, y, w, h)) } fn draw_border( cvs: &mut Canvas<Surface>, color: Color, bw: u32, x: i32, y: i32, w: u32, h: u32, ) -> Result<(), String> { let xl = x; let xr = x + w as i32 - bw as i32; let yt = y; let yb = y + h as i32 - bw as i32; cvs.set_draw_color(color); cvs.fill_rect(Rect::new(xl, yt, w, bw))?; // top cvs.fill_rect(Rect::new(xl, yt, bw, h))?; // left cvs.fill_rect(Rect::new(xr, yt, bw, h))?; // right cvs.fill_rect(Rect::new(xl, yb, w, bw))?; // bottom Ok(()) } fn draw_text( text: PreparedText, cvs: &mut Canvas<Surface>, texture: &mut Texture, (w, h): (u32, u32), ) -> Result<(), String> { if let Some(bg_color) = text.background { draw_background(cvs, bg_color, 0, 0, w, h)?; } if let Some((bw, border_color)) = text.border { draw_border(cvs, border_color, bw, 0, 0, w, h)?; } texture.set_alpha_mod(text.color.3); texture.set_color_mod(text.color.0, text.color.1, text.color.2); let shift = text.border.map(|(val, _)| val).unwrap_or(0) as i32 + text.padding as i32; let shift_y = align_line_vertical(text.align, text.text_dim.1, h) + shift; for (offset_y, line_width, line) in text.lines.iter() { let shift_x = align_line_horizontal(text.align, *line_width, w) + shift; for (offset_x, word) in line { word.draw(texture, cvs, (shift_x + offset_x, shift_y + offset_y))?; } } texture.set_alpha_mod(255); texture.set_color_mod(0, 0, 0); Ok(()) } fn scale_dim(scale_factor: f32, w: u32, h: u32) -> (u32, u32) { ( (w as f32 * scale_factor).round() as u32, (h as f32 * scale_factor).round() as u32, ) } fn align_line_horizontal(a: Align, line_width: u32, text_width: u32) -> i32 { match a { Align::TopLeft => 0, Align::MidCenter => (text_width - line_width) as i32 / 2, } } fn align_line_vertical(a: Align, text_height: u32, max_height: u32) -> i32 { match a { Align::TopLeft => 0, Align::MidCenter => (max_height - text_height) as i32 / 2, } }
font_canvas: font_canvas, glyphs,
random_line_split
text.rs
use std::cmp::max; use std::collections::HashMap; use std::string::ToString; use sdl2::pixels::Color; use sdl2::rect::Rect; use sdl2::render::{BlendMode, Canvas, Texture, TextureCreator, WindowCanvas}; use sdl2::surface::Surface; use sdl2::ttf::Font as Sdl2Font; use sdl2::video::WindowContext; use crate::ui::types::{Align, ScreenPos, ScreenText}; const ASCII: &str = "!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"; pub struct Font<'a> { font_canvas: Canvas<Surface<'a>>, glyphs: Vec<GlyphRegion>, line_height: u32, space_advance: i32, texture_creator: &'a TextureCreator<WindowContext>, cached_texts: HashMap<String, (Texture<'a>, u32, u32)>, } struct GlyphRegion { start: i32, advance: i32, width: u32, height: u32, } impl<'a> Font<'a> { pub fn from_font( texture_creator: &'a TextureCreator<sdl2::video::WindowContext>, font: Sdl2Font, ) -> Result<Self, String> { let mut total_width = 0; let mut total_height = 0; let mut glyphs: Vec<GlyphRegion> = Vec::new(); let mut space_advance = 0; for c in ASCII.chars() { if let Some(metric) = font.find_glyph_metrics(c) { let (w, h) = font.size_of_char(c).map_err(to_string)?; glyphs.push(GlyphRegion { start: total_width as i32, width: w, height: h, advance: metric.advance, }); if c =='' { space_advance = metric.advance; } total_width += w; total_height = h; } else { return Err(format!("Unsupported character: {}", c)); } } let mut font_canvas = Surface::new( total_width, total_height, texture_creator.default_pixel_format(), )? .into_canvas()?; let font_texture_creator = font_canvas.texture_creator(); let mut x = 0; for (i, c) in ASCII.char_indices() { let GlyphRegion { width,.. } = glyphs[i]; let char_surface = font .render(&c.to_string()) .blended(Color::RGBA(255, 255, 255, 255)) .map_err(to_string)?; let char_tex = font_texture_creator .create_texture_from_surface(&char_surface) .map_err(to_string)?; let target = Rect::new(x, 0, width, total_height); font_canvas.copy(&char_tex, None, Some(target))?; x += width as i32; } Ok(Font { font_canvas: font_canvas, glyphs, line_height: total_height, space_advance, texture_creator, cached_texts: HashMap::new(), }) } pub fn draw(&mut self, screen_txt: ScreenText, cvs: &mut WindowCanvas) -> Result<(), String> { let cache_key = screen_txt.text.to_string(); if let Some((ref mut tex, w, h)) = self.cached_texts.get_mut(&cache_key) { let (tw, th) = scale_dim(screen_txt.scale, *w, *h); let ScreenPos(x, y) = screen_txt.pos.align(screen_txt.align, tw, th); tex.set_alpha_mod(screen_txt.alpha); cvs.copy(tex, Rect::new(0, 0, *w, *h), Rect::new(x, y, tw, th))?; return Ok(()); } let pos = screen_txt.pos; let align = screen_txt.align; let prepared_text = prepare(screen_txt, self); let (w, h) = prepared_text.dim; let ScreenPos(x, y) = pos.align(align, w, h); let pixel_format = self.texture_creator.default_pixel_format(); // let pixel_format = sdl2::pixels::PixelFormatEnum::ARGB32; // draw the text to the temporay image let mut text_cvs = Surface::new(w, h, pixel_format)?.into_canvas()?; let font_texture_creator = text_cvs.texture_creator(); let mut font_texture = font_texture_creator .create_texture_from_surface(self.font_canvas.surface()) .map_err(to_string)?; draw_text(prepared_text, &mut text_cvs, &mut font_texture, (w, h))?; // create a texture for the correct render target and for caching let target_tex = self .texture_creator .create_texture_from_surface(text_cvs.surface()) .map_err(to_string)?; // actually draw the text texture cvs.copy(&target_tex, Rect::new(0, 0, w, h), 
Rect::new(x, y, w, h))?; // cache the created texture for future frames self.cached_texts.insert(cache_key, (target_tex, w, h)); Ok(()) } } struct PreparedWord { chars: Vec<(i32, i32, u32, u32)>, width: u32, } impl PreparedWord { fn prepare(glyphs: &Vec<GlyphRegion>, txt: &str) -> Self { let mut x = 0; let mut chars = Vec::new(); for c in txt.chars() { if let Some(r) = find_glyph_region(c, glyphs) { chars.push((r.start, r.advance, r.width, r.height)); x = x + r.advance; } } PreparedWord { chars, width: x as u32, } } fn draw( self: &Self, texture: &Texture, cvs: &mut Canvas<Surface>, pos: (i32, i32), ) -> Result<(), String> { let (mut x, y) = pos; for (start, advance, width, height) in self.chars.iter() { let from = Rect::new(*start, 0, *width, *height); let to = Rect::new(x, y, *width, *height); cvs.copy(&texture, Some(from), Some(to))?; x = x + advance; } Ok(()) } } struct PreparedText { lines: Vec<(i32, u32, Vec<(i32, PreparedWord)>)>, dim: (u32, u32), text_dim: (u32, u32), align: Align, color: (u8, u8, u8, u8), background: Option<Color>, padding: u32, border: Option<(u32, Color)>, } fn prepare<'a>(text: ScreenText, font: &'a Font) -> PreparedText { let (mut x, mut y) = (0, 0); let mut lines = Vec::new(); let mut text_width: u32 = 0; let border_width = text.border.map(|(w, _)| w).unwrap_or(0); let spacing = 2 * text.padding + 2 * border_width; let max_width = text.max_width - spacing; for line in text.text.into_string().lines() { let mut words = Vec::new(); let mut line_width: u32 = 0; for t in line.split_whitespace() { let word = PreparedWord::prepare(&font.glyphs, t); let text_width = word.width; let advance = font.space_advance + text_width as i32; if x > 0 && (x + advance) as u32 > max_width { // text does not fit in current line // => wrap text (no wrap if first word in line) lines.push((y, max_width, words)); words = Vec::new(); x = 0; y += font.line_height as i32; line_width = max_width; } words.push((x, word)); x += advance; if x as u32 > line_width { line_width = x as u32; } } lines.push((y, line_width, words)); x = 0; y += font.line_height as i32; text_width = max(text_width, line_width); } let w = text_width + spacing; let h = y as u32 + spacing; PreparedText { lines, dim: (max(text.min_width, w), max(text.min_height, h)), text_dim: (w, h), align: text.text_align, color: text.color, background: text.background.map(|(r, g, b, a)| Color::RGBA(r, g, b, a)), padding: text.padding, border: text .border .map(|(w, (r, g, b, a))| (w, Color::RGBA(r, g, b, a))), } } fn find_glyph_region(c: char, metrics: &Vec<GlyphRegion>) -> Option<&GlyphRegion> { let ascii_index = c as usize; if ascii_index >= 32 && ascii_index <= 126 { metrics.get(ascii_index - 32) } else { None } } fn to_string(s: impl ToString) -> String { s.to_string() } fn draw_background( cvs: &mut Canvas<Surface>, color: Color, x: i32, y: i32, w: u32, h: u32, ) -> Result<(), String> { if color.a < 255 { // set the background to transparent white // (the blending with default black bg is to dark) cvs.set_draw_color(Color::RGBA(255, 255, 255, 0)); cvs.clear(); cvs.set_blend_mode(BlendMode::Blend); } else { cvs.set_blend_mode(BlendMode::None); } cvs.set_draw_color(color); cvs.fill_rect(Rect::new(x, y, w, h)) } fn draw_border( cvs: &mut Canvas<Surface>, color: Color, bw: u32, x: i32, y: i32, w: u32, h: u32, ) -> Result<(), String> { let xl = x; let xr = x + w as i32 - bw as i32; let yt = y; let yb = y + h as i32 - bw as i32; cvs.set_draw_color(color); cvs.fill_rect(Rect::new(xl, yt, w, bw))?; // top cvs.fill_rect(Rect::new(xl, yt, 
bw, h))?; // left cvs.fill_rect(Rect::new(xr, yt, bw, h))?; // right cvs.fill_rect(Rect::new(xl, yb, w, bw))?; // bottom Ok(()) } fn draw_text( text: PreparedText, cvs: &mut Canvas<Surface>, texture: &mut Texture, (w, h): (u32, u32), ) -> Result<(), String> { if let Some(bg_color) = text.background { draw_background(cvs, bg_color, 0, 0, w, h)?; } if let Some((bw, border_color)) = text.border { draw_border(cvs, border_color, bw, 0, 0, w, h)?; } texture.set_alpha_mod(text.color.3); texture.set_color_mod(text.color.0, text.color.1, text.color.2); let shift = text.border.map(|(val, _)| val).unwrap_or(0) as i32 + text.padding as i32; let shift_y = align_line_vertical(text.align, text.text_dim.1, h) + shift; for (offset_y, line_width, line) in text.lines.iter() { let shift_x = align_line_horizontal(text.align, *line_width, w) + shift; for (offset_x, word) in line { word.draw(texture, cvs, (shift_x + offset_x, shift_y + offset_y))?; } } texture.set_alpha_mod(255); texture.set_color_mod(0, 0, 0); Ok(()) } fn
(scale_factor: f32, w: u32, h: u32) -> (u32, u32) { ( (w as f32 * scale_factor).round() as u32, (h as f32 * scale_factor).round() as u32, ) } fn align_line_horizontal(a: Align, line_width: u32, text_width: u32) -> i32 { match a { Align::TopLeft => 0, Align::MidCenter => (text_width - line_width) as i32 / 2, } } fn align_line_vertical(a: Align, text_height: u32, max_height: u32) -> i32 { match a { Align::TopLeft => 0, Align::MidCenter => (max_height - text_height) as i32 / 2, } }
scale_dim
identifier_name
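The layout helpers at the tail of text.rs (`scale_dim` and the `align_line_*` functions) are pure arithmetic, so they are easy to sanity-check outside of SDL. Below is a minimal, self-contained sketch — no sdl2 dependency; the two-variant `Align` mirrors the variants the file actually handles, and the test values are illustrative:

```rust
// Standalone check of the scaling and centering math from text.rs.
#[derive(Copy, Clone)]
enum Align {
    TopLeft,
    MidCenter,
}

// Scale a width/height pair, rounding to the nearest pixel.
fn scale_dim(scale_factor: f32, w: u32, h: u32) -> (u32, u32) {
    (
        (w as f32 * scale_factor).round() as u32,
        (h as f32 * scale_factor).round() as u32,
    )
}

// Horizontal shift of a line inside the text box: 0 for top-left,
// half the leftover space for centered text.
fn align_line_horizontal(a: Align, line_width: u32, text_width: u32) -> i32 {
    match a {
        Align::TopLeft => 0,
        Align::MidCenter => (text_width - line_width) as i32 / 2,
    }
}

fn main() {
    assert_eq!(scale_dim(1.5, 100, 20), (150, 30));
    assert_eq!(align_line_horizontal(Align::TopLeft, 40, 100), 0);
    assert_eq!(align_line_horizontal(Align::MidCenter, 40, 100), 30);
}
```

As in the original, the centering subtraction assumes `line_width <= text_width`; a wider line would underflow the `u32` before the cast.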
text.rs
use std::cmp::max; use std::collections::HashMap; use std::string::ToString; use sdl2::pixels::Color; use sdl2::rect::Rect; use sdl2::render::{BlendMode, Canvas, Texture, TextureCreator, WindowCanvas}; use sdl2::surface::Surface; use sdl2::ttf::Font as Sdl2Font; use sdl2::video::WindowContext; use crate::ui::types::{Align, ScreenPos, ScreenText}; const ASCII: &str = "!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"; pub struct Font<'a> { font_canvas: Canvas<Surface<'a>>, glyphs: Vec<GlyphRegion>, line_height: u32, space_advance: i32, texture_creator: &'a TextureCreator<WindowContext>, cached_texts: HashMap<String, (Texture<'a>, u32, u32)>, } struct GlyphRegion { start: i32, advance: i32, width: u32, height: u32, } impl<'a> Font<'a> { pub fn from_font( texture_creator: &'a TextureCreator<sdl2::video::WindowContext>, font: Sdl2Font, ) -> Result<Self, String> { let mut total_width = 0; let mut total_height = 0; let mut glyphs: Vec<GlyphRegion> = Vec::new(); let mut space_advance = 0; for c in ASCII.chars() { if let Some(metric) = font.find_glyph_metrics(c)
else { return Err(format!("Unsupported character: {}", c)); } } let mut font_canvas = Surface::new( total_width, total_height, texture_creator.default_pixel_format(), )? .into_canvas()?; let font_texture_creator = font_canvas.texture_creator(); let mut x = 0; for (i, c) in ASCII.char_indices() { let GlyphRegion { width,.. } = glyphs[i]; let char_surface = font .render(&c.to_string()) .blended(Color::RGBA(255, 255, 255, 255)) .map_err(to_string)?; let char_tex = font_texture_creator .create_texture_from_surface(&char_surface) .map_err(to_string)?; let target = Rect::new(x, 0, width, total_height); font_canvas.copy(&char_tex, None, Some(target))?; x += width as i32; } Ok(Font { font_canvas: font_canvas, glyphs, line_height: total_height, space_advance, texture_creator, cached_texts: HashMap::new(), }) } pub fn draw(&mut self, screen_txt: ScreenText, cvs: &mut WindowCanvas) -> Result<(), String> { let cache_key = screen_txt.text.to_string(); if let Some((ref mut tex, w, h)) = self.cached_texts.get_mut(&cache_key) { let (tw, th) = scale_dim(screen_txt.scale, *w, *h); let ScreenPos(x, y) = screen_txt.pos.align(screen_txt.align, tw, th); tex.set_alpha_mod(screen_txt.alpha); cvs.copy(tex, Rect::new(0, 0, *w, *h), Rect::new(x, y, tw, th))?; return Ok(()); } let pos = screen_txt.pos; let align = screen_txt.align; let prepared_text = prepare(screen_txt, self); let (w, h) = prepared_text.dim; let ScreenPos(x, y) = pos.align(align, w, h); let pixel_format = self.texture_creator.default_pixel_format(); // let pixel_format = sdl2::pixels::PixelFormatEnum::ARGB32; // draw the text to the temporay image let mut text_cvs = Surface::new(w, h, pixel_format)?.into_canvas()?; let font_texture_creator = text_cvs.texture_creator(); let mut font_texture = font_texture_creator .create_texture_from_surface(self.font_canvas.surface()) .map_err(to_string)?; draw_text(prepared_text, &mut text_cvs, &mut font_texture, (w, h))?; // create a texture for the correct render target and for caching let target_tex = self .texture_creator .create_texture_from_surface(text_cvs.surface()) .map_err(to_string)?; // actually draw the text texture cvs.copy(&target_tex, Rect::new(0, 0, w, h), Rect::new(x, y, w, h))?; // cache the created texture for future frames self.cached_texts.insert(cache_key, (target_tex, w, h)); Ok(()) } } struct PreparedWord { chars: Vec<(i32, i32, u32, u32)>, width: u32, } impl PreparedWord { fn prepare(glyphs: &Vec<GlyphRegion>, txt: &str) -> Self { let mut x = 0; let mut chars = Vec::new(); for c in txt.chars() { if let Some(r) = find_glyph_region(c, glyphs) { chars.push((r.start, r.advance, r.width, r.height)); x = x + r.advance; } } PreparedWord { chars, width: x as u32, } } fn draw( self: &Self, texture: &Texture, cvs: &mut Canvas<Surface>, pos: (i32, i32), ) -> Result<(), String> { let (mut x, y) = pos; for (start, advance, width, height) in self.chars.iter() { let from = Rect::new(*start, 0, *width, *height); let to = Rect::new(x, y, *width, *height); cvs.copy(&texture, Some(from), Some(to))?; x = x + advance; } Ok(()) } } struct PreparedText { lines: Vec<(i32, u32, Vec<(i32, PreparedWord)>)>, dim: (u32, u32), text_dim: (u32, u32), align: Align, color: (u8, u8, u8, u8), background: Option<Color>, padding: u32, border: Option<(u32, Color)>, } fn prepare<'a>(text: ScreenText, font: &'a Font) -> PreparedText { let (mut x, mut y) = (0, 0); let mut lines = Vec::new(); let mut text_width: u32 = 0; let border_width = text.border.map(|(w, _)| w).unwrap_or(0); let spacing = 2 * text.padding + 2 * border_width; 
let max_width = text.max_width - spacing; for line in text.text.into_string().lines() { let mut words = Vec::new(); let mut line_width: u32 = 0; for t in line.split_whitespace() { let word = PreparedWord::prepare(&font.glyphs, t); let text_width = word.width; let advance = font.space_advance + text_width as i32; if x > 0 && (x + advance) as u32 > max_width { // text does not fit in current line // => wrap text (no wrap if first word in line) lines.push((y, max_width, words)); words = Vec::new(); x = 0; y += font.line_height as i32; line_width = max_width; } words.push((x, word)); x += advance; if x as u32 > line_width { line_width = x as u32; } } lines.push((y, line_width, words)); x = 0; y += font.line_height as i32; text_width = max(text_width, line_width); } let w = text_width + spacing; let h = y as u32 + spacing; PreparedText { lines, dim: (max(text.min_width, w), max(text.min_height, h)), text_dim: (w, h), align: text.text_align, color: text.color, background: text.background.map(|(r, g, b, a)| Color::RGBA(r, g, b, a)), padding: text.padding, border: text .border .map(|(w, (r, g, b, a))| (w, Color::RGBA(r, g, b, a))), } } fn find_glyph_region(c: char, metrics: &Vec<GlyphRegion>) -> Option<&GlyphRegion> { let ascii_index = c as usize; if ascii_index >= 32 && ascii_index <= 126 { metrics.get(ascii_index - 32) } else { None } } fn to_string(s: impl ToString) -> String { s.to_string() } fn draw_background( cvs: &mut Canvas<Surface>, color: Color, x: i32, y: i32, w: u32, h: u32, ) -> Result<(), String> { if color.a < 255 { // set the background to transparent white // (the blending with default black bg is to dark) cvs.set_draw_color(Color::RGBA(255, 255, 255, 0)); cvs.clear(); cvs.set_blend_mode(BlendMode::Blend); } else { cvs.set_blend_mode(BlendMode::None); } cvs.set_draw_color(color); cvs.fill_rect(Rect::new(x, y, w, h)) } fn draw_border( cvs: &mut Canvas<Surface>, color: Color, bw: u32, x: i32, y: i32, w: u32, h: u32, ) -> Result<(), String> { let xl = x; let xr = x + w as i32 - bw as i32; let yt = y; let yb = y + h as i32 - bw as i32; cvs.set_draw_color(color); cvs.fill_rect(Rect::new(xl, yt, w, bw))?; // top cvs.fill_rect(Rect::new(xl, yt, bw, h))?; // left cvs.fill_rect(Rect::new(xr, yt, bw, h))?; // right cvs.fill_rect(Rect::new(xl, yb, w, bw))?; // bottom Ok(()) } fn draw_text( text: PreparedText, cvs: &mut Canvas<Surface>, texture: &mut Texture, (w, h): (u32, u32), ) -> Result<(), String> { if let Some(bg_color) = text.background { draw_background(cvs, bg_color, 0, 0, w, h)?; } if let Some((bw, border_color)) = text.border { draw_border(cvs, border_color, bw, 0, 0, w, h)?; } texture.set_alpha_mod(text.color.3); texture.set_color_mod(text.color.0, text.color.1, text.color.2); let shift = text.border.map(|(val, _)| val).unwrap_or(0) as i32 + text.padding as i32; let shift_y = align_line_vertical(text.align, text.text_dim.1, h) + shift; for (offset_y, line_width, line) in text.lines.iter() { let shift_x = align_line_horizontal(text.align, *line_width, w) + shift; for (offset_x, word) in line { word.draw(texture, cvs, (shift_x + offset_x, shift_y + offset_y))?; } } texture.set_alpha_mod(255); texture.set_color_mod(0, 0, 0); Ok(()) } fn scale_dim(scale_factor: f32, w: u32, h: u32) -> (u32, u32) { ( (w as f32 * scale_factor).round() as u32, (h as f32 * scale_factor).round() as u32, ) } fn align_line_horizontal(a: Align, line_width: u32, text_width: u32) -> i32 { match a { Align::TopLeft => 0, Align::MidCenter => (text_width - line_width) as i32 / 2, } } fn align_line_vertical(a: 
Align, text_height: u32, max_height: u32) -> i32 { match a { Align::TopLeft => 0, Align::MidCenter => (max_height - text_height) as i32 / 2, } }
{ let (w, h) = font.size_of_char(c).map_err(to_string)?; glyphs.push(GlyphRegion { start: total_width as i32, width: w, height: h, advance: metric.advance, }); if c == ' ' { space_advance = metric.advance; } total_width += w; total_height = h; }
conditional_block
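The glyph atlas that `Font::from_font` builds is a single horizontal strip: each printable ASCII glyph's `start` is the running sum of the widths rendered before it. A sketch of just that bookkeeping, with fabricated widths standing in for the real `size_of_char` metrics:

```rust
// Atlas bookkeeping from Font::from_font: glyphs are packed left-to-right in
// one strip, so each region's start is the sum of all widths before it.
struct GlyphRegion {
    start: i32,
    width: u32,
}

fn build_regions(widths: &[u32]) -> Vec<GlyphRegion> {
    let mut total = 0i32;
    widths
        .iter()
        .map(|&w| {
            let region = GlyphRegion { start: total, width: w };
            total += w as i32;
            region
        })
        .collect()
}

fn main() {
    // Fabricated widths standing in for font.size_of_char results.
    let regions = build_regions(&[5, 7, 6]);
    assert_eq!(regions[2].start, 12); // 5 + 7
    assert_eq!(regions[2].width, 6);
}
```

Keeping everything in one strip texture is what lets `PreparedWord::draw` render a word with one `copy` per character, and lets `draw_text` tint the whole font at once with a single color/alpha modulation.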
common.rs
use std::collections::HashSet; pub type PlayerId = usize; /// Represent a player during a game. #[derive(Serialize, Clone, Debug, PartialEq)] pub struct Player { /// An integer that uniquely identifies each player during a game pub id: PlayerId, /// Number of tiles the player currently owns #[serde(skip_serializing_if = "has_no_tile")] pub owned_tiles: usize, /// Turn at which the player was defeated, if any #[serde(skip_serializing_if = "Option::is_none")] pub defeated_at: Option<usize>, } /// Small helper used by serde to avoid serializing the owned_tile field if the player does not own /// any tile. We try to keep the jsons as small as possible for network efficiency. fn has_no_tile(owned_tiles: &usize) -> bool { *owned_tiles == 0 } impl Player { /// Return a new undefeated player, with no owned tile. pub fn new(id: PlayerId) -> Self { Player { id, owned_tiles: 0, defeated_at: None, } } /// Return whether the player has been defeated already pub fn defeated(&self) -> bool { self.defeated_at.is_some() } /// Return whether the player can move. A player can move if it owns at least one tile, and if /// it has not been defeated. pub fn can_move(&self) -> bool { !self.defeated() && self.owned_tiles > 0 } } /// Represent an action a player can perform. #[derive(Copy, Clone, Debug, Deserialize, Serialize)] #[serde(tag = "type")] #[serde(rename_all = "lowercase")] pub enum Action { /// Resign Resign, /// Cancel all the moves already queued for the player #[serde(rename = "cancel_moves")] CancelMoves, /// Make a move from a tile to another Move(Move), } /// Represent a move from one tile to another. During a move, units are transfered from one tile to /// another adjacent tile. #[derive(Copy, Clone, Debug, Deserialize, Serialize)] pub struct Move { /// Player that is making the move. #[serde(skip)] pub player: PlayerId, /// Index of the tile from which troops are being moved. pub from: usize, /// Direction to which the troops are being moved. pub direction: Direction, } #[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "lowercase")] pub enum Direction {
Up, Down, } #[derive(Copy, Clone, Debug, PartialEq)] /// Outcome of a move pub enum MoveOutcome { /// Outcome when a move resulted in a general being captured. The player ID is the ID of the /// defeated player. GeneralCaptured(PlayerId), /// Outcome when a move resulted in an open tile or a city tile being captured. If the tile /// was belonging to a different player than the one making the move, the player's ID is /// specified. TileCaptured(Option<PlayerId>), /// Outcome when a move did not result in a tile being captured. StatuQuo, } /// Represent the different types of open (ie non-mountain) tiles #[derive(Copy, Clone, PartialEq, Debug, Serialize)] #[serde(rename_all = "lowercase")] pub enum TileKind { /// A tile that contains a general General, /// A tile that contains a city City, /// A regular tile Open, /// A tile that contains a mountain Mountain, } /// Represent an open tile. Open tiles are tiles that are not mountains, ie tiles that players can /// conquer. #[derive(Clone, PartialEq, Debug, Serialize)] pub struct Tile { /// The ID of the player that currenlty owns the tile (a player own a tile if he/she has units /// occupying the tile). #[serde(skip_serializing_if = "Option::is_none")] owner: Option<PlayerId>, /// Number of units occupying the tile #[serde(skip_serializing_if = "has_no_unit")] units: u16, /// The type of tile (open, city or general) #[serde(skip_serializing_if = "is_open")] kind: TileKind, /// List of players that can see the tile. To be able to see an open tile, a player must own a /// tile that touches it. #[serde(skip)] visible_by: HashSet<PlayerId>, /// Players that had visibility on this tile when it changed. #[serde(skip)] dirty_for: HashSet<PlayerId>, } /// Small helper used by serde to avoid serializing the `kind` field if the tile if of type /// `TileKind::Open`. We try to keep the jsons as small as possible for network efficiency. fn is_open(kind: &TileKind) -> bool { *kind == TileKind::Open } /// Small helper used by serde to avoid serializing the `units` field if the tile does not have any /// units. We try to keep the jsons as small as possible for network efficiency. fn has_no_unit(units: &u16) -> bool { *units == 0 } impl Tile { /// Return a new open tile or the given type, with no owner, and no unit. pub fn new() -> Self { Tile { owner: None, units: 0, dirty_for: HashSet::new(), visible_by: HashSet::new(), kind: TileKind::Mountain, } } /// Return whether the tile is marked as visible by the given player. pub fn is_visible_by(&self, player: PlayerId) -> bool { self.visible_by.contains(&player) } /// Mark the tile as invisible for the given player pub fn hide_from(&mut self, player: PlayerId) { let was_visible = self.visible_by.remove(&player); if was_visible { self.dirty_for.insert(player); } } /// Mark the tile as visible for the given player, updating the source and destination tiles /// state if necessary (number of units, owner, etc.). pub fn reveal_to(&mut self, player: PlayerId) { self.visible_by.insert(player); self.dirty_for.insert(player); } /// Perform a move from a source tile to a destination tile. 
pub fn attack(&mut self, dst: &mut Tile) -> Result<MoveOutcome, InvalidMove> { if self.is_mountain() { return Err(InvalidMove::FromInvalidTile); } if dst.is_mountain() { return Err(InvalidMove::ToInvalidTile); } if self.units() < 2 { return Err(InvalidMove::NotEnoughUnits); } let attacker = self.owner.ok_or(InvalidMove::SourceTileNotOwned)?; let outcome = match dst.owner { // The destination tile belongs to someone else Some(defender) if defender!= attacker => { // The defender has more units. if dst.units >= self.units - 1 { dst.units -= self.units - 1; MoveOutcome::StatuQuo } // The attacker has more units. Capture the tile. else { dst.units = self.units - 1 - dst.units; dst.owner = self.owner; // We're capturing a general if dst.kind == TileKind::General { // Turn the general into a regular city dst.kind = TileKind::City; MoveOutcome::GeneralCaptured(defender) } // We're capturing a regular tile else { MoveOutcome::TileCaptured(Some(defender)) } } } // The owner is the same for both tiles, just transfer the unit Some(_defender) => { dst.units += self.units - 1; MoveOutcome::StatuQuo } // The destination tile is not owned by anyone. None => { // The destination has more units, we can't capture it if dst.units >= self.units - 1 { dst.units -= self.units - 1; MoveOutcome::StatuQuo } else { dst.units = self.units - 1 - dst.units; dst.owner = self.owner; MoveOutcome::TileCaptured(None) } } }; // In any case, we always only leave 1 unit in the source tile // TODO: would be nice to support splitting the source tile units before moving. self.units = 1; self.set_dirty(); dst.set_dirty(); Ok(outcome) } /// Return the owner of the tile, if any pub fn owner(&self) -> Option<PlayerId> { self.owner } /// Return the number of units occupying the tile pub fn units(&self) -> u16 { self.units } /// Return whether the tile is open. A tile is open if it's not a city, a general or a /// mountain. pub fn is_open(&self) -> bool { self.kind == TileKind::Open } /// Return whether the tile is a general. pub fn is_general(&self) -> bool { self.kind == TileKind::General } /// Return whether the tile is a city. pub fn is_city(&self) -> bool { self.kind == TileKind::City } /// Return whether the tile is a mountain pub fn is_mountain(&self) -> bool { self.kind == TileKind::Mountain } /// Turn the tile into an open tile pub fn make_open(&mut self) { self.kind = TileKind::Open; self.set_dirty(); } pub fn set_dirty(&mut self) { for player_id in self.visible_by.iter() { self.dirty_for.insert(*player_id); } } /// Turn the tile into a general pub fn make_general(&mut self) { self.kind = TileKind::General; self.set_dirty(); } // // FIXME: unused for now, but that's because we don't have city yet // /// Turn the tile into a fortess. // pub fn make_city(&mut self) { // self.kind = TileKind::City; // self.set_dirty(); // } /// Turn the tile into a mountain. pub fn make_mountain(&mut self) { self.kind = TileKind::Mountain; self.set_dirty(); } /// Set the number of units occupying the tile pub fn set_units(&mut self, units: u16) { if self.is_mountain() { return; } self.units = units; self.set_dirty(); } /// Increment the number of units occupying the tile pub fn incr_units(&mut self, units: u16) { if self.is_mountain() { return; } self.units += units; self.set_dirty(); } /// Set the owner of the tile. To remove the existing owner, set the owner to `None`. 
pub fn set_owner(&mut self, player: Option<PlayerId>) { if self.is_mountain() { return; } // Mark the tile as dirty for the players that have visibility on the tile self.set_dirty(); // Mark the tile as dirty for the previous owner. As owner, it should have visibility on // the tile, so should have been added to `dirty_for` already, but let's be safe, it's pretty // cheap. if let Some(owner) = self.owner { self.dirty_for.insert(owner); } self.owner = player; if let Some(owner) = self.owner { self.reveal_to(owner); } } /// Return whether the tile's state has changed. A tile state changes when its type, its owner, /// or the number of units occupying it changes. pub fn is_dirty(&self) -> bool { !self.dirty_for.is_empty() } pub fn is_dirty_for(&self, player_id: PlayerId) -> bool { self.dirty_for.contains(&player_id) } /// Mark the tile as clean. This should be called to acknowledge that the tile has been processed /// after it was marked as dirty. pub fn set_clean(&mut self) { let _ = self.dirty_for.drain(); } } /// Represent an error that occurs when an invalid move is processed. #[derive(Debug, PartialEq, Eq)] pub enum InvalidMove { /// The source tile does not have enough units to perform the move. To be able to move from one /// tile, the tile must have at least two units. NotEnoughUnits, /// The destination tile is invalid (it can be a mountain or an out-of-grid tile). This occurs /// for instance if the source tile is on the top row and the move is upward. ToInvalidTile, /// The source tile is either a mountain or out of the grid. FromInvalidTile, /// The source tile does not belong to the player making the move. A move can only be performed /// by a player. SourceTileNotOwned, } use std::error::Error; use std::fmt; impl Error for InvalidMove { fn description(&self) -> &str { match *self { InvalidMove::NotEnoughUnits => "not enough units on the source tile", InvalidMove::ToInvalidTile => { "the destination tile is either a mountain or not on the map" } InvalidMove::FromInvalidTile => { "the source tile is either a mountain or not on the map" } InvalidMove::SourceTileNotOwned => { "the source tile does not belong to the player making the move" } } } fn cause(&self) -> Option<&Error> { None } } impl fmt::Display for InvalidMove { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Invalid move: {}", self.description()) } }
Right, Left,
random_line_split
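The serde attributes on `Action` and `Direction` above define the wire format: internal tagging (`tag = "type"`) folds the variant name into the object itself instead of wrapping it, and `rename_all = "lowercase"` keeps the JSON compact. A sketch of the resulting shape, assuming the `serde` (with the `derive` feature) and `serde_json` crates:

```rust
use serde::Serialize;

// Internally tagged enum, as in common.rs: the variant name becomes a
// "type" field rather than an outer wrapper object.
#[derive(Serialize)]
#[serde(tag = "type")]
#[serde(rename_all = "lowercase")]
enum Action {
    Resign,
    #[serde(rename = "cancel_moves")]
    CancelMoves,
}

fn main() {
    assert_eq!(
        serde_json::to_string(&Action::Resign).unwrap(),
        r#"{"type":"resign"}"#
    );
    assert_eq!(
        serde_json::to_string(&Action::CancelMoves).unwrap(),
        r#"{"type":"cancel_moves"}"#
    );
}
```

The same motivation drives the `skip_serializing_if` helpers in this file: omitting default-valued fields keeps the per-turn JSON as small as possible for network efficiency.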
common.rs
use std::collections::HashSet; pub type PlayerId = usize; /// Represent a player during a game. #[derive(Serialize, Clone, Debug, PartialEq)] pub struct Player { /// An integer that uniquely identifies each player during a game pub id: PlayerId, /// Number of tiles the player currently owns #[serde(skip_serializing_if = "has_no_tile")] pub owned_tiles: usize, /// Turn at which the player was defeated, if any #[serde(skip_serializing_if = "Option::is_none")] pub defeated_at: Option<usize>, } /// Small helper used by serde to avoid serializing the owned_tile field if the player does not own /// any tile. We try to keep the jsons as small as possible for network efficiency. fn has_no_tile(owned_tiles: &usize) -> bool { *owned_tiles == 0 } impl Player { /// Return a new undefeated player, with no owned tile. pub fn new(id: PlayerId) -> Self { Player { id, owned_tiles: 0, defeated_at: None, } } /// Return whether the player has been defeated already pub fn defeated(&self) -> bool { self.defeated_at.is_some() } /// Return whether the player can move. A player can move if it owns at least one tile, and if /// it has not been defeated. pub fn can_move(&self) -> bool { !self.defeated() && self.owned_tiles > 0 } } /// Represent an action a player can perform. #[derive(Copy, Clone, Debug, Deserialize, Serialize)] #[serde(tag = "type")] #[serde(rename_all = "lowercase")] pub enum Action { /// Resign Resign, /// Cancel all the moves already queued for the player #[serde(rename = "cancel_moves")] CancelMoves, /// Make a move from a tile to another Move(Move), } /// Represent a move from one tile to another. During a move, units are transfered from one tile to /// another adjacent tile. #[derive(Copy, Clone, Debug, Deserialize, Serialize)] pub struct Move { /// Player that is making the move. #[serde(skip)] pub player: PlayerId, /// Index of the tile from which troops are being moved. pub from: usize, /// Direction to which the troops are being moved. pub direction: Direction, } #[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "lowercase")] pub enum Direction { Right, Left, Up, Down, } #[derive(Copy, Clone, Debug, PartialEq)] /// Outcome of a move pub enum MoveOutcome { /// Outcome when a move resulted in a general being captured. The player ID is the ID of the /// defeated player. GeneralCaptured(PlayerId), /// Outcome when a move resulted in an open tile or a city tile being captured. If the tile /// was belonging to a different player than the one making the move, the player's ID is /// specified. TileCaptured(Option<PlayerId>), /// Outcome when a move did not result in a tile being captured. StatuQuo, } /// Represent the different types of open (ie non-mountain) tiles #[derive(Copy, Clone, PartialEq, Debug, Serialize)] #[serde(rename_all = "lowercase")] pub enum TileKind { /// A tile that contains a general General, /// A tile that contains a city City, /// A regular tile Open, /// A tile that contains a mountain Mountain, } /// Represent an open tile. Open tiles are tiles that are not mountains, ie tiles that players can /// conquer. #[derive(Clone, PartialEq, Debug, Serialize)] pub struct Tile { /// The ID of the player that currenlty owns the tile (a player own a tile if he/she has units /// occupying the tile). 
#[serde(skip_serializing_if = "Option::is_none")] owner: Option<PlayerId>, /// Number of units occupying the tile #[serde(skip_serializing_if = "has_no_unit")] units: u16, /// The type of tile (open, city or general) #[serde(skip_serializing_if = "is_open")] kind: TileKind, /// List of players that can see the tile. To be able to see an open tile, a player must own a /// tile that touches it. #[serde(skip)] visible_by: HashSet<PlayerId>, /// Players that had visibility on this tile when it changed. #[serde(skip)] dirty_for: HashSet<PlayerId>, } /// Small helper used by serde to avoid serializing the `kind` field if the tile if of type /// `TileKind::Open`. We try to keep the jsons as small as possible for network efficiency. fn is_open(kind: &TileKind) -> bool { *kind == TileKind::Open } /// Small helper used by serde to avoid serializing the `units` field if the tile does not have any /// units. We try to keep the jsons as small as possible for network efficiency. fn has_no_unit(units: &u16) -> bool { *units == 0 } impl Tile { /// Return a new open tile or the given type, with no owner, and no unit. pub fn new() -> Self { Tile { owner: None, units: 0, dirty_for: HashSet::new(), visible_by: HashSet::new(), kind: TileKind::Mountain, } } /// Return whether the tile is marked as visible by the given player. pub fn is_visible_by(&self, player: PlayerId) -> bool { self.visible_by.contains(&player) } /// Mark the tile as invisible for the given player pub fn hide_from(&mut self, player: PlayerId) { let was_visible = self.visible_by.remove(&player); if was_visible { self.dirty_for.insert(player); } } /// Mark the tile as visible for the given player, updating the source and destination tiles /// state if necessary (number of units, owner, etc.). pub fn reveal_to(&mut self, player: PlayerId) { self.visible_by.insert(player); self.dirty_for.insert(player); } /// Perform a move from a source tile to a destination tile. pub fn attack(&mut self, dst: &mut Tile) -> Result<MoveOutcome, InvalidMove> { if self.is_mountain() { return Err(InvalidMove::FromInvalidTile); } if dst.is_mountain() { return Err(InvalidMove::ToInvalidTile); } if self.units() < 2 { return Err(InvalidMove::NotEnoughUnits); } let attacker = self.owner.ok_or(InvalidMove::SourceTileNotOwned)?; let outcome = match dst.owner { // The destination tile belongs to someone else Some(defender) if defender!= attacker => { // The defender has more units. if dst.units >= self.units - 1 { dst.units -= self.units - 1; MoveOutcome::StatuQuo } // The attacker has more units. Capture the tile. else { dst.units = self.units - 1 - dst.units; dst.owner = self.owner; // We're capturing a general if dst.kind == TileKind::General { // Turn the general into a regular city dst.kind = TileKind::City; MoveOutcome::GeneralCaptured(defender) } // We're capturing a regular tile else { MoveOutcome::TileCaptured(Some(defender)) } } } // The owner is the same for both tiles, just transfer the unit Some(_defender) => { dst.units += self.units - 1; MoveOutcome::StatuQuo } // The destination tile is not owned by anyone. None => { // The destination has more units, we can't capture it if dst.units >= self.units - 1 { dst.units -= self.units - 1; MoveOutcome::StatuQuo } else { dst.units = self.units - 1 - dst.units; dst.owner = self.owner; MoveOutcome::TileCaptured(None) } } }; // In any case, we always only leave 1 unit in the source tile // TODO: would be nice to support splitting the source tile units before moving. 
self.units = 1; self.set_dirty(); dst.set_dirty(); Ok(outcome) } /// Return the owner of the tile, if any pub fn owner(&self) -> Option<PlayerId> { self.owner } /// Return the number of units occupying the tile pub fn units(&self) -> u16 { self.units } /// Return whether the tile is open. A tile is open if it's not a city, a general or a /// mountain. pub fn is_open(&self) -> bool { self.kind == TileKind::Open } /// Return whether the tile is a general. pub fn is_general(&self) -> bool { self.kind == TileKind::General } /// Return whether the tile is a city. pub fn is_city(&self) -> bool { self.kind == TileKind::City } /// Return whether the tile is a mountain pub fn is_mountain(&self) -> bool { self.kind == TileKind::Mountain } /// Turn the tile into an open tile pub fn make_open(&mut self) { self.kind = TileKind::Open; self.set_dirty(); } pub fn set_dirty(&mut self) { for player_id in self.visible_by.iter() { self.dirty_for.insert(*player_id); } } /// Turn the tile into a general pub fn make_general(&mut self) { self.kind = TileKind::General; self.set_dirty(); } // // FIXME: unused for now, but that's because we don't have city yet // /// Turn the tile into a fortess. // pub fn make_city(&mut self) { // self.kind = TileKind::City; // self.set_dirty(); // } /// Turn the tile into a mountain. pub fn make_mountain(&mut self) { self.kind = TileKind::Mountain; self.set_dirty(); } /// Set the number of units occupying the tile pub fn set_units(&mut self, units: u16) { if self.is_mountain() { return; } self.units = units; self.set_dirty(); } /// Increment the number of units occupying the tile pub fn incr_units(&mut self, units: u16) { if self.is_mountain() { return; } self.units += units; self.set_dirty(); } /// Set the owner of the tile. To remove the existing owner, set the owner to `None`. pub fn set_owner(&mut self, player: Option<PlayerId>) { if self.is_mountain() { return; } // Mark the tile as dirty for the players that have visibility on the tile self.set_dirty(); // Mark the tile as dirty for the previous owner. As owner, it should have visibility on // the tile, so should have been added `dirty_for` already, but let's be safe, it's pretty // cheap. if let Some(owner) = self.owner { self.dirty_for.insert(owner); } self.owner = player; if let Some(owner) = self.owner { self.reveal_to(owner); } } /// Return whether the tile's state has changed. A tile state changes when its type, its owner, /// or the number of units occupying it changes. pub fn is_dirty(&self) -> bool { !self.dirty_for.is_empty() } pub fn is_dirty_for(&self, player_id: PlayerId) -> bool { self.dirty_for.contains(&player_id) } /// Mark the tile a clean. This should be called to acknoledge that the tile has been processed /// when after is was marked as dirty. pub fn set_clean(&mut self) { let _ = self.dirty_for.drain(); } } /// Represent an error that occurs when an invalid move is processed. #[derive(Debug, PartialEq, Eq)] pub enum InvalidMove { /// The source tile does not have enough units to perform the move. To be able to move from one /// tile, the tile must have at least two units. NotEnoughUnits, /// The destination tile is invalid (it can be a mountain or an out-of-grid tile. This occurs /// for instance if the source tile is on the top row, and the move is upward. ToInvalidTile, /// The source tile is either a mountain or out of the grid. FromInvalidTile, /// The source tile does not belong to the player making the move. A move can only be performed /// by a player. 
SourceTileNotOwned, } use std::error::Error; use std::fmt; impl Error for InvalidMove { fn description(&self) -> &str { match *self { InvalidMove::NotEnoughUnits => "not enough unit on the source tile", InvalidMove::ToInvalidTile => { "the destination tile is either a mountain or not on the map" } InvalidMove::FromInvalidTile =>
InvalidMove::SourceTileNotOwned => { "the source tile does not belong to the player making the move" } } } fn cause(&self) -> Option<&Error> { None } } impl fmt::Display for InvalidMove { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Invalid move: {}", self.description()) } }
{ "the source tile is either a mountain or not on the map" }
conditional_block
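One modernization note on the `InvalidMove` error above: `std::error::Error::description` has been deprecated since Rust 1.42 (and `cause` since 1.33, in favor of `source`). The same messages can live directly in the `Display` impl, which is all the `Error` trait needs. A trimmed sketch of that shape, with only two variants shown:

```rust
use std::error::Error;
use std::fmt;

#[derive(Debug)]
enum InvalidMove {
    NotEnoughUnits,
    SourceTileNotOwned,
}

impl fmt::Display for InvalidMove {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let msg = match self {
            InvalidMove::NotEnoughUnits => "not enough units on the source tile",
            InvalidMove::SourceTileNotOwned => {
                "the source tile does not belong to the player making the move"
            }
        };
        write!(f, "Invalid move: {}", msg)
    }
}

// Display + Debug satisfy the trait's default methods; description/cause can go.
impl Error for InvalidMove {}

fn main() {
    let err: Box<dyn Error> = Box::new(InvalidMove::NotEnoughUnits);
    println!("{}", err);
}
```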
common.rs
use std::collections::HashSet; pub type PlayerId = usize; /// Represent a player during a game. #[derive(Serialize, Clone, Debug, PartialEq)] pub struct Player { /// An integer that uniquely identifies each player during a game pub id: PlayerId, /// Number of tiles the player currently owns #[serde(skip_serializing_if = "has_no_tile")] pub owned_tiles: usize, /// Turn at which the player was defeated, if any #[serde(skip_serializing_if = "Option::is_none")] pub defeated_at: Option<usize>, } /// Small helper used by serde to avoid serializing the owned_tile field if the player does not own /// any tile. We try to keep the jsons as small as possible for network efficiency. fn has_no_tile(owned_tiles: &usize) -> bool { *owned_tiles == 0 } impl Player { /// Return a new undefeated player, with no owned tile. pub fn new(id: PlayerId) -> Self { Player { id, owned_tiles: 0, defeated_at: None, } } /// Return whether the player has been defeated already pub fn defeated(&self) -> bool { self.defeated_at.is_some() } /// Return whether the player can move. A player can move if it owns at least one tile, and if /// it has not been defeated. pub fn can_move(&self) -> bool { !self.defeated() && self.owned_tiles > 0 } } /// Represent an action a player can perform. #[derive(Copy, Clone, Debug, Deserialize, Serialize)] #[serde(tag = "type")] #[serde(rename_all = "lowercase")] pub enum Action { /// Resign Resign, /// Cancel all the moves already queued for the player #[serde(rename = "cancel_moves")] CancelMoves, /// Make a move from a tile to another Move(Move), } /// Represent a move from one tile to another. During a move, units are transfered from one tile to /// another adjacent tile. #[derive(Copy, Clone, Debug, Deserialize, Serialize)] pub struct Move { /// Player that is making the move. #[serde(skip)] pub player: PlayerId, /// Index of the tile from which troops are being moved. pub from: usize, /// Direction to which the troops are being moved. pub direction: Direction, } #[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "lowercase")] pub enum Direction { Right, Left, Up, Down, } #[derive(Copy, Clone, Debug, PartialEq)] /// Outcome of a move pub enum MoveOutcome { /// Outcome when a move resulted in a general being captured. The player ID is the ID of the /// defeated player. GeneralCaptured(PlayerId), /// Outcome when a move resulted in an open tile or a city tile being captured. If the tile /// was belonging to a different player than the one making the move, the player's ID is /// specified. TileCaptured(Option<PlayerId>), /// Outcome when a move did not result in a tile being captured. StatuQuo, } /// Represent the different types of open (ie non-mountain) tiles #[derive(Copy, Clone, PartialEq, Debug, Serialize)] #[serde(rename_all = "lowercase")] pub enum TileKind { /// A tile that contains a general General, /// A tile that contains a city City, /// A regular tile Open, /// A tile that contains a mountain Mountain, } /// Represent an open tile. Open tiles are tiles that are not mountains, ie tiles that players can /// conquer. #[derive(Clone, PartialEq, Debug, Serialize)] pub struct Tile { /// The ID of the player that currenlty owns the tile (a player own a tile if he/she has units /// occupying the tile). 
#[serde(skip_serializing_if = "Option::is_none")] owner: Option<PlayerId>, /// Number of units occupying the tile #[serde(skip_serializing_if = "has_no_unit")] units: u16, /// The type of tile (open, city or general) #[serde(skip_serializing_if = "is_open")] kind: TileKind, /// List of players that can see the tile. To be able to see an open tile, a player must own a /// tile that touches it. #[serde(skip)] visible_by: HashSet<PlayerId>, /// Players that had visibility on this tile when it changed. #[serde(skip)] dirty_for: HashSet<PlayerId>, } /// Small helper used by serde to avoid serializing the `kind` field if the tile if of type /// `TileKind::Open`. We try to keep the jsons as small as possible for network efficiency. fn is_open(kind: &TileKind) -> bool { *kind == TileKind::Open } /// Small helper used by serde to avoid serializing the `units` field if the tile does not have any /// units. We try to keep the jsons as small as possible for network efficiency. fn has_no_unit(units: &u16) -> bool { *units == 0 } impl Tile { /// Return a new open tile or the given type, with no owner, and no unit. pub fn new() -> Self { Tile { owner: None, units: 0, dirty_for: HashSet::new(), visible_by: HashSet::new(), kind: TileKind::Mountain, } } /// Return whether the tile is marked as visible by the given player. pub fn is_visible_by(&self, player: PlayerId) -> bool { self.visible_by.contains(&player) } /// Mark the tile as invisible for the given player pub fn
(&mut self, player: PlayerId) { let was_visible = self.visible_by.remove(&player); if was_visible { self.dirty_for.insert(player); } } /// Mark the tile as visible for the given player, updating the source and destination tiles /// state if necessary (number of units, owner, etc.). pub fn reveal_to(&mut self, player: PlayerId) { self.visible_by.insert(player); self.dirty_for.insert(player); } /// Perform a move from a source tile to a destination tile. pub fn attack(&mut self, dst: &mut Tile) -> Result<MoveOutcome, InvalidMove> { if self.is_mountain() { return Err(InvalidMove::FromInvalidTile); } if dst.is_mountain() { return Err(InvalidMove::ToInvalidTile); } if self.units() < 2 { return Err(InvalidMove::NotEnoughUnits); } let attacker = self.owner.ok_or(InvalidMove::SourceTileNotOwned)?; let outcome = match dst.owner { // The destination tile belongs to someone else Some(defender) if defender!= attacker => { // The defender has more units. if dst.units >= self.units - 1 { dst.units -= self.units - 1; MoveOutcome::StatuQuo } // The attacker has more units. Capture the tile. else { dst.units = self.units - 1 - dst.units; dst.owner = self.owner; // We're capturing a general if dst.kind == TileKind::General { // Turn the general into a regular city dst.kind = TileKind::City; MoveOutcome::GeneralCaptured(defender) } // We're capturing a regular tile else { MoveOutcome::TileCaptured(Some(defender)) } } } // The owner is the same for both tiles, just transfer the unit Some(_defender) => { dst.units += self.units - 1; MoveOutcome::StatuQuo } // The destination tile is not owned by anyone. None => { // The destination has more units, we can't capture it if dst.units >= self.units - 1 { dst.units -= self.units - 1; MoveOutcome::StatuQuo } else { dst.units = self.units - 1 - dst.units; dst.owner = self.owner; MoveOutcome::TileCaptured(None) } } }; // In any case, we always only leave 1 unit in the source tile // TODO: would be nice to support splitting the source tile units before moving. self.units = 1; self.set_dirty(); dst.set_dirty(); Ok(outcome) } /// Return the owner of the tile, if any pub fn owner(&self) -> Option<PlayerId> { self.owner } /// Return the number of units occupying the tile pub fn units(&self) -> u16 { self.units } /// Return whether the tile is open. A tile is open if it's not a city, a general or a /// mountain. pub fn is_open(&self) -> bool { self.kind == TileKind::Open } /// Return whether the tile is a general. pub fn is_general(&self) -> bool { self.kind == TileKind::General } /// Return whether the tile is a city. pub fn is_city(&self) -> bool { self.kind == TileKind::City } /// Return whether the tile is a mountain pub fn is_mountain(&self) -> bool { self.kind == TileKind::Mountain } /// Turn the tile into an open tile pub fn make_open(&mut self) { self.kind = TileKind::Open; self.set_dirty(); } pub fn set_dirty(&mut self) { for player_id in self.visible_by.iter() { self.dirty_for.insert(*player_id); } } /// Turn the tile into a general pub fn make_general(&mut self) { self.kind = TileKind::General; self.set_dirty(); } // // FIXME: unused for now, but that's because we don't have city yet // /// Turn the tile into a fortess. // pub fn make_city(&mut self) { // self.kind = TileKind::City; // self.set_dirty(); // } /// Turn the tile into a mountain. 
pub fn make_mountain(&mut self) { self.kind = TileKind::Mountain; self.set_dirty(); } /// Set the number of units occupying the tile pub fn set_units(&mut self, units: u16) { if self.is_mountain() { return; } self.units = units; self.set_dirty(); } /// Increment the number of units occupying the tile pub fn incr_units(&mut self, units: u16) { if self.is_mountain() { return; } self.units += units; self.set_dirty(); } /// Set the owner of the tile. To remove the existing owner, set the owner to `None`. pub fn set_owner(&mut self, player: Option<PlayerId>) { if self.is_mountain() { return; } // Mark the tile as dirty for the players that have visibility on the tile self.set_dirty(); // Mark the tile as dirty for the previous owner. As owner, it should have visibility on // the tile, so should have been added `dirty_for` already, but let's be safe, it's pretty // cheap. if let Some(owner) = self.owner { self.dirty_for.insert(owner); } self.owner = player; if let Some(owner) = self.owner { self.reveal_to(owner); } } /// Return whether the tile's state has changed. A tile state changes when its type, its owner, /// or the number of units occupying it changes. pub fn is_dirty(&self) -> bool { !self.dirty_for.is_empty() } pub fn is_dirty_for(&self, player_id: PlayerId) -> bool { self.dirty_for.contains(&player_id) } /// Mark the tile a clean. This should be called to acknoledge that the tile has been processed /// when after is was marked as dirty. pub fn set_clean(&mut self) { let _ = self.dirty_for.drain(); } } /// Represent an error that occurs when an invalid move is processed. #[derive(Debug, PartialEq, Eq)] pub enum InvalidMove { /// The source tile does not have enough units to perform the move. To be able to move from one /// tile, the tile must have at least two units. NotEnoughUnits, /// The destination tile is invalid (it can be a mountain or an out-of-grid tile. This occurs /// for instance if the source tile is on the top row, and the move is upward. ToInvalidTile, /// The source tile is either a mountain or out of the grid. FromInvalidTile, /// The source tile does not belong to the player making the move. A move can only be performed /// by a player. SourceTileNotOwned, } use std::error::Error; use std::fmt; impl Error for InvalidMove { fn description(&self) -> &str { match *self { InvalidMove::NotEnoughUnits => "not enough unit on the source tile", InvalidMove::ToInvalidTile => { "the destination tile is either a mountain or not on the map" } InvalidMove::FromInvalidTile => { "the source tile is either a mountain or not on the map" } InvalidMove::SourceTileNotOwned => { "the source tile does not belong to the player making the move" } } } fn cause(&self) -> Option<&Error> { None } } impl fmt::Display for InvalidMove { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Invalid move: {}", self.description()) } }
hide_from
identifier_name
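The `visible_by`/`dirty_for` pair on `Tile` implements a small change-tracking protocol: a tile only becomes dirty for players who could actually observe the change. A self-contained sketch of that protocol with the rest of the tile stripped away (names are illustrative):

```rust
use std::collections::HashSet;

// Visibility/dirty-flag protocol from Tile: hiding a tile only marks it
// dirty for players that previously had visibility.
struct Visibility {
    visible_by: HashSet<usize>,
    dirty_for: HashSet<usize>,
}

impl Visibility {
    fn new() -> Self {
        Visibility { visible_by: HashSet::new(), dirty_for: HashSet::new() }
    }
    fn reveal_to(&mut self, player: usize) {
        self.visible_by.insert(player);
        self.dirty_for.insert(player);
    }
    fn hide_from(&mut self, player: usize) {
        // Only players that could see the tile need a state update.
        if self.visible_by.remove(&player) {
            self.dirty_for.insert(player);
        }
    }
}

fn main() {
    let mut v = Visibility::new();
    v.reveal_to(1);
    v.hide_from(1);
    v.hide_from(2); // no-op: player 2 never saw the tile
    assert!(v.dirty_for.contains(&1));
    assert!(!v.dirty_for.contains(&2));
}
```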
common.rs
use std::collections::HashSet; pub type PlayerId = usize; /// Represent a player during a game. #[derive(Serialize, Clone, Debug, PartialEq)] pub struct Player { /// An integer that uniquely identifies each player during a game pub id: PlayerId, /// Number of tiles the player currently owns #[serde(skip_serializing_if = "has_no_tile")] pub owned_tiles: usize, /// Turn at which the player was defeated, if any #[serde(skip_serializing_if = "Option::is_none")] pub defeated_at: Option<usize>, } /// Small helper used by serde to avoid serializing the owned_tile field if the player does not own /// any tile. We try to keep the jsons as small as possible for network efficiency. fn has_no_tile(owned_tiles: &usize) -> bool { *owned_tiles == 0 } impl Player { /// Return a new undefeated player, with no owned tile. pub fn new(id: PlayerId) -> Self { Player { id, owned_tiles: 0, defeated_at: None, } } /// Return whether the player has been defeated already pub fn defeated(&self) -> bool { self.defeated_at.is_some() } /// Return whether the player can move. A player can move if it owns at least one tile, and if /// it has not been defeated. pub fn can_move(&self) -> bool { !self.defeated() && self.owned_tiles > 0 } } /// Represent an action a player can perform. #[derive(Copy, Clone, Debug, Deserialize, Serialize)] #[serde(tag = "type")] #[serde(rename_all = "lowercase")] pub enum Action { /// Resign Resign, /// Cancel all the moves already queued for the player #[serde(rename = "cancel_moves")] CancelMoves, /// Make a move from a tile to another Move(Move), } /// Represent a move from one tile to another. During a move, units are transfered from one tile to /// another adjacent tile. #[derive(Copy, Clone, Debug, Deserialize, Serialize)] pub struct Move { /// Player that is making the move. #[serde(skip)] pub player: PlayerId, /// Index of the tile from which troops are being moved. pub from: usize, /// Direction to which the troops are being moved. pub direction: Direction, } #[derive(Copy, Clone, Debug, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "lowercase")] pub enum Direction { Right, Left, Up, Down, } #[derive(Copy, Clone, Debug, PartialEq)] /// Outcome of a move pub enum MoveOutcome { /// Outcome when a move resulted in a general being captured. The player ID is the ID of the /// defeated player. GeneralCaptured(PlayerId), /// Outcome when a move resulted in an open tile or a city tile being captured. If the tile /// was belonging to a different player than the one making the move, the player's ID is /// specified. TileCaptured(Option<PlayerId>), /// Outcome when a move did not result in a tile being captured. StatuQuo, } /// Represent the different types of open (ie non-mountain) tiles #[derive(Copy, Clone, PartialEq, Debug, Serialize)] #[serde(rename_all = "lowercase")] pub enum TileKind { /// A tile that contains a general General, /// A tile that contains a city City, /// A regular tile Open, /// A tile that contains a mountain Mountain, } /// Represent an open tile. Open tiles are tiles that are not mountains, ie tiles that players can /// conquer. #[derive(Clone, PartialEq, Debug, Serialize)] pub struct Tile { /// The ID of the player that currenlty owns the tile (a player own a tile if he/she has units /// occupying the tile). 
#[serde(skip_serializing_if = "Option::is_none")] owner: Option<PlayerId>, /// Number of units occupying the tile #[serde(skip_serializing_if = "has_no_unit")] units: u16, /// The type of tile (open, city or general) #[serde(skip_serializing_if = "is_open")] kind: TileKind, /// List of players that can see the tile. To be able to see an open tile, a player must own a /// tile that touches it. #[serde(skip)] visible_by: HashSet<PlayerId>, /// Players that had visibility on this tile when it changed. #[serde(skip)] dirty_for: HashSet<PlayerId>, } /// Small helper used by serde to avoid serializing the `kind` field if the tile if of type /// `TileKind::Open`. We try to keep the jsons as small as possible for network efficiency. fn is_open(kind: &TileKind) -> bool { *kind == TileKind::Open } /// Small helper used by serde to avoid serializing the `units` field if the tile does not have any /// units. We try to keep the jsons as small as possible for network efficiency. fn has_no_unit(units: &u16) -> bool { *units == 0 } impl Tile { /// Return a new open tile or the given type, with no owner, and no unit. pub fn new() -> Self { Tile { owner: None, units: 0, dirty_for: HashSet::new(), visible_by: HashSet::new(), kind: TileKind::Mountain, } } /// Return whether the tile is marked as visible by the given player. pub fn is_visible_by(&self, player: PlayerId) -> bool { self.visible_by.contains(&player) } /// Mark the tile as invisible for the given player pub fn hide_from(&mut self, player: PlayerId) { let was_visible = self.visible_by.remove(&player); if was_visible { self.dirty_for.insert(player); } } /// Mark the tile as visible for the given player, updating the source and destination tiles /// state if necessary (number of units, owner, etc.). pub fn reveal_to(&mut self, player: PlayerId) { self.visible_by.insert(player); self.dirty_for.insert(player); } /// Perform a move from a source tile to a destination tile. pub fn attack(&mut self, dst: &mut Tile) -> Result<MoveOutcome, InvalidMove>
// The attacker has more units. Capture the tile. else { dst.units = self.units - 1 - dst.units; dst.owner = self.owner; // We're capturing a general if dst.kind == TileKind::General { // Turn the general into a regular city dst.kind = TileKind::City; MoveOutcome::GeneralCaptured(defender) } // We're capturing a regular tile else { MoveOutcome::TileCaptured(Some(defender)) } } } // The owner is the same for both tiles, just transfer the unit Some(_defender) => { dst.units += self.units - 1; MoveOutcome::StatuQuo } // The destination tile is not owned by anyone. None => { // The destination has more units, we can't capture it if dst.units >= self.units - 1 { dst.units -= self.units - 1; MoveOutcome::StatuQuo } else { dst.units = self.units - 1 - dst.units; dst.owner = self.owner; MoveOutcome::TileCaptured(None) } } }; // In any case, we always only leave 1 unit in the source tile // TODO: would be nice to support splitting the source tile units before moving. self.units = 1; self.set_dirty(); dst.set_dirty(); Ok(outcome) } /// Return the owner of the tile, if any pub fn owner(&self) -> Option<PlayerId> { self.owner } /// Return the number of units occupying the tile pub fn units(&self) -> u16 { self.units } /// Return whether the tile is open. A tile is open if it's not a city, a general or a /// mountain. pub fn is_open(&self) -> bool { self.kind == TileKind::Open } /// Return whether the tile is a general. pub fn is_general(&self) -> bool { self.kind == TileKind::General } /// Return whether the tile is a city. pub fn is_city(&self) -> bool { self.kind == TileKind::City } /// Return whether the tile is a mountain pub fn is_mountain(&self) -> bool { self.kind == TileKind::Mountain } /// Turn the tile into an open tile pub fn make_open(&mut self) { self.kind = TileKind::Open; self.set_dirty(); } pub fn set_dirty(&mut self) { for player_id in self.visible_by.iter() { self.dirty_for.insert(*player_id); } } /// Turn the tile into a general pub fn make_general(&mut self) { self.kind = TileKind::General; self.set_dirty(); } // // FIXME: unused for now, but that's because we don't have city yet // /// Turn the tile into a fortess. // pub fn make_city(&mut self) { // self.kind = TileKind::City; // self.set_dirty(); // } /// Turn the tile into a mountain. pub fn make_mountain(&mut self) { self.kind = TileKind::Mountain; self.set_dirty(); } /// Set the number of units occupying the tile pub fn set_units(&mut self, units: u16) { if self.is_mountain() { return; } self.units = units; self.set_dirty(); } /// Increment the number of units occupying the tile pub fn incr_units(&mut self, units: u16) { if self.is_mountain() { return; } self.units += units; self.set_dirty(); } /// Set the owner of the tile. To remove the existing owner, set the owner to `None`. pub fn set_owner(&mut self, player: Option<PlayerId>) { if self.is_mountain() { return; } // Mark the tile as dirty for the players that have visibility on the tile self.set_dirty(); // Mark the tile as dirty for the previous owner. As owner, it should have visibility on // the tile, so should have been added `dirty_for` already, but let's be safe, it's pretty // cheap. if let Some(owner) = self.owner { self.dirty_for.insert(owner); } self.owner = player; if let Some(owner) = self.owner { self.reveal_to(owner); } } /// Return whether the tile's state has changed. A tile state changes when its type, its owner, /// or the number of units occupying it changes. 
pub fn is_dirty(&self) -> bool { !self.dirty_for.is_empty() } pub fn is_dirty_for(&self, player_id: PlayerId) -> bool { self.dirty_for.contains(&player_id) } /// Mark the tile a clean. This should be called to acknoledge that the tile has been processed /// when after is was marked as dirty. pub fn set_clean(&mut self) { let _ = self.dirty_for.drain(); } } /// Represent an error that occurs when an invalid move is processed. #[derive(Debug, PartialEq, Eq)] pub enum InvalidMove { /// The source tile does not have enough units to perform the move. To be able to move from one /// tile, the tile must have at least two units. NotEnoughUnits, /// The destination tile is invalid (it can be a mountain or an out-of-grid tile. This occurs /// for instance if the source tile is on the top row, and the move is upward. ToInvalidTile, /// The source tile is either a mountain or out of the grid. FromInvalidTile, /// The source tile does not belong to the player making the move. A move can only be performed /// by a player. SourceTileNotOwned, } use std::error::Error; use std::fmt; impl Error for InvalidMove { fn description(&self) -> &str { match *self { InvalidMove::NotEnoughUnits => "not enough unit on the source tile", InvalidMove::ToInvalidTile => { "the destination tile is either a mountain or not on the map" } InvalidMove::FromInvalidTile => { "the source tile is either a mountain or not on the map" } InvalidMove::SourceTileNotOwned => { "the source tile does not belong to the player making the move" } } } fn cause(&self) -> Option<&Error> { None } } impl fmt::Display for InvalidMove { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Invalid move: {}", self.description()) } }
{ if self.is_mountain() { return Err(InvalidMove::FromInvalidTile); } if dst.is_mountain() { return Err(InvalidMove::ToInvalidTile); } if self.units() < 2 { return Err(InvalidMove::NotEnoughUnits); } let attacker = self.owner.ok_or(InvalidMove::SourceTileNotOwned)?; let outcome = match dst.owner { // The destination tile belongs to someone else Some(defender) if defender != attacker => { // The defender has more units. if dst.units >= self.units - 1 { dst.units -= self.units - 1; MoveOutcome::StatuQuo }
identifier_body
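Once the ownership cases in `attack` are peeled off, the capture rule reduces to simple arithmetic: the attacker commits `units - 1` (one unit always stays behind), and whichever side has the larger count keeps the tile with the difference. A sketch of just that arithmetic, with illustrative names:

```rust
// Unit arithmetic from Tile::attack. Returns the destination tile's new
// unit count and whether it was captured.
fn resolve(attacker_units: u16, defender_units: u16) -> (u16, bool) {
    // attack() rejects moves with fewer than 2 units, so the wave is >= 1.
    assert!(attacker_units >= 2);
    let wave = attacker_units - 1; // one unit always stays on the source tile
    if defender_units >= wave {
        (defender_units - wave, false) // defense holds
    } else {
        (wave - defender_units, true) // captured; survivors occupy the tile
    }
}

fn main() {
    assert_eq!(resolve(10, 4), (5, true)); // 9 attack, 4 defend -> captured with 5
    assert_eq!(resolve(3, 7), (5, false)); // 2 attack, 7 defend -> 5 defenders left
}
```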
from_str.rs
use fen4::{Position, PositionParseError}; use std::str::FromStr; use crate::types::*; use thiserror::Error; #[derive(Error, PartialEq, Clone, Debug)] pub enum MoveError { #[error("Basic move is malformed.")] Other, #[error("A move starts with O-O, but is not a correct type of move.")] Castle, #[error("Unable to parse basic move because {0}")] PositionInvalid(#[from] PositionParseError), } impl FromStr for BasicMove { type Err = MoveError; fn from_str(string: &str) -> Result<Self, Self::Err> { let mut iter = string.chars(); let start = iter.next().ok_or(MoveError::Other)?; let (piece, pieceless) = if start.is_ascii_lowercase() { ('P', string) } else { (start, iter.as_str()) }; let mateless = pieceless.trim_end_matches('#'); let checkless = mateless.trim_end_matches('+'); let mates = pieceless.len() - mateless.len(); let checks = mateless.len() - checkless.len(); let (two_pos, promotion) = if let Some(equals) = checkless.find('=') { let (left_over, promote) = checkless.split_at(equals); let mut iter = promote.chars(); if iter.next()!= Some('=') { return Err(MoveError::Other); } let p = iter.next().ok_or(MoveError::Other)?; if iter.next().is_some() { return Err(MoveError::Other); } (left_over, Some(p)) } else { (checkless, None) }; let loc = if let Some(dash) = two_pos.find('-') { dash } else if let Some(x) = two_pos.find('x') { x } else { return Err(MoveError::Other); }; let (left, tmp) = two_pos.split_at(loc); let (mid, mut right) = tmp.split_at(1); // x and - are both ascii and therefore 1 byte let from = left.parse::<Position>()?; let captured = if mid == "x" { let mut iter = right.chars(); let start = iter.next().ok_or(MoveError::Other)?; Some(if start.is_ascii_lowercase() { 'P' } else { right = iter.as_str(); start }) } else { None }; let to = right.parse::<Position>()?; Ok(BasicMove { piece, from, captured, to, promotion, checks, mates, }) } } impl FromStr for Move { type Err = MoveError; fn from_str(string: &str) -> Result<Self, Self::Err> { use Move::*; Ok(match string { "#" => Checkmate, "S" => Stalemate, "T" => Timeout, "R" => Resign, s if s.starts_with("O-O") => { let mateless = s.trim_end_matches('#'); let mates = s.len() - mateless.len(); match mateless { "O-O-O" => QueenCastle(mates), "O-O" => KingCastle(mates), _ => return Err(MoveError::Castle), } } _ => Normal(string.parse::<BasicMove>()?), }) } } struct MovePair { main: Move, modifier: Option<Move>, stalemate: bool, } impl FromStr for MovePair { type Err = MoveError; fn from_str(string: &str) -> Result<Self, Self::Err> { let mut stalemate = false; let break_index = if string.len() == 2 { 1 // No move is 2 bytes long } else if string.len() > 2 { if string.ends_with("RS") &&!string.ends_with("=RS") || string.ends_with("TS") &&!string.ends_with("=TS") { stalemate = true; string.len() - 2 } else if (string.ends_with('R') &&!string.ends_with("=R")) || (string.ends_with('S') &&!string.ends_with("=S")) || (string.ends_with('T') &&!string.ends_with("=T")) { string.len() - 1 } else { 0 } } else { 0 }; Ok(if break_index == 0 { Self { main: string.parse()?, modifier: None, stalemate, } } else { Self { main: string.get(..break_index).ok_or(MoveError::Other)?.parse()?, modifier: Some( string .get(break_index..(break_index + 1)) .ok_or(MoveError::Other)? 
.parse()?, ), stalemate, } }) } } #[derive(PartialEq, Clone, Debug)] enum IntermediateError { Other(usize), TurnNumber(usize), TurnNumberParse(usize, String), TurnTooLong(usize), MoveErr(MoveError, String, usize), Description(usize), } fn parse_quarter(string: &str) -> Result<(QuarterTurn, &str), IntermediateError> { /// Generally the move is bounded by whitespace, but supporting PGNs that don't /// have all the necessary whitespace is good. Notably, whitespace before a new /// line number is critical. fn next_move(c: char) -> bool { c.is_whitespace() || match c { '.' | '{' | '(' | ')' => true, _ => false, } } use IntermediateError::*; let trimmed = string.trim_start(); if trimmed == "" { return Err(Other(trimmed.len())); } let split = trimmed.find(next_move).unwrap_or(string.len() - 1); let (main_str, mut rest) = trimmed.split_at(split); let move_pair = main_str .trim() .parse::<MovePair>() .map_err(|m| MoveErr(m, main_str.to_owned(), rest.len()))?; let mut description = None; let mut alternatives = Vec::new(); rest = rest.trim_start(); if let Some(c) = rest.chars().next() { if c == '{' { let desc_end = rest.find('}').ok_or(Description(rest.len()))?; let (mut desc_str, rest_tmp) = rest.split_at(desc_end + 1); desc_str = desc_str.strip_prefix("{ ").ok_or(Description(rest.len()))?; desc_str = desc_str.strip_suffix(" }").ok_or(Description(rest.len()))?; description = Some(desc_str.to_owned()); rest = rest_tmp; } } else { return Ok(( QuarterTurn { main: move_pair.main, modifier: move_pair.modifier, extra_stalemate: move_pair.stalemate, description, alternatives, }, rest, )); }; rest = rest.trim_start(); while let Some(rest_tmp) = rest.strip_prefix('(') { rest = rest_tmp; let mut turns = Vec::new(); while rest.chars().next() != Some(')') { let (turn, rest_tmp) = parse_turn(rest)?; rest = rest_tmp; turns.push(turn); } rest = rest.strip_prefix(')').unwrap().trim_start(); alternatives.push(turns); } Ok(( QuarterTurn { main: move_pair.main, modifier: move_pair.modifier, extra_stalemate: move_pair.stalemate, description, alternatives, }, rest, )) } fn
(string: &str) -> Result<(Turn, &str), IntermediateError> {
    use IntermediateError::*;
    let trimmed = string.trim_start();
    let dot_loc = trimmed.find('.').ok_or(TurnNumber(trimmed.len()))?;
    let (number_str, dots) = trimmed.split_at(dot_loc);
    let number = if number_str == "" {
        0
    } else {
        number_str
            .parse()
            .map_err(|_| TurnNumberParse(trimmed.len(), number_str.to_string()))?
    };
    let dot = dots.strip_prefix('.').unwrap();
    let (mut rest, double_dot) = if let Some(dotted) = dot.strip_prefix('.') {
        (dotted, true)
    } else {
        (dot, false)
    };
    let mut turns = Vec::new();
    let for_error = rest.len();
    let (qturn, rest_tmp) = parse_quarter(rest)?;
    rest = rest_tmp.trim_start();
    turns.push(qturn);
    while let Some(rest_tmp) = rest.strip_prefix("..") {
        if turns.len() >= 4 {
            return Err(TurnTooLong(for_error));
        }
        let (qturn, rest_tmp) = parse_quarter(rest_tmp)?;
        rest = rest_tmp.trim_start();
        turns.push(qturn);
    }
    Ok((
        Turn {
            number,
            double_dot,
            turns,
        },
        rest,
    ))
}

#[derive(Error, PartialEq, Clone, Debug)]
pub enum PGN4Error {
    #[error("Some error occurred at {0}")]
    Other(ErrorLocation),
    #[error("Expected a turn number starting at {0}, but there isn't a dot")]
    TurnNumber(ErrorLocation),
    #[error("Turn number at {0} is malformed \"{1}\" should be a number or \"\"")]
    TurnNumberParse(ErrorLocation, String),
    #[error("More than 4 quarter turns are present in the turn starting at {0}")]
    TurnTooLong(ErrorLocation),
    #[error("Tag starting at {0} is malformed")]
    BadTagged(ErrorLocation),
    #[error("Move \"{1}\" at {2} failed to parse. {0}")]
    BadMove(MoveError, String, ErrorLocation),
    #[error("Description starting at {0} is malformed")]
    BadDescription(ErrorLocation),
}

#[derive(PartialEq, Clone, Debug)]
pub struct ErrorLocation {
    pub line: usize,
    pub column: usize,
    pub raw_offset: usize,
}

impl std::fmt::Display for ErrorLocation {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "line {} column {}", self.line, self.column)
    }
}

impl FromStr for PGN4 {
    type Err = PGN4Error;
    fn from_str(string: &str) -> Result<Self, Self::Err> {
        let mut bracketed = Vec::new();
        let mut rest = string;
        while let Some(rest_tmp) = rest.strip_prefix('[') {
            let label_end = rest_tmp.find(|c: char| c.is_whitespace()).unwrap_or(0);
            let (label, middle) = rest_tmp.split_at(label_end);
            rest = middle
                .trim_start()
                .strip_prefix('"')
                .ok_or_else(|| make_tagged(rest_tmp, string))?;
            let value_end = rest
                .find('"')
                .ok_or_else(|| make_tagged(rest_tmp, string))?;
            let (value, end) = rest.split_at(value_end);
            rest = end
                .strip_prefix("\"]")
                .ok_or_else(|| make_tagged(rest_tmp, string))?
                .trim_start();
            bracketed.push((label.to_owned(), value.to_owned()));
        }
        let mut turns = Vec::new();
        while rest != "" {
            let (turn, rest_tmp) = parse_turn(rest).map_err(|ie| add_details(ie, string))?;
            rest = rest_tmp;
            turns.push(turn);
        }
        Ok(PGN4 { bracketed, turns })
    }
}

fn map_location(bytes_left: usize, base: &str) -> ErrorLocation {
    let front = base.split_at(base.len() - bytes_left).0;
    let from_last_newline = front.lines().last().unwrap();
    let line = front.lines().count();
    ErrorLocation {
        line,
        column: from_last_newline.chars().count(),
        raw_offset: front.len(),
    }
}

fn make_tagged(rest: &str, string: &str) -> PGN4Error {
    PGN4Error::BadTagged(map_location(rest.len(), string))
}

fn add_details(ie: IntermediateError, string: &str) -> PGN4Error {
    use IntermediateError::*;
    match ie {
        Other(r) => PGN4Error::Other(map_location(r, string)),
        TurnNumber(r) => PGN4Error::TurnNumber(map_location(r, string)),
        TurnNumberParse(r, num) => PGN4Error::TurnNumberParse(map_location(r, string), num),
        TurnTooLong(r) => PGN4Error::TurnTooLong(map_location(r, string)),
        MoveErr(m, e, r) => PGN4Error::BadMove(m, e, map_location(r, string)),
        Description(r) => PGN4Error::BadDescription(map_location(r, string)),
    }
}
parse_turn
identifier_name
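The `Move` parser in the record above special-cases one-character game terminators and castling strings before falling back to `BasicMove`. A minimal, hypothetical test sketch of that behavior; it asserts only what is visible in the record, and the module and test names are invented:

```rust
// Hypothetical test sketch for the `Move` parser in the record above.
// Assumes the crate's `Move` enum (from `crate::types`) with exactly the
// variants matched in `from_str`.
#[cfg(test)]
mod move_parse_sketch {
    use super::*;

    #[test]
    fn terminal_tokens_and_castles() {
        // Single-character game terminators map to dedicated variants.
        assert!(matches!("R".parse::<Move>(), Ok(Move::Resign)));
        assert!(matches!("#".parse::<Move>(), Ok(Move::Checkmate)));
        // Castles carry the number of trailing '#' marks (mates delivered).
        assert!(matches!("O-O#".parse::<Move>(), Ok(Move::KingCastle(1))));
        assert!(matches!("O-O-O".parse::<Move>(), Ok(Move::QueenCastle(0))));
        // Anything else beginning with "O-O" is rejected as a bad castle.
        assert!("O-Ox".parse::<Move>().is_err());
    }
}
```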
from_str.rs
use fen4::{Position, PositionParseError}; use std::str::FromStr; use crate::types::*; use thiserror::Error; #[derive(Error, PartialEq, Clone, Debug)] pub enum MoveError { #[error("Basic move is malformed.")] Other, #[error("A move starts with O-O, but is not a correct type of move.")] Castle, #[error("Unable to parse basic move because {0}")] PositionInvalid(#[from] PositionParseError), } impl FromStr for BasicMove { type Err = MoveError; fn from_str(string: &str) -> Result<Self, Self::Err> { let mut iter = string.chars(); let start = iter.next().ok_or(MoveError::Other)?; let (piece, pieceless) = if start.is_ascii_lowercase() { ('P', string) } else { (start, iter.as_str()) }; let mateless = pieceless.trim_end_matches('#'); let checkless = mateless.trim_end_matches('+'); let mates = pieceless.len() - mateless.len(); let checks = mateless.len() - checkless.len(); let (two_pos, promotion) = if let Some(equals) = checkless.find('=') { let (left_over, promote) = checkless.split_at(equals); let mut iter = promote.chars(); if iter.next()!= Some('=') { return Err(MoveError::Other); } let p = iter.next().ok_or(MoveError::Other)?; if iter.next().is_some() { return Err(MoveError::Other); } (left_over, Some(p)) } else { (checkless, None) }; let loc = if let Some(dash) = two_pos.find('-') { dash } else if let Some(x) = two_pos.find('x') { x } else { return Err(MoveError::Other); }; let (left, tmp) = two_pos.split_at(loc); let (mid, mut right) = tmp.split_at(1); // x and - are both ascii and therefore 1 byte let from = left.parse::<Position>()?; let captured = if mid == "x" { let mut iter = right.chars(); let start = iter.next().ok_or(MoveError::Other)?; Some(if start.is_ascii_lowercase() { 'P' } else { right = iter.as_str(); start }) } else { None }; let to = right.parse::<Position>()?; Ok(BasicMove { piece, from, captured, to, promotion, checks, mates, }) } } impl FromStr for Move { type Err = MoveError; fn from_str(string: &str) -> Result<Self, Self::Err> { use Move::*; Ok(match string { "#" => Checkmate, "S" => Stalemate, "T" => Timeout, "R" => Resign, s if s.starts_with("O-O") =>
_ => Normal(string.parse::<BasicMove>()?), }) } } struct MovePair { main: Move, modifier: Option<Move>, stalemate: bool, } impl FromStr for MovePair { type Err = MoveError; fn from_str(string: &str) -> Result<Self, Self::Err> { let mut stalemate = false; let break_index = if string.len() == 2 { 1 // No move is 2 bytes long } else if string.len() > 2 { if string.ends_with("RS") &&!string.ends_with("=RS") || string.ends_with("TS") &&!string.ends_with("=TS") { stalemate = true; string.len() - 2 } else if (string.ends_with('R') &&!string.ends_with("=R")) || (string.ends_with('S') &&!string.ends_with("=S")) || (string.ends_with('T') &&!string.ends_with("=T")) { string.len() - 1 } else { 0 } } else { 0 }; Ok(if break_index == 0 { Self { main: string.parse()?, modifier: None, stalemate, } } else { Self { main: string.get(..break_index).ok_or(MoveError::Other)?.parse()?, modifier: Some( string .get(break_index..(break_index + 1)) .ok_or(MoveError::Other)? .parse()?, ), stalemate, } }) } } #[derive(PartialEq, Clone, Debug)] enum IntermediateError { Other(usize), TurnNumber(usize), TurnNumberParse(usize, String), TurnTooLong(usize), MoveErr(MoveError, String, usize), Description(usize), } fn parse_quarter(string: &str) -> Result<(QuarterTurn, &str), IntermediateError> { /// Generally the move is bounded by whitespace, but supporting pgns that don't /// have all the neccessary whitespace is good. Notably, whitespace before a new /// line number is critical. fn next_move(c: char) -> bool { c.is_whitespace() || match c { '.' | '{' | '(' | ')' => true, _ => false, } } use IntermediateError::*; let trimmed = string.trim_start(); if trimmed == "" { return Err(Other(trimmed.len())); } let split = trimmed.find(next_move).unwrap_or(string.len() - 1); let (main_str, mut rest) = trimmed.split_at(split); let move_pair = main_str .trim() .parse::<MovePair>() .map_err(|m| MoveErr(m, main_str.to_owned(), rest.len()))?; let mut description = None; let mut alternatives = Vec::new(); rest = rest.trim_start(); if let Some(c) = rest.chars().next() { if c == '{' { let desc_end = rest.find('}').ok_or(Description(rest.len()))?; let (mut desc_str, rest_tmp) = rest.split_at(desc_end + 1); desc_str = desc_str.strip_prefix("{ ").ok_or(Description(rest.len()))?; desc_str = desc_str.strip_suffix(" }").ok_or(Description(rest.len()))?; description = Some(desc_str.to_owned()); rest = rest_tmp; } } else { return Ok(( QuarterTurn { main: move_pair.main, modifier: move_pair.modifier, extra_stalemate: move_pair.stalemate, description, alternatives, }, rest, )); }; rest = rest.trim_start(); while let Some(rest_tmp) = rest.strip_prefix('(') { rest = rest_tmp; let mut turns = Vec::new(); while rest.chars().next()!= Some(')') { let (turn, rest_tmp) = parse_turn(rest)?; rest = rest_tmp; turns.push(turn); } rest = rest.strip_prefix(')').unwrap().trim_start(); alternatives.push(turns); } Ok(( QuarterTurn { main: move_pair.main, modifier: move_pair.modifier, extra_stalemate: move_pair.stalemate, description, alternatives, }, rest, )) } fn parse_turn(string: &str) -> Result<(Turn, &str), IntermediateError> { use IntermediateError::*; let trimmed = string.trim_start(); let dot_loc = trimmed.find('.').ok_or(TurnNumber(trimmed.len()))?; let (number_str, dots) = trimmed.split_at(dot_loc); let number = if number_str == "" { 0 } else { number_str .parse() .map_err(|_| TurnNumberParse(trimmed.len(), number_str.to_string()))? 
}; let dot = dots.strip_prefix('.').unwrap(); let (mut rest, double_dot) = if let Some(dotted) = dot.strip_prefix('.') { (dotted, true) } else { (dot, false) }; let mut turns = Vec::new(); let for_error = rest.len(); let (qturn, rest_tmp) = parse_quarter(rest)?; rest = rest_tmp.trim_start(); turns.push(qturn); while let Some(rest_tmp) = rest.strip_prefix("..") { if turns.len() >= 4 { return Err(TurnTooLong(for_error)); } let (qturn, rest_tmp) = parse_quarter(rest_tmp)?; rest = rest_tmp.trim_start(); turns.push(qturn); } Ok(( Turn { number, double_dot, turns, }, rest, )) } #[derive(Error, PartialEq, Clone, Debug)] pub enum PGN4Error { #[error("Some error occured at {0}")] Other(ErrorLocation), #[error("Expected a turn number starting at {0}, but there isn't a dot")] TurnNumber(ErrorLocation), #[error("Turn number at {0} is malformed \"{1}\" should be a number or \"\"")] TurnNumberParse(ErrorLocation, String), #[error("More than 4 quarter turns are present in the turn starting at {0}")] TurnTooLong(ErrorLocation), #[error("Tag starting at {0} is malformed")] BadTagged(ErrorLocation), #[error("Move \"{1}\" at {2} failed to parse. {0}")] BadMove(MoveError, String, ErrorLocation), #[error("Description starting at {0} is malformed")] BadDescription(ErrorLocation), } #[derive(PartialEq, Clone, Debug)] pub struct ErrorLocation { pub line: usize, pub column: usize, pub raw_offset: usize, } impl std::fmt::Display for ErrorLocation { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "line {} column {}", self.line, self.column) } } impl FromStr for PGN4 { type Err = PGN4Error; fn from_str(string: &str) -> Result<Self, Self::Err> { let mut bracketed = Vec::new(); let mut rest = string; while let Some(rest_tmp) = rest.strip_prefix('[') { let label_end = rest_tmp.find(|c: char| c.is_whitespace()).unwrap_or(0); let (label, middle) = rest_tmp.split_at(label_end); rest = middle .trim_start() .strip_prefix('"') .ok_or_else(|| make_tagged(rest_tmp, string))?; let value_end = rest .find('"') .ok_or_else(|| make_tagged(rest_tmp, string))?; let (value, end) = rest.split_at(value_end); rest = end .strip_prefix("\"]") .ok_or_else(|| make_tagged(rest_tmp, string))? .trim_start(); bracketed.push((label.to_owned(), value.to_owned())); } let mut turns = Vec::new(); while rest!= "" { let (turn, rest_tmp) = parse_turn(rest).map_err(|ie| add_details(ie, string))?; rest = rest_tmp; turns.push(turn); } Ok(PGN4 { bracketed, turns }) } } fn map_location(bytes_left: usize, base: &str) -> ErrorLocation { let front = base.split_at(base.len() - bytes_left).0; let from_last_newline = front.lines().last().unwrap(); let line = front.lines().count(); ErrorLocation { line, column: from_last_newline.chars().count(), raw_offset: front.len(), } } fn make_tagged(rest: &str, string: &str) -> PGN4Error { PGN4Error::BadTagged(map_location(rest.len(), string)) } fn add_details(ie: IntermediateError, string: &str) -> PGN4Error { use IntermediateError::*; match ie { Other(r) => PGN4Error::Other(map_location(r, string)), TurnNumber(r) => PGN4Error::TurnNumber(map_location(r, string)), TurnNumberParse(r, num) => PGN4Error::TurnNumberParse(map_location(r, string), num), TurnTooLong(r) => PGN4Error::TurnTooLong(map_location(r, string)), MoveErr(m, e, r) => PGN4Error::BadMove(m, e, map_location(r, string)), Description(r) => PGN4Error::BadDescription(map_location(r, string)), } }
{ let mateless = s.trim_end_matches('#'); let mates = s.len() - mateless.len(); match mateless { "O-O-O" => QueenCastle(mates), "O-O" => KingCastle(mates), _ => return Err(MoveError::Castle), } }
conditional_block
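The `break_index` computation in `MovePair::from_str` (repeated in this record) is subtle: a trailing `R`/`S`/`T` is split off as a resign/stalemate/timeout modifier unless it belongs to a `=R`/`=S`/`=T` promotion, and a trailing `RS`/`TS` additionally sets the stalemate flag. A hedged, module-internal sketch; the coordinate move assumes fen4's `Position` parses plain `h2`-style squares:

```rust
// Hypothetical test of the MovePair splitting rules shown above; assumes
// `MovePair`, `Move`, and `MoveError` exactly as in the record.
#[cfg(test)]
mod move_pair_sketch {
    use super::*;

    #[test]
    fn trailing_modifiers_split_off() -> Result<(), MoveError> {
        // "#TS": len > 2 and ends with "TS" (not "=TS"), so this is a
        // checkmate plus a timeout modifier, with the stalemate flag set.
        let pair: MovePair = "#TS".parse()?;
        assert!(matches!(pair.main, Move::Checkmate));
        assert!(matches!(pair.modifier, Some(Move::Timeout)));
        assert!(pair.stalemate);

        // "h2-h1=R" ends in 'R', but the "=R" guard keeps the rook
        // promotion attached to the move instead of splitting a modifier.
        let promo: MovePair = "h2-h1=R".parse()?;
        assert!(promo.modifier.is_none());
        Ok(())
    }
}
```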
from_str.rs
use fen4::{Position, PositionParseError}; use std::str::FromStr; use crate::types::*; use thiserror::Error; #[derive(Error, PartialEq, Clone, Debug)] pub enum MoveError { #[error("Basic move is malformed.")] Other, #[error("A move starts with O-O, but is not a correct type of move.")] Castle, #[error("Unable to parse basic move because {0}")] PositionInvalid(#[from] PositionParseError), } impl FromStr for BasicMove { type Err = MoveError; fn from_str(string: &str) -> Result<Self, Self::Err> { let mut iter = string.chars(); let start = iter.next().ok_or(MoveError::Other)?; let (piece, pieceless) = if start.is_ascii_lowercase() { ('P', string) } else { (start, iter.as_str()) }; let mateless = pieceless.trim_end_matches('#'); let checkless = mateless.trim_end_matches('+'); let mates = pieceless.len() - mateless.len(); let checks = mateless.len() - checkless.len(); let (two_pos, promotion) = if let Some(equals) = checkless.find('=') { let (left_over, promote) = checkless.split_at(equals); let mut iter = promote.chars(); if iter.next()!= Some('=') { return Err(MoveError::Other); } let p = iter.next().ok_or(MoveError::Other)?; if iter.next().is_some() { return Err(MoveError::Other); } (left_over, Some(p)) } else { (checkless, None) }; let loc = if let Some(dash) = two_pos.find('-') { dash } else if let Some(x) = two_pos.find('x') { x } else { return Err(MoveError::Other); }; let (left, tmp) = two_pos.split_at(loc); let (mid, mut right) = tmp.split_at(1); // x and - are both ascii and therefore 1 byte let from = left.parse::<Position>()?; let captured = if mid == "x" { let mut iter = right.chars(); let start = iter.next().ok_or(MoveError::Other)?; Some(if start.is_ascii_lowercase() { 'P' } else { right = iter.as_str(); start }) } else { None }; let to = right.parse::<Position>()?; Ok(BasicMove { piece, from, captured, to, promotion, checks, mates, }) } } impl FromStr for Move { type Err = MoveError; fn from_str(string: &str) -> Result<Self, Self::Err> { use Move::*; Ok(match string { "#" => Checkmate, "S" => Stalemate, "T" => Timeout, "R" => Resign, s if s.starts_with("O-O") => { let mateless = s.trim_end_matches('#'); let mates = s.len() - mateless.len(); match mateless { "O-O-O" => QueenCastle(mates), "O-O" => KingCastle(mates), _ => return Err(MoveError::Castle), } } _ => Normal(string.parse::<BasicMove>()?), }) } } struct MovePair { main: Move, modifier: Option<Move>, stalemate: bool, } impl FromStr for MovePair { type Err = MoveError; fn from_str(string: &str) -> Result<Self, Self::Err> { let mut stalemate = false; let break_index = if string.len() == 2 { 1 // No move is 2 bytes long } else if string.len() > 2 { if string.ends_with("RS") &&!string.ends_with("=RS") || string.ends_with("TS") &&!string.ends_with("=TS") { stalemate = true; string.len() - 2 } else if (string.ends_with('R') &&!string.ends_with("=R")) || (string.ends_with('S') &&!string.ends_with("=S")) || (string.ends_with('T') &&!string.ends_with("=T")) { string.len() - 1 } else { 0 } } else { 0 }; Ok(if break_index == 0 { Self { main: string.parse()?, modifier: None, stalemate, } } else { Self { main: string.get(..break_index).ok_or(MoveError::Other)?.parse()?, modifier: Some( string .get(break_index..(break_index + 1)) .ok_or(MoveError::Other)? 
.parse()?, ), stalemate, } }) } } #[derive(PartialEq, Clone, Debug)] enum IntermediateError { Other(usize), TurnNumber(usize), TurnNumberParse(usize, String), TurnTooLong(usize), MoveErr(MoveError, String, usize), Description(usize), } fn parse_quarter(string: &str) -> Result<(QuarterTurn, &str), IntermediateError> { /// Generally the move is bounded by whitespace, but supporting pgns that don't /// have all the neccessary whitespace is good. Notably, whitespace before a new /// line number is critical. fn next_move(c: char) -> bool { c.is_whitespace() || match c { '.' | '{' | '(' | ')' => true, _ => false, } } use IntermediateError::*; let trimmed = string.trim_start(); if trimmed == "" { return Err(Other(trimmed.len())); } let split = trimmed.find(next_move).unwrap_or(string.len() - 1); let (main_str, mut rest) = trimmed.split_at(split); let move_pair = main_str .trim() .parse::<MovePair>() .map_err(|m| MoveErr(m, main_str.to_owned(), rest.len()))?; let mut description = None; let mut alternatives = Vec::new(); rest = rest.trim_start(); if let Some(c) = rest.chars().next() { if c == '{' { let desc_end = rest.find('}').ok_or(Description(rest.len()))?; let (mut desc_str, rest_tmp) = rest.split_at(desc_end + 1); desc_str = desc_str.strip_prefix("{ ").ok_or(Description(rest.len()))?; desc_str = desc_str.strip_suffix(" }").ok_or(Description(rest.len()))?; description = Some(desc_str.to_owned()); rest = rest_tmp; } } else { return Ok(( QuarterTurn { main: move_pair.main, modifier: move_pair.modifier, extra_stalemate: move_pair.stalemate, description, alternatives, }, rest, )); }; rest = rest.trim_start(); while let Some(rest_tmp) = rest.strip_prefix('(') { rest = rest_tmp; let mut turns = Vec::new(); while rest.chars().next()!= Some(')') { let (turn, rest_tmp) = parse_turn(rest)?; rest = rest_tmp; turns.push(turn); } rest = rest.strip_prefix(')').unwrap().trim_start(); alternatives.push(turns); } Ok(( QuarterTurn { main: move_pair.main, modifier: move_pair.modifier, extra_stalemate: move_pair.stalemate, description,
}, rest, )) } fn parse_turn(string: &str) -> Result<(Turn, &str), IntermediateError> { use IntermediateError::*; let trimmed = string.trim_start(); let dot_loc = trimmed.find('.').ok_or(TurnNumber(trimmed.len()))?; let (number_str, dots) = trimmed.split_at(dot_loc); let number = if number_str == "" { 0 } else { number_str .parse() .map_err(|_| TurnNumberParse(trimmed.len(), number_str.to_string()))? }; let dot = dots.strip_prefix('.').unwrap(); let (mut rest, double_dot) = if let Some(dotted) = dot.strip_prefix('.') { (dotted, true) } else { (dot, false) }; let mut turns = Vec::new(); let for_error = rest.len(); let (qturn, rest_tmp) = parse_quarter(rest)?; rest = rest_tmp.trim_start(); turns.push(qturn); while let Some(rest_tmp) = rest.strip_prefix("..") { if turns.len() >= 4 { return Err(TurnTooLong(for_error)); } let (qturn, rest_tmp) = parse_quarter(rest_tmp)?; rest = rest_tmp.trim_start(); turns.push(qturn); } Ok(( Turn { number, double_dot, turns, }, rest, )) } #[derive(Error, PartialEq, Clone, Debug)] pub enum PGN4Error { #[error("Some error occured at {0}")] Other(ErrorLocation), #[error("Expected a turn number starting at {0}, but there isn't a dot")] TurnNumber(ErrorLocation), #[error("Turn number at {0} is malformed \"{1}\" should be a number or \"\"")] TurnNumberParse(ErrorLocation, String), #[error("More than 4 quarter turns are present in the turn starting at {0}")] TurnTooLong(ErrorLocation), #[error("Tag starting at {0} is malformed")] BadTagged(ErrorLocation), #[error("Move \"{1}\" at {2} failed to parse. {0}")] BadMove(MoveError, String, ErrorLocation), #[error("Description starting at {0} is malformed")] BadDescription(ErrorLocation), } #[derive(PartialEq, Clone, Debug)] pub struct ErrorLocation { pub line: usize, pub column: usize, pub raw_offset: usize, } impl std::fmt::Display for ErrorLocation { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "line {} column {}", self.line, self.column) } } impl FromStr for PGN4 { type Err = PGN4Error; fn from_str(string: &str) -> Result<Self, Self::Err> { let mut bracketed = Vec::new(); let mut rest = string; while let Some(rest_tmp) = rest.strip_prefix('[') { let label_end = rest_tmp.find(|c: char| c.is_whitespace()).unwrap_or(0); let (label, middle) = rest_tmp.split_at(label_end); rest = middle .trim_start() .strip_prefix('"') .ok_or_else(|| make_tagged(rest_tmp, string))?; let value_end = rest .find('"') .ok_or_else(|| make_tagged(rest_tmp, string))?; let (value, end) = rest.split_at(value_end); rest = end .strip_prefix("\"]") .ok_or_else(|| make_tagged(rest_tmp, string))? 
.trim_start(); bracketed.push((label.to_owned(), value.to_owned())); } let mut turns = Vec::new(); while rest!= "" { let (turn, rest_tmp) = parse_turn(rest).map_err(|ie| add_details(ie, string))?; rest = rest_tmp; turns.push(turn); } Ok(PGN4 { bracketed, turns }) } } fn map_location(bytes_left: usize, base: &str) -> ErrorLocation { let front = base.split_at(base.len() - bytes_left).0; let from_last_newline = front.lines().last().unwrap(); let line = front.lines().count(); ErrorLocation { line, column: from_last_newline.chars().count(), raw_offset: front.len(), } } fn make_tagged(rest: &str, string: &str) -> PGN4Error { PGN4Error::BadTagged(map_location(rest.len(), string)) } fn add_details(ie: IntermediateError, string: &str) -> PGN4Error { use IntermediateError::*; match ie { Other(r) => PGN4Error::Other(map_location(r, string)), TurnNumber(r) => PGN4Error::TurnNumber(map_location(r, string)), TurnNumberParse(r, num) => PGN4Error::TurnNumberParse(map_location(r, string), num), TurnTooLong(r) => PGN4Error::TurnTooLong(map_location(r, string)), MoveErr(m, e, r) => PGN4Error::BadMove(m, e, map_location(r, string)), Description(r) => PGN4Error::BadDescription(map_location(r, string)), } }
alternatives,
random_line_split
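After the main move, `parse_quarter` accepts an optional `{ ... }` description (the inner spaces are mandatory, since exactly `"{ "` and `" }"` are stripped) followed by any number of parenthesized alternative lines. An illustrative, module-internal sketch under the same fen4 position-parsing assumption as above:

```rust
// Sketch of `parse_quarter` on a quarter-turn with a comment and one
// variation; hypothetical test, names invented.
#[cfg(test)]
mod quarter_sketch {
    use super::*;

    #[test]
    fn description_and_alternative() {
        let input = "h2-h3 { solid } (1. g2-g3 ) rest";
        let (qturn, rest) = parse_quarter(input).unwrap();
        // The description keeps only the text between "{ " and " }".
        assert_eq!(qturn.description.as_deref(), Some("solid"));
        // One alternative line containing a single turn was collected.
        assert_eq!(qturn.alternatives.len(), 1);
        assert!(rest.starts_with("rest"));
    }
}
```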
base.rs
    /// Returns a human readable name of the object kind.
    ///
    /// This is also used in alternate formatting:
    ///
    /// ```rust
    /// # use symbolic_debuginfo::ObjectKind;
    /// assert_eq!(format!("{:#}", ObjectKind::Executable), ObjectKind::Executable.human_name());
    /// ```
    pub fn human_name(self) -> &'static str {
        match self {
            ObjectKind::None => "file",
            ObjectKind::Relocatable => "object",
            ObjectKind::Executable => "executable",
            ObjectKind::Library => "library",
            ObjectKind::Dump => "memory dump",
            ObjectKind::Debug => "debug companion",
            ObjectKind::Sources => "sources",
            ObjectKind::Other => "file",
        }
    }
}

impl fmt::Display for ObjectKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if f.alternate() {
            f.write_str(self.human_name())
        } else {
            f.write_str(self.name())
        }
    }
}

impl FromStr for ObjectKind {
    type Err = UnknownObjectKindError;

    fn from_str(string: &str) -> Result<ObjectKind, UnknownObjectKindError> {
        Ok(match string {
            "none" => ObjectKind::None,
            "rel" => ObjectKind::Relocatable,
            "exe" => ObjectKind::Executable,
            "lib" => ObjectKind::Library,
            "dump" => ObjectKind::Dump,
            "dbg" => ObjectKind::Debug,
            "src" => ObjectKind::Sources,
            "other" => ObjectKind::Other,
            _ => return Err(UnknownObjectKindError),
        })
    }
}

/// An error returned for unknown or invalid [`FileFormats`](enum.FileFormat.html).
#[derive(Debug)]
pub struct UnknownFileFormatError;

impl fmt::Display for UnknownFileFormatError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "unknown file format")
    }
}

impl std::error::Error for UnknownFileFormatError {}

/// Represents the physical object file format.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Clone)]
pub enum FileFormat {
    /// An unknown file format.
    Unknown,
    /// Breakpad ASCII symbol.
    Breakpad,
    /// Executable and Linkable Format, used on Linux.
    Elf,
    /// Mach Objects, used on macOS and iOS derivatives.
    MachO,
    /// Program Database, the debug companion format on Windows.
    Pdb,
    /// Portable Executable, an extension of COFF used on Windows.
    Pe,
    /// Source code bundle ZIP.
    SourceBundle,
    /// WASM container.
    Wasm,
    /// Portable PDB.
    PortablePdb,
}

impl FileFormat {
    /// Returns the name of the file format.
    pub fn name(self) -> &'static str {
        match self {
            FileFormat::Unknown => "unknown",
            FileFormat::Breakpad => "breakpad",
            FileFormat::Elf => "elf",
            FileFormat::MachO => "macho",
            FileFormat::Pdb => "pdb",
            FileFormat::Pe => "pe",
            FileFormat::SourceBundle => "sourcebundle",
            FileFormat::Wasm => "wasm",
            FileFormat::PortablePdb => "portablepdb",
        }
    }
}

impl fmt::Display for FileFormat {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.name())
    }
}

impl FromStr for FileFormat {
    type Err = UnknownFileFormatError;

    fn from_str(string: &str) -> Result<FileFormat, UnknownFileFormatError> {
        Ok(match string {
            "breakpad" => FileFormat::Breakpad,
            "elf" => FileFormat::Elf,
            "macho" => FileFormat::MachO,
            "pdb" => FileFormat::Pdb,
            "pe" => FileFormat::Pe,
            "sourcebundle" => FileFormat::SourceBundle,
            "wasm" => FileFormat::Wasm,
            "portablepdb" => FileFormat::PortablePdb,
            _ => return Err(UnknownFileFormatError),
        })
    }
}

/// A symbol from a symbol table.
#[derive(Clone, Default, Eq, PartialEq)]
pub struct Symbol<'data> {
    /// The name of the symbol.
    ///
    /// This name is generally mangled. It can be demangled by constructing a `Name` instance and
    /// calling demangle on it. Certain object files might only store demangled symbol names.
    pub name: Option<Cow<'data, str>>,
    /// The relative address of this symbol.
    pub address: u64,
    /// The size of this symbol, if known.
    ///
    /// When loading symbols from an object file, the size will generally not be known. Instead,
    /// construct a [`SymbolMap`] from the object, which also fills in sizes.
    ///
    /// [`SymbolMap`]: struct.SymbolMap.html
    pub size: u64,
}

impl<'data> Symbol<'data> {
    /// Returns the name of this symbol as a string.
    pub fn name(&self) -> Option<&str> {
        self.name.as_ref().map(Cow::as_ref)
    }

    /// Determines whether the given address is covered by this symbol.
    ///
    /// If the symbol size has not been computed, the address is assumed to be covered if it is
    /// greater than the symbol address. Otherwise, the address must be in the half-open interval
    /// `[address, address + size)`.
    pub fn contains(&self, address: u64) -> bool {
        address >= self.address && (self.size == 0 || address < self.address + self.size)
    }
}

impl<'d> fmt::Debug for Symbol<'d> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Symbol")
            .field("name", &self.name().unwrap_or("<unknown>"))
            .field("address", &format_args!("{:#x}", self.address))
            .field("size", &format_args!("{:#x}", self.size))
            .finish()
    }
}

/// IntoIterator type for [`SymbolMap`](struct.SymbolMap.html).
pub type SymbolMapIter<'data> = std::vec::IntoIter<Symbol<'data>>;

/// A sorted list of symbols, suitable for quick lookups.
///
/// This type can either be computed from a list or iterator of symbols, or preferably directly
/// by calling [`ObjectLike::symbol_map`] on any object. Symbols in the symbol map are guaranteed to
/// have a `size` set, except for the last symbol, which is computed by taking the offset to the
/// subsequent symbol.
///
/// `SymbolMap` also exposes a read-only view on the sorted slice of symbols. It can be converted to
/// and from lists of symbols.
///
/// ## Example
///
/// ```rust
/// # use symbolic_debuginfo::{Symbol, SymbolMap};
/// let map = SymbolMap::from(vec![
///     Symbol { name: Some("A".into()), address: 0x4400, size: 0 },
///     Symbol { name: Some("B".into()), address: 0x4200, size: 0 },
///     Symbol { name: Some("C".into()), address: 0x4000, size: 0 },
/// ]);
///
/// assert_eq!(map[0], Symbol {
///     name: Some("C".into()),
///     address: 0x4000,
///     size: 0x200,
/// });
/// ```
///
/// [`ObjectLike::symbol_map`]: trait.ObjectLike.html#tymethod.symbol_map
#[derive(Clone, Debug, Default)]
pub struct SymbolMap<'data> {
    symbols: Vec<Symbol<'data>>,
}

impl<'data> SymbolMap<'data> {
    /// Creates a new, empty symbol map.
    pub fn new() -> Self {
        SymbolMap {
            symbols: Vec::new(),
        }
    }

    /// Looks up the symbol covering the given address.
    pub fn lookup(&self, address: u64) -> Option<&Symbol<'data>> {
        match self.symbols.binary_search_by_key(&address, Self::key) {
            Ok(index) => Some(&self.symbols[index]),
            Err(0) => None,
            Err(next_index) => {
                let symbol = &self.symbols[next_index - 1];
                if symbol.contains(address) {
                    Some(symbol)
                } else {
                    None
                }
            }
        }
    }

    /// Looks up a symbol by its start address.
    pub fn lookup_exact(&self, address: u64) -> Option<&Symbol<'data>> {
        let idx = self
            .symbols
            .binary_search_by_key(&address, Self::key)
            .ok()?;
        self.symbols.get(idx)
    }

    /// Looks up a symbol covering an entire range.
    ///
    /// This is similar to [`lookup`], but it only returns the symbol result if it _also_ covers the
    /// inclusive end address of the range.
    ///
    /// [`lookup`]: struct.SymbolMap.html#method.lookup
    pub fn lookup_range<R>(&self, range: R) -> Option<&Symbol<'data>>
    where
        R: RangeBounds<u64>,
    {
        let start = match range.start_bound() {
            Bound::Included(start) => *start,
            Bound::Excluded(start) => *start + 1,
            Bound::Unbounded => 0,
        };

        let symbol = self.lookup(start)?;

        let end = match range.end_bound() {
            Bound::Included(end) => *end,
            Bound::Excluded(end) => *end - 1,
            Bound::Unbounded => u64::max_value(),
        };

        if end <= start || symbol.contains(end) {
            Some(symbol)
        } else {
            None
        }
    }

    /// Returns the lookup key for a symbol, which is the symbol's address.
    #[inline(always)]
    fn key(symbol: &Symbol<'data>) -> u64 {
        symbol.address
    }
}

impl<'d> Deref for SymbolMap<'d> {
    type Target = [Symbol<'d>];

    fn deref(&self) -> &Self::Target {
        &self.symbols
    }
}

impl<'data> IntoIterator for SymbolMap<'data> {
    type Item = Symbol<'data>;
    type IntoIter = SymbolMapIter<'data>;

    fn into_iter(self) -> Self::IntoIter {
        self.symbols.into_iter()
    }
}

impl<'data, 'a> IntoIterator for &'a SymbolMap<'data> {
    type Item = &'a Symbol<'data>;
    type IntoIter = std::slice::Iter<'a, Symbol<'data>>;

    fn into_iter(self) -> Self::IntoIter {
        self.symbols.iter()
    }
}

impl<'d> AsRef<[Symbol<'d>]> for SymbolMap<'d> {
    fn as_ref(&self) -> &[Symbol<'d>] {
        &self.symbols
    }
}

impl<'d> From<Vec<Symbol<'d>>> for SymbolMap<'d> {
    fn from(mut symbols: Vec<Symbol<'d>>) -> Self {
        if !symbols.is_empty() {
            // NB: This might require stable sorting to ensure determinism if multiple symbols point
            // at the same location. However, this only seems to happen for equivalent variants of
            // the same function.
            //
            // An example would be destructors where D2 (base object destructor) and D1 (complete
            // object destructor) might share the same code. Since those always demangle to the same
            // name, we do not care which function to keep in this case.
            //
            // Inlined functions will generally not appear in this list, unless they _also_ have an
            // explicit function body, in which case they will have a unique address, again.
            dmsort::sort_by_key(&mut symbols, Self::key);

            // Compute sizes of consecutive symbols if the size has not been provided by the symbol
            // iterator. In the same go, drop all but the first symbols at any given address. We do
            // not rely on the size of symbols in this case, since the ranges might still be
            // overlapping.
            symbols.dedup_by(|next, symbol| {
                if symbol.size == 0 {
                    symbol.size = next.address - symbol.address;
                }
                symbol.address == next.address
            })
        }

        SymbolMap { symbols }
    }
}

impl<'d> FromIterator<Symbol<'d>> for SymbolMap<'d> {
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = Symbol<'d>>,
    {
        Vec::from_iter(iter).into()
    }
}

/// File information referred to by [`LineInfo`](struct.LineInfo.html) comprising a directory and name.
///
/// The file path is usually relative to a compilation directory. It might contain parent directory
/// segments (`../`).
#[derive(Clone, Default, Eq, PartialEq)]
pub struct FileInfo<'data> {
    /// The file's basename.
    name: Cow<'data, [u8]>,
    /// Path to the file.
    dir: Cow<'data, [u8]>,
}

impl<'data> FileInfo<'data> {
    /// Creates a `FileInfo` with a given directory and the file name.
    #[cfg(feature = "dwarf")]
    pub fn new(dir: Cow<'data, [u8]>, name: Cow<'data, [u8]>) -> Self {
        FileInfo { name, dir }
    }

    /// Creates a `FileInfo` from a joined path by trying to split it.
    #[cfg(any(feature = "breakpad", feature = "ms", feature = "sourcebundle"))]
    pub fn from_path(path: &'data [u8]) -> Self {
        let (dir, name) = symbolic_common::split_path_bytes(path);
        FileInfo {
            name: Cow::Borrowed(name),
            dir: match dir {
                Some(dir) => Cow::Borrowed(dir),
                None => Cow::default(),
            },
        }
    }

    /// Creates a `FileInfo` from a joined path by trying to split it.
    /// Unlike from_path(), copies the given data instead of referencing it.
    #[cfg(feature = "ppdb")]
    pub(crate) fn from_path_owned(path: &[u8]) -> Self {
        let (dir, name) = symbolic_common::split_path_bytes(path);
        FileInfo {
            name: Cow::Owned(name.to_vec()),
            dir: match dir {
                Some(dir) => Cow::Owned(dir.to_vec()),
                None => Cow::default(),
            },
        }
    }

    /// Creates a `FileInfo` with the file name.
    pub fn from_filename(name: &'data [u8]) -> Self {
        FileInfo {
            name: Cow::Borrowed(name),
            dir: Cow::default(),
        }
    }

    /// The file name as UTF-8 string.
    pub fn name_str(&self) -> Cow<'data, str> {
        from_utf8_cow_lossy(&self.name)
    }

    /// Path to the file relative to the compilation directory.
    pub fn dir_str(&self) -> Cow<'data, str> {
        from_utf8_cow_lossy(&self.dir)
    }

    /// The full path to the file, relative to the compilation directory.
    pub fn path_str(&self) -> String {
        let joined = join_path(&self.dir_str(), &self.name_str());
        clean_path(&joined).into_owned()
    }
}

#[allow(clippy::ptr_arg)] // false positive https://github.com/rust-lang/rust-clippy/issues/9218
pub(crate) fn from_utf8_cow_lossy<'data>(input: &Cow<'data, [u8]>) -> Cow<'data, str> {
    // See https://github.com/rust-lang/rust/issues/32669
    match input {
        Cow::Borrowed(bytes) => String::from_utf8_lossy(bytes),
        Cow::Owned(bytes) => match String::from_utf8_lossy(bytes) {
            Cow::Borrowed(_) => unsafe { String::from_utf8_unchecked(bytes.to_vec()) }.into(),
            Cow::Owned(s) => s.into(),
        },
    }
}

impl fmt::Debug for FileInfo<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("FileInfo")
            .field("name", &self.name_str())
            .field("dir", &self.dir_str())
            .finish()
    }
}

/// File information comprising a compilation directory, relative path and name.
pub struct FileEntry<'data> {
    /// Path to the compilation directory. File paths are relative to this.
    compilation_dir: Cow<'data, [u8]>,
    /// File name and path.
    pub info: FileInfo<'data>,
}

impl<'data> FileEntry<'data> {
    /// Path to the compilation directory.
    pub fn new(compilation_dir: Cow<'data, [u8]>, info: FileInfo<'data>) -> Self {
        FileEntry {
            compilation_dir,
            info,
        }
    }

    /// Path to the compilation directory.
    pub fn compilation_dir_str(&self) -> Cow<'data, str> {
        from_utf8_cow_lossy(&self.compilation_dir)
    }

    /// Absolute path to the file, including the compilation directory.
    pub fn abs_path_str(&self) -> String {
        let joined_path = join_path(&self.dir_str(), &self.name_str());
        let joined = join_path(&self.compilation_dir_str(), &joined_path);
        clean_path(&joined).into_owned()
    }
}

impl fmt::Debug for FileEntry<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("FileInfo")
            .field("compilation_dir", &self.compilation_dir_str())
            .field("name", &self.name_str())
            .field("dir", &self.dir_str())
            .finish()
    }
}

impl<'data> Deref for FileEntry<'data> {
    type Target = FileInfo<'data>;

    fn deref(&self) -> &Self::Target {
        &self.info
    }
}

/// File and line number mapping for an instruction address.
#[derive(Clone, Eq, PartialEq)]
pub struct LineInfo<'data> {
    /// The instruction address relative to the image base (load address).
    pub address: u64,
    /// Total code size covered by this line record.
    pub size: Option<u64>,
    /// File name and path.
    pub file: FileInfo<'data>,
    /// Absolute line number starting at 1. Zero means no line number.
    pub line: u64,
}

#[cfg(test)]
impl LineInfo<'static> {
    pub(crate) fn new(address: u64, size: u64, file: &[u8], line: u64) -> LineInfo {
        LineInfo {
            address,
            size: Some(size),
            file: FileInfo::from_filename(file),
            line,
        }
    }
}

impl fmt::Debug for LineInfo<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut s = f.debug_struct("LineInfo");
        s.field("address", &format_args!("{:#x}", self.address));
        match self.size {
            Some(size) => s.field("size", &format_args!("{size:#x}")),
            None => s.field("size", &self.size),
        };
        s.field("file", &self.file)
            .field("line", &self.line)
            .finish()
    }
}

/// Debug information for a function.
#[derive(Clone)]
pub struct Function<'data> {
    /// Relative instruction address of the start of the function.
    pub address: u64,
    /// Total code size covered by the function body, including inlined functions.
    pub size: u64,
    /// The name and language of the function symbol.
    pub name: Name<'data>,
    /// Path to the compilation directory. File paths are relative to this.
    pub compilation_dir: &'data [u8],
    /// Lines covered by this function, including inlined children.
    pub lines: Vec<LineInfo<'data>>,
    /// Functions that have been inlined into this function's body.
    pub inlinees: Vec<Function<'data>>,
    /// Specifies whether this function is inlined.
    pub inline: bool,
}

impl Function<'_> {
    /// End address of the entire function body, including inlined functions.
    ///
    /// This address points at the first instruction after the function body.
    pub fn end_address(&self) -> u64 {
        self.address.saturating_add(self.size)
    }
}

impl fmt::Debug for Function<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
}

/// A dynamically dispatched iterator over items with the given lifetime.
pub type DynIterator<'a, T> = Box<dyn Iterator<Item = T> + 'a>;

/// A stateful session for interfacing with debug information.
///
/// Debug sessions can be obtained via [`ObjectLike::debug_session`]. Since computing a session may
/// be a costly operation, try to reuse the session as much as possible.
///
/// ## Implementing DebugSession
///
/// Reading debug information from object files usually requires loading multiple sections into
/// memory and computing maps for quick random access to certain information. Since this can be a
/// quite costly process, this is encapsulated into a `DebugSession`. The session may hold whatever
/// data and caches may be necessary for efficiently interfacing with the debug info.
///
/// All trait methods on a `DebugSession` receive `&mut self`, to allow mutation of internal cache
/// structures. Lifetimes of returned types are tied to this session's lifetime, which allows
/// borrowing data from the session.
///
/// Examples for things to compute when building a debug session are:
///
/// - Decompress debug information if it is stored with compression.
/// - Build a symbol map for random access to public symbols.
/// - Map string tables and other lookup tables.
/// - Read headers of compilation units (compilands) to resolve cross-unit references.
///
/// [`ObjectLike::debug_session`]: trait.ObjectLike.html#tymethod.debug_session
pub trait DebugSession<'session> {
    /// The error returned when reading debug information fails.
    type Error;

    /// An iterator over all functions in this debug file.
    type FunctionIterator: Iterator<Item = Result<Function<'session>, Self::Error>>;

    /// An iterator over all source files referenced by this debug file.
    type FileIterator: Iterator<Item = Result<FileEntry<'session>, Self::Error>>;

    /// Returns an iterator over all functions in this debug file.
    ///
    /// Functions are iterated in the order they are declared in their compilation units. The
    /// functions yielded by this iterator include all inlinees and line records resolved.
    ///
    /// Note that the iterator holds a mutable borrow on the debug session, which allows it to use
    /// caches and optimize resources while resolving function and line information.
    fn functions(&'session self) -> Self::FunctionIterator;

    /// Returns an iterator over all source files referenced by this debug file.
    fn files(&'session self) -> Self::FileIterator;

    /// Looks up a
{
    f.debug_struct("Function")
        .field("address", &format_args!("{:#x}", self.address))
        .field("size", &format_args!("{:#x}", self.size))
        .field("name", &self.name)
        .field(
            "compilation_dir",
            &String::from_utf8_lossy(self.compilation_dir),
        )
        .field("lines", &self.lines)
        .field("inlinees", &self.inlinees)
        .field("inline", &self.inline)
        .finish()
}
identifier_body
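The `identifier_body` above completes `Function`'s `Debug` impl; for callers, the more interesting behavior is how `From<Vec<Symbol>>` sorts and back-fills sizes so that `lookup` works. A usage sketch grounded in the record's own doc example (the names and addresses are invented):

```rust
use symbolic_debuginfo::{Symbol, SymbolMap};

// Usage sketch for the SymbolMap lookup path shown above.
fn lookup_demo() {
    let map = SymbolMap::from(vec![
        Symbol { name: Some("main".into()), address: 0x1000, size: 0 },
        Symbol { name: Some("helper".into()), address: 0x1400, size: 0 },
    ]);

    // `From<Vec<_>>` sorted the symbols and gave `main` the inferred size
    // 0x400, so any address in 0x1000..0x1400 resolves to it.
    assert_eq!(map.lookup(0x12ff).and_then(|s| s.name()), Some("main"));

    // The trailing symbol keeps size 0 and therefore covers everything at
    // or above its start address.
    assert_eq!(map.lookup(0xffff).and_then(|s| s.name()), Some("helper"));
}
```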
base.rs
} /// Returns a human readable name of the object kind. /// /// This is also used in alternate formatting: /// /// ```rust /// # use symbolic_debuginfo::ObjectKind; /// assert_eq!(format!("{:#}", ObjectKind::Executable), ObjectKind::Executable.human_name()); /// ``` pub fn human_name(self) -> &'static str { match self { ObjectKind::None => "file", ObjectKind::Relocatable => "object", ObjectKind::Executable => "executable", ObjectKind::Library => "library", ObjectKind::Dump => "memory dump", ObjectKind::Debug => "debug companion", ObjectKind::Sources => "sources", ObjectKind::Other => "file", } } } impl fmt::Display for ObjectKind { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if f.alternate() { f.write_str(self.human_name()) } else { f.write_str(self.name()) } } } impl FromStr for ObjectKind { type Err = UnknownObjectKindError; fn from_str(string: &str) -> Result<ObjectKind, UnknownObjectKindError> { Ok(match string { "none" => ObjectKind::None, "rel" => ObjectKind::Relocatable, "exe" => ObjectKind::Executable, "lib" => ObjectKind::Library, "dump" => ObjectKind::Dump, "dbg" => ObjectKind::Debug, "src" => ObjectKind::Sources, "other" => ObjectKind::Other, _ => return Err(UnknownObjectKindError), }) } } /// An error returned for unknown or invalid [`FileFormats`](enum.FileFormat.html). #[derive(Debug)] pub struct UnknownFileFormatError; impl fmt::Display for UnknownFileFormatError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "unknown file format") } } impl std::error::Error for UnknownFileFormatError {} /// Represents the physical object file format. #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Clone)] pub enum FileFormat { /// An unknown file format. Unknown, /// Breakpad ASCII symbol. Breakpad, /// Executable and Linkable Format, used on Linux. Elf, /// Mach Objects, used on macOS and iOS derivatives. MachO, /// Program Database, the debug companion format on Windows. Pdb, /// Portable Executable, an extension of COFF used on Windows. Pe, /// Source code bundle ZIP. SourceBundle, /// WASM container. Wasm, /// Portable PDB PortablePdb, } impl FileFormat { /// Returns the name of the file format. pub fn name(self) -> &'static str { match self { FileFormat::Unknown => "unknown", FileFormat::Breakpad => "breakpad", FileFormat::Elf => "elf", FileFormat::MachO => "macho", FileFormat::Pdb => "pdb", FileFormat::Pe => "pe", FileFormat::SourceBundle => "sourcebundle", FileFormat::Wasm => "wasm", FileFormat::PortablePdb => "portablepdb", } } } impl fmt::Display for FileFormat { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(self.name()) } } impl FromStr for FileFormat { type Err = UnknownFileFormatError; fn from_str(string: &str) -> Result<FileFormat, UnknownFileFormatError> { Ok(match string { "breakpad" => FileFormat::Breakpad, "elf" => FileFormat::Elf, "macho" => FileFormat::MachO, "pdb" => FileFormat::Pdb, "pe" => FileFormat::Pe, "sourcebundle" => FileFormat::SourceBundle, "wasm" => FileFormat::Wasm, "portablepdb" => FileFormat::PortablePdb, _ => return Err(UnknownFileFormatError), }) } } /// A symbol from a symbol table. #[derive(Clone, Default, Eq, PartialEq)] pub struct Symbol<'data> { /// The name of the symbol. /// /// This name is generally mangled. It can be demangled by constructing a `Name` instance and /// calling demangle on it. Certain object files might only store demangled symbol names. pub name: Option<Cow<'data, str>>, /// The relative address of this symbol. 
pub address: u64, /// The size of this symbol, if known. /// /// When loading symbols from an object file, the size will generally not be known. Instead, /// construct a [`SymbolMap`] from the object, which also fills in sizes. /// /// [`SymbolMap`]: struct.SymbolMap.html pub size: u64, } impl<'data> Symbol<'data> { /// Returns the name of this symbol as string. pub fn name(&self) -> Option<&str> { self.name.as_ref().map(Cow::as_ref) } /// Determines whether the given address is covered by this symbol. /// /// If the symbol size has not been computed, the address is assumed to be covered if it is /// greated than the symbol address. Otherwise, the address must be in the half-open interval /// `[address, address + size)`. pub fn contains(&self, address: u64) -> bool { address >= self.address && (self.size == 0 || address < self.address + self.size) } } impl<'d> fmt::Debug for Symbol<'d> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Symbol") .field("name", &self.name().unwrap_or("<unknown>")) .field("address", &format_args!("{:#x}", self.address)) .field("size", &format_args!("{:#x}", self.size)) .finish() } } /// IntoIterator type for [`SymbolMap`](struct.SymbolMap.html). pub type SymbolMapIter<'data> = std::vec::IntoIter<Symbol<'data>>; /// A sorted list of symbols, suitable for quick lookups. /// /// This type can either be computed from a list or iterator of symbols, or preferrably directly /// by calling [`ObjectLike::symbol_map`] on any object. Symbols in the symbol map are guaranteed to /// have a `size` set, except for the last symbol, which is computed by taking the offset to the /// subsequent symbol. /// /// `SymbolMap` also exposes a read-only view on the sorted slice of symbols. It can be converted to /// and from lists of symbols. /// /// ## Example /// /// ```rust /// # use symbolic_debuginfo::{Symbol, SymbolMap}; /// let map = SymbolMap::from(vec![ /// Symbol { name: Some("A".into()), address: 0x4400, size: 0 }, /// Symbol { name: Some("B".into()), address: 0x4200, size: 0 }, /// Symbol { name: Some("C".into()), address: 0x4000, size: 0 }, /// ]); /// /// assert_eq!(map[0], Symbol { /// name: Some("C".into()), /// address: 0x4000, /// size: 0x200, /// }); /// ``` /// /// [`ObjectLike::symbol_map`]: trait.ObjectLike.html#tymethod.symbol_map #[derive(Clone, Debug, Default)] pub struct SymbolMap<'data> { symbols: Vec<Symbol<'data>>, } impl<'data> SymbolMap<'data> { /// Creates a new, empty symbol map. pub fn new() -> Self { SymbolMap { symbols: Vec::new(), } } /// Looks up the symbol covering the given address. pub fn lookup(&self, address: u64) -> Option<&Symbol<'data>> { match self.symbols.binary_search_by_key(&address, Self::key) { Ok(index) => Some(&self.symbols[index]), Err(0) => None, Err(next_index) => { let symbol = &self.symbols[next_index - 1]; if symbol.contains(address) { Some(symbol) } else { None } } } } /// Looks up a symbol by its start address. pub fn lookup_exact(&self, address: u64) -> Option<&Symbol<'data>> { let idx = self .symbols .binary_search_by_key(&address, Self::key) .ok()?; self.symbols.get(idx) } /// Looks up a symbol covering an entire range. /// /// This is similar to [`lookup`], but it only returns the symbol result if it _also_ covers the /// inclusive end address of the range. 
/// /// [`lookup`]: struct.SymbolMap.html#method.lookup pub fn lookup_range<R>(&self, range: R) -> Option<&Symbol<'data>> where R: RangeBounds<u64>, { let start = match range.start_bound() { Bound::Included(start) => *start, Bound::Excluded(start) => *start + 1, Bound::Unbounded => 0, }; let symbol = self.lookup(start)?; let end = match range.end_bound() { Bound::Included(end) => *end, Bound::Excluded(end) => *end - 1, Bound::Unbounded => u64::max_value(), }; if end <= start || symbol.contains(end) { Some(symbol) } else { None } } /// Returns the lookup key for a symbol, which is the symbol's address. #[inline(always)] fn key(symbol: &Symbol<'data>) -> u64 { symbol.address } } impl<'d> Deref for SymbolMap<'d> { type Target = [Symbol<'d>]; fn deref(&self) -> &Self::Target { &self.symbols } } impl<'data> IntoIterator for SymbolMap<'data> { type Item = Symbol<'data>; type IntoIter = SymbolMapIter<'data>; fn into_iter(self) -> Self::IntoIter { self.symbols.into_iter() } } impl<'data, 'a> IntoIterator for &'a SymbolMap<'data> { type Item = &'a Symbol<'data>; type IntoIter = std::slice::Iter<'a, Symbol<'data>>; fn into_iter(self) -> Self::IntoIter { self.symbols.iter() } } impl<'d> AsRef<[Symbol<'d>]> for SymbolMap<'d> { fn as_ref(&self) -> &[Symbol<'d>] { &self.symbols } } impl<'d> From<Vec<Symbol<'d>>> for SymbolMap<'d> { fn from(mut symbols: Vec<Symbol<'d>>) -> Self { if!symbols.is_empty() { // NB: This might require stable sorting to ensure determinism if multiple symbols point // at the same location. However, this only seems to happen for equivalent variants of // the same function. // // An example would be destructors where D2 (base object destructor) and D1 (complete // object destructor) might share the same code. Since those always demangle to the same // name, we do not care which function to keep in this case. // // Inlined functions will generally not appear in this list, unless they _also_ have an // explicit function body, in which case they will have a unique address, again. dmsort::sort_by_key(&mut symbols, Self::key); // Compute sizes of consecutive symbols if the size has not been provided by the symbol // iterator. In the same go, drop all but the first symbols at any given address. We do // not rely on the size of symbols in this case, since the ranges might still be // overlapping. symbols.dedup_by(|next, symbol| { if symbol.size == 0 { symbol.size = next.address - symbol.address; } symbol.address == next.address }) } SymbolMap { symbols } } } impl<'d> FromIterator<Symbol<'d>> for SymbolMap<'d> { fn from_iter<I>(iter: I) -> Self where I: IntoIterator<Item = Symbol<'d>>, { Vec::from_iter(iter).into() } } /// File information referred by [`LineInfo`](struct.LineInfo.html) comprising a directory and name. /// /// The file path is usually relative to a compilation directory. It might contain parent directory /// segments (`../`). #[derive(Clone, Default, Eq, PartialEq)] pub struct FileInfo<'data> { /// The file's basename. name: Cow<'data, [u8]>,
impl<'data> FileInfo<'data> { /// Creates a `FileInfo` with a given directory and the file name. #[cfg(feature = "dwarf")] pub fn new(dir: Cow<'data, [u8]>, name: Cow<'data, [u8]>) -> Self { FileInfo { name, dir } } /// Creates a `FileInfo` from a joined path by trying to split it. #[cfg(any(feature = "breakpad", feature = "ms", feature = "sourcebundle"))] pub fn from_path(path: &'data [u8]) -> Self { let (dir, name) = symbolic_common::split_path_bytes(path); FileInfo { name: Cow::Borrowed(name), dir: match dir { Some(dir) => Cow::Borrowed(dir), None => Cow::default(), }, } } /// Creates a `FileInfo` from a joined path by trying to split it. /// Unlike from_path(), copies the given data instead of referencing it. #[cfg(feature = "ppdb")] pub(crate) fn from_path_owned(path: &[u8]) -> Self { let (dir, name) = symbolic_common::split_path_bytes(path); FileInfo { name: Cow::Owned(name.to_vec()), dir: match dir { Some(dir) => Cow::Owned(dir.to_vec()), None => Cow::default(), }, } } /// Creates a `FileInfo` with the file name. pub fn from_filename(name: &'data [u8]) -> Self { FileInfo { name: Cow::Borrowed(name), dir: Cow::default(), } } /// The file name as UTF-8 string. pub fn name_str(&self) -> Cow<'data, str> { from_utf8_cow_lossy(&self.name) } /// Path to the file relative to the compilation directory. pub fn dir_str(&self) -> Cow<'data, str> { from_utf8_cow_lossy(&self.dir) } /// The full path to the file, relative to the compilation directory. pub fn path_str(&self) -> String { let joined = join_path(&self.dir_str(), &self.name_str()); clean_path(&joined).into_owned() } } #[allow(clippy::ptr_arg)] // false positive https://github.com/rust-lang/rust-clippy/issues/9218 pub(crate) fn from_utf8_cow_lossy<'data>(input: &Cow<'data, [u8]>) -> Cow<'data, str> { // See https://github.com/rust-lang/rust/issues/32669 match input { Cow::Borrowed(bytes) => String::from_utf8_lossy(bytes), Cow::Owned(bytes) => match String::from_utf8_lossy(bytes) { Cow::Borrowed(_) => unsafe { String::from_utf8_unchecked(bytes.to_vec()) }.into(), Cow::Owned(s) => s.into(), }, } } impl fmt::Debug for FileInfo<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("FileInfo") .field("name", &self.name_str()) .field("dir", &self.dir_str()) .finish() } } /// File information comprising a compilation directory, relative path and name. pub struct FileEntry<'data> { /// Path to the compilation directory. File paths are relative to this. compilation_dir: Cow<'data, [u8]>, /// File name and path. pub info: FileInfo<'data>, } impl<'data> FileEntry<'data> { /// Path to the compilation directory. pub fn new(compilation_dir: Cow<'data, [u8]>, info: FileInfo<'data>) -> Self { FileEntry { compilation_dir, info, } } /// Path to the compilation directory. pub fn compilation_dir_str(&self) -> Cow<'data, str> { from_utf8_cow_lossy(&self.compilation_dir) } /// Absolute path to the file, including the compilation directory. 
pub fn abs_path_str(&self) -> String { let joined_path = join_path(&self.dir_str(), &self.name_str()); let joined = join_path(&self.compilation_dir_str(), &joined_path); clean_path(&joined).into_owned() } } impl fmt::Debug for FileEntry<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("FileInfo") .field("compilation_dir", &self.compilation_dir_str()) .field("name", &self.name_str()) .field("dir", &self.dir_str()) .finish() } } impl<'data> Deref for FileEntry<'data> { type Target = FileInfo<'data>; fn deref(&self) -> &Self::Target { &self.info } } /// File and line number mapping for an instruction address. #[derive(Clone, Eq, PartialEq)] pub struct LineInfo<'data> { /// The instruction address relative to the image base (load address). pub address: u64, /// Total code size covered by this line record. pub size: Option<u64>, /// File name and path. pub file: FileInfo<'data>, /// Absolute line number starting at 1. Zero means no line number. pub line: u64, } #[cfg(test)] impl LineInfo<'static> { pub(crate) fn new(address: u64, size: u64, file: &[u8], line: u64) -> LineInfo { LineInfo { address, size: Some(size), file: FileInfo::from_filename(file), line, } } } impl fmt::Debug for LineInfo<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut s = f.debug_struct("LineInfo"); s.field("address", &format_args!("{:#x}", self.address)); match self.size { Some(size) => s.field("size", &format_args!("{size:#x}")), None => s.field("size", &self.size), }; s.field("file", &self.file) .field("line", &self.line) .finish() } } /// Debug information for a function. #[derive(Clone)] pub struct Function<'data> { /// Relative instruction address of the start of the function. pub address: u64, /// Total code size covered by the function body, including inlined functions. pub size: u64, /// The name and language of the function symbol. pub name: Name<'data>, /// Path to the compilation directory. File paths are relative to this. pub compilation_dir: &'data [u8], /// Lines covered by this function, including inlined children. pub lines: Vec<LineInfo<'data>>, /// Functions that have been inlined into this function's body. pub inlinees: Vec<Function<'data>>, /// Specifies whether this function is inlined. pub inline: bool, } impl Function<'_> { /// End address of the entire function body, including inlined functions. /// /// This address points at the first instruction after the function body. pub fn end_address(&self) -> u64 { self.address.saturating_add(self.size) } } impl fmt::Debug for Function<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Function") .field("address", &format_args!("{:#x}", self.address)) .field("size", &format_args!("{:#x}", self.size)) .field("name", &self.name) .field( "compilation_dir", &String::from_utf8_lossy(self.compilation_dir), ) .field("lines", &self.lines) .field("inlinees", &self.inlinees) .field("inline", &self.inline) .finish() } } /// A dynamically dispatched iterator over items with the given lifetime. pub type DynIterator<'a, T> = Box<dyn Iterator<Item = T> + 'a>; /// A stateful session for interfacing with debug information. /// /// Debug sessions can be obtained via [`ObjectLike::debug_session`]. Since computing a session may /// be a costly operation, try to reuse the session as much as possible. 
/// /// ## Implementing DebugSession /// /// Reading debug information from object files usually requires loading multiple sections into /// memory and computing maps for quick random access to certain information. Since this can be a /// quite costly process, this is encapsulated into a `DebugSession`. The session may hold whatever /// data and caches may be necessary for efficiently interfacing with the debug info. /// /// All trait methods on a `DebugSession` receive `&mut self`, to allow mutation of internal cache /// structures. Lifetimes of returned types are tied to this session's lifetime, which allows to /// borrow data from the session. /// /// Examples for things to compute when building a debug session are: /// /// - Decompress debug information if it is stored with compression. /// - Build a symbol map for random access to public symbols. /// - Map string tables and other lookup tables. /// - Read headers of compilation units (compilands) to resolve cross-unit references. /// /// [`ObjectLike::debug_session`]: trait.ObjectLike.html#tymethod.debug_session pub trait DebugSession<'session> { /// The error returned when reading debug information fails. type Error; /// An iterator over all functions in this debug file. type FunctionIterator: Iterator<Item = Result<Function<'session>, Self::Error>>; /// An iterator over all source files referenced by this debug file. type FileIterator: Iterator<Item = Result<FileEntry<'session>, Self::Error>>; /// Returns an iterator over all functions in this debug file. /// /// Functions are iterated in the order they are declared in their compilation units. The /// functions yielded by this iterator include all inlinees and line records resolved. /// /// Note that the iterator holds a mutable borrow on the debug session, which allows it to use /// caches and optimize resources while resolving function and line information. fn functions(&'session self) -> Self::FunctionIterator; /// Returns an iterator over all source files referenced by this debug file. fn files(&'session self) -> Self::FileIterator; /// Looks up a file
/// Path to the file. dir: Cow<'data, [u8]>, }
random_line_split
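The `abs_path_str` method in the row above resolves a source path in three steps: join the name onto the directory, join the result onto the compilation directory, then normalize. A minimal sketch of the same logic, assuming POSIX-style separators; `join_path` and `clean_path` are the `symbolic_common` helpers the surrounding code uses, and the literal paths are hypothetical:

```rust
use symbolic_common::{clean_path, join_path};

// Resolve an absolute source path the way `FileEntry::abs_path_str` does:
// join name onto dir, join the result onto the compilation directory, then
// clean redundant "./" and "../" segments out of the joined path.
fn abs_path(comp_dir: &str, dir: &str, name: &str) -> String {
    let relative = join_path(dir, name);
    let joined = join_path(comp_dir, &relative);
    clean_path(&joined).into_owned()
}

fn main() {
    // Hypothetical inputs: a DWARF compilation dir plus a relative file path.
    println!("{}", abs_path("/build/app", "../src", "main.c")); // e.g. "/build/src/main.c"
}
```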
base.rs
/// Returns a human readable name of the object kind. /// /// This is also used in alternate formatting: /// /// ```rust /// # use symbolic_debuginfo::ObjectKind; /// assert_eq!(format!("{:#}", ObjectKind::Executable), ObjectKind::Executable.human_name()); /// ``` pub fn human_name(self) -> &'static str { match self { ObjectKind::None => "file", ObjectKind::Relocatable => "object", ObjectKind::Executable => "executable", ObjectKind::Library => "library", ObjectKind::Dump => "memory dump", ObjectKind::Debug => "debug companion", ObjectKind::Sources => "sources", ObjectKind::Other => "file", } } } impl fmt::Display for ObjectKind { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if f.alternate() { f.write_str(self.human_name()) } else { f.write_str(self.name()) } } } impl FromStr for ObjectKind { type Err = UnknownObjectKindError; fn from_str(string: &str) -> Result<ObjectKind, UnknownObjectKindError> { Ok(match string { "none" => ObjectKind::None, "rel" => ObjectKind::Relocatable, "exe" => ObjectKind::Executable, "lib" => ObjectKind::Library, "dump" => ObjectKind::Dump, "dbg" => ObjectKind::Debug, "src" => ObjectKind::Sources, "other" => ObjectKind::Other, _ => return Err(UnknownObjectKindError), }) } } /// An error returned for unknown or invalid [`FileFormats`](enum.FileFormat.html). #[derive(Debug)] pub struct UnknownFileFormatError; impl fmt::Display for UnknownFileFormatError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "unknown file format") } } impl std::error::Error for UnknownFileFormatError {} /// Represents the physical object file format. #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Clone)] pub enum FileFormat { /// An unknown file format. Unknown, /// Breakpad ASCII symbol. Breakpad, /// Executable and Linkable Format, used on Linux. Elf, /// Mach Objects, used on macOS and iOS derivatives. MachO, /// Program Database, the debug companion format on Windows. Pdb, /// Portable Executable, an extension of COFF used on Windows. Pe, /// Source code bundle ZIP. SourceBundle, /// WASM container. Wasm, /// Portable PDB PortablePdb, } impl FileFormat { /// Returns the name of the file format. pub fn name(self) -> &'static str { match self { FileFormat::Unknown => "unknown", FileFormat::Breakpad => "breakpad", FileFormat::Elf => "elf", FileFormat::MachO => "macho", FileFormat::Pdb => "pdb", FileFormat::Pe => "pe", FileFormat::SourceBundle => "sourcebundle", FileFormat::Wasm => "wasm", FileFormat::PortablePdb => "portablepdb", } } } impl fmt::Display for FileFormat { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(self.name()) } } impl FromStr for FileFormat { type Err = UnknownFileFormatError; fn from_str(string: &str) -> Result<FileFormat, UnknownFileFormatError> { Ok(match string { "breakpad" => FileFormat::Breakpad, "elf" => FileFormat::Elf, "macho" => FileFormat::MachO, "pdb" => FileFormat::Pdb, "pe" => FileFormat::Pe, "sourcebundle" => FileFormat::SourceBundle, "wasm" => FileFormat::Wasm, "portablepdb" => FileFormat::PortablePdb, _ => return Err(UnknownFileFormatError), }) } } /// A symbol from a symbol table. #[derive(Clone, Default, Eq, PartialEq)] pub struct Symbol<'data> { /// The name of the symbol. /// /// This name is generally mangled. It can be demangled by constructing a `Name` instance and /// calling demangle on it. Certain object files might only store demangled symbol names. pub name: Option<Cow<'data, str>>, /// The relative address of this symbol. 
pub address: u64, /// The size of this symbol, if known. /// /// When loading symbols from an object file, the size will generally not be known. Instead, /// construct a [`SymbolMap`] from the object, which also fills in sizes. /// /// [`SymbolMap`]: struct.SymbolMap.html pub size: u64, } impl<'data> Symbol<'data> { /// Returns the name of this symbol as a string. pub fn name(&self) -> Option<&str> { self.name.as_ref().map(Cow::as_ref) } /// Determines whether the given address is covered by this symbol. /// /// If the symbol size has not been computed, the address is assumed to be covered if it is /// greater than or equal to the symbol address. Otherwise, the address must be in the half-open interval /// `[address, address + size)`. pub fn contains(&self, address: u64) -> bool { address >= self.address && (self.size == 0 || address < self.address + self.size) } } impl<'d> fmt::Debug for Symbol<'d> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Symbol") .field("name", &self.name().unwrap_or("<unknown>")) .field("address", &format_args!("{:#x}", self.address)) .field("size", &format_args!("{:#x}", self.size)) .finish() } } /// IntoIterator type for [`SymbolMap`](struct.SymbolMap.html). pub type SymbolMapIter<'data> = std::vec::IntoIter<Symbol<'data>>; /// A sorted list of symbols, suitable for quick lookups. /// /// This type can either be computed from a list or iterator of symbols, or preferably directly /// by calling [`ObjectLike::symbol_map`] on any object. Symbols in the symbol map are guaranteed to /// have a `size` set, except possibly for the last symbol; missing sizes are computed from the offset to the /// subsequent symbol. /// /// `SymbolMap` also exposes a read-only view on the sorted slice of symbols. It can be converted to /// and from lists of symbols. /// /// ## Example /// /// ```rust /// # use symbolic_debuginfo::{Symbol, SymbolMap}; /// let map = SymbolMap::from(vec![ ///     Symbol { name: Some("A".into()), address: 0x4400, size: 0 }, ///     Symbol { name: Some("B".into()), address: 0x4200, size: 0 }, ///     Symbol { name: Some("C".into()), address: 0x4000, size: 0 }, /// ]); /// /// assert_eq!(map[0], Symbol { ///     name: Some("C".into()), ///     address: 0x4000, ///     size: 0x200, /// }); /// ``` /// /// [`ObjectLike::symbol_map`]: trait.ObjectLike.html#tymethod.symbol_map #[derive(Clone, Debug, Default)] pub struct SymbolMap<'data> { symbols: Vec<Symbol<'data>>, } impl<'data> SymbolMap<'data> { /// Creates a new, empty symbol map. pub fn new() -> Self { SymbolMap { symbols: Vec::new(), } } /// Looks up the symbol covering the given address. pub fn lookup(&self, address: u64) -> Option<&Symbol<'data>> { match self.symbols.binary_search_by_key(&address, Self::key) { Ok(index) => Some(&self.symbols[index]), Err(0) => None, Err(next_index) => { let symbol = &self.symbols[next_index - 1]; if symbol.contains(address) { Some(symbol) } else { None } } } } /// Looks up a symbol by its start address. pub fn lookup_exact(&self, address: u64) -> Option<&Symbol<'data>> { let idx = self .symbols .binary_search_by_key(&address, Self::key) .ok()?; self.symbols.get(idx) } /// Looks up a symbol covering an entire range. /// /// This is similar to [`lookup`], but it only returns the symbol result if it _also_ covers the /// inclusive end address of the range. 
/// /// [`lookup`]: struct.SymbolMap.html#method.lookup pub fn lookup_range<R>(&self, range: R) -> Option<&Symbol<'data>> where R: RangeBounds<u64>, { let start = match range.start_bound() { Bound::Included(start) => *start, Bound::Excluded(start) => *start + 1, Bound::Unbounded => 0, }; let symbol = self.lookup(start)?; let end = match range.end_bound() { Bound::Included(end) => *end, Bound::Excluded(end) => *end - 1, Bound::Unbounded => u64::max_value(), }; if end <= start || symbol.contains(end) { Some(symbol) } else { None } } /// Returns the lookup key for a symbol, which is the symbol's address. #[inline(always)] fn key(symbol: &Symbol<'data>) -> u64 { symbol.address } } impl<'d> Deref for SymbolMap<'d> { type Target = [Symbol<'d>]; fn deref(&self) -> &Self::Target { &self.symbols } } impl<'data> IntoIterator for SymbolMap<'data> { type Item = Symbol<'data>; type IntoIter = SymbolMapIter<'data>; fn into_iter(self) -> Self::IntoIter { self.symbols.into_iter() } } impl<'data, 'a> IntoIterator for &'a SymbolMap<'data> { type Item = &'a Symbol<'data>; type IntoIter = std::slice::Iter<'a, Symbol<'data>>; fn into_iter(self) -> Self::IntoIter { self.symbols.iter() } } impl<'d> AsRef<[Symbol<'d>]> for SymbolMap<'d> { fn as_ref(&self) -> &[Symbol<'d>] { &self.symbols } } impl<'d> From<Vec<Symbol<'d>>> for SymbolMap<'d> { fn from(mut symbols: Vec<Symbol<'d>>) -> Self { if !symbols.is_empty() { // NB: This might require stable sorting to ensure determinism if multiple symbols point // at the same location. However, this only seems to happen for equivalent variants of // the same function. // // An example would be destructors where D2 (base object destructor) and D1 (complete // object destructor) might share the same code. Since those always demangle to the same // name, we do not care which function to keep in this case. // // Inlined functions will generally not appear in this list, unless they _also_ have an // explicit function body, in which case they will have a unique address, again. dmsort::sort_by_key(&mut symbols, Self::key); // Compute sizes of consecutive symbols if the size has not been provided by the symbol // iterator. In the same pass, drop all but the first symbol at any given address. We do // not rely on the size of symbols in this case, since the ranges might still be // overlapping. symbols.dedup_by(|next, symbol| { if symbol.size == 0 { symbol.size = next.address - symbol.address; } symbol.address == next.address }) } SymbolMap { symbols } } } impl<'d> FromIterator<Symbol<'d>> for SymbolMap<'d> { fn from_iter<I>(iter: I) -> Self where I: IntoIterator<Item = Symbol<'d>>, { Vec::from_iter(iter).into() } } /// File information referred by [`LineInfo`](struct.LineInfo.html) comprising a directory and name. /// /// The file path is usually relative to a compilation directory. It might contain parent directory /// segments (`../`). #[derive(Clone, Default, Eq, PartialEq)] pub struct FileInfo<'data> { /// The file's basename. name: Cow<'data, [u8]>, /// Path to the file. dir: Cow<'data, [u8]>, } impl<'data> FileInfo<'data> { /// Creates a `FileInfo` with a given directory and the file name. #[cfg(feature = "dwarf")] pub fn new(dir: Cow<'data, [u8]>, name: Cow<'data, [u8]>) -> Self { FileInfo { name, dir } } /// Creates a `FileInfo` from a joined path by trying to split it. 
#[cfg(any(feature = "breakpad", feature = "ms", feature = "sourcebundle"))] pub fn from_path(path: &'data [u8]) -> Self { let (dir, name) = symbolic_common::split_path_bytes(path); FileInfo { name: Cow::Borrowed(name), dir: match dir { Some(dir) => Cow::Borrowed(dir), None => Cow::default(), }, } } /// Creates a `FileInfo` from a joined path by trying to split it. /// Unlike from_path(), copies the given data instead of referencing it. #[cfg(feature = "ppdb")] pub(crate) fn from_path_owned(path: &[u8]) -> Self { let (dir, name) = symbolic_common::split_path_bytes(path); FileInfo { name: Cow::Owned(name.to_vec()), dir: match dir { Some(dir) => Cow::Owned(dir.to_vec()), None => Cow::default(), }, } } /// Creates a `FileInfo` with the file name. pub fn from_filename(name: &'data [u8]) -> Self { FileInfo { name: Cow::Borrowed(name), dir: Cow::default(), } } /// The file name as UTF-8 string. pub fn name_str(&self) -> Cow<'data, str> { from_utf8_cow_lossy(&self.name) } /// Path to the file relative to the compilation directory. pub fn dir_str(&self) -> Cow<'data, str> { from_utf8_cow_lossy(&self.dir) } /// The full path to the file, relative to the compilation directory. pub fn path_str(&self) -> String { let joined = join_path(&self.dir_str(), &self.name_str()); clean_path(&joined).into_owned() } } #[allow(clippy::ptr_arg)] // false positive https://github.com/rust-lang/rust-clippy/issues/9218 pub(crate) fn from_utf8_cow_lossy<'data>(input: &Cow<'data, [u8]>) -> Cow<'data, str> { // See https://github.com/rust-lang/rust/issues/32669 match input { Cow::Borrowed(bytes) => String::from_utf8_lossy(bytes), Cow::Owned(bytes) => match String::from_utf8_lossy(bytes) { Cow::Borrowed(_) => unsafe { String::from_utf8_unchecked(bytes.to_vec()) }.into(), Cow::Owned(s) => s.into(), }, } } impl fmt::Debug for FileInfo<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("FileInfo") .field("name", &self.name_str()) .field("dir", &self.dir_str()) .finish() } } /// File information comprising a compilation directory, relative path and name. pub struct FileEntry<'data> { /// Path to the compilation directory. File paths are relative to this. compilation_dir: Cow<'data, [u8]>, /// File name and path. pub info: FileInfo<'data>, } impl<'data> FileEntry<'data> { /// Creates a new `FileEntry` from a compilation directory and file info. pub fn new(compilation_dir: Cow<'data, [u8]>, info: FileInfo<'data>) -> Self { FileEntry { compilation_dir, info, } } /// Path to the compilation directory. pub fn compilation_dir_str(&self) -> Cow<'data, str> { from_utf8_cow_lossy(&self.compilation_dir) } /// Absolute path to the file, including the compilation directory. pub fn abs_path_str(&self) -> String { let joined_path = join_path(&self.dir_str(), &self.name_str()); let joined = join_path(&self.compilation_dir_str(), &joined_path); clean_path(&joined).into_owned() } } impl fmt::Debug for FileEntry<'_> { fn
(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("FileEntry") .field("compilation_dir", &self.compilation_dir_str()) .field("name", &self.name_str()) .field("dir", &self.dir_str()) .finish() } } impl<'data> Deref for FileEntry<'data> { type Target = FileInfo<'data>; fn deref(&self) -> &Self::Target { &self.info } } /// File and line number mapping for an instruction address. #[derive(Clone, Eq, PartialEq)] pub struct LineInfo<'data> { /// The instruction address relative to the image base (load address). pub address: u64, /// Total code size covered by this line record. pub size: Option<u64>, /// File name and path. pub file: FileInfo<'data>, /// Absolute line number starting at 1. Zero means no line number. pub line: u64, } #[cfg(test)] impl LineInfo<'static> { pub(crate) fn new(address: u64, size: u64, file: &[u8], line: u64) -> LineInfo { LineInfo { address, size: Some(size), file: FileInfo::from_filename(file), line, } } } impl fmt::Debug for LineInfo<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut s = f.debug_struct("LineInfo"); s.field("address", &format_args!("{:#x}", self.address)); match self.size { Some(size) => s.field("size", &format_args!("{size:#x}")), None => s.field("size", &self.size), }; s.field("file", &self.file) .field("line", &self.line) .finish() } } /// Debug information for a function. #[derive(Clone)] pub struct Function<'data> { /// Relative instruction address of the start of the function. pub address: u64, /// Total code size covered by the function body, including inlined functions. pub size: u64, /// The name and language of the function symbol. pub name: Name<'data>, /// Path to the compilation directory. File paths are relative to this. pub compilation_dir: &'data [u8], /// Lines covered by this function, including inlined children. pub lines: Vec<LineInfo<'data>>, /// Functions that have been inlined into this function's body. pub inlinees: Vec<Function<'data>>, /// Specifies whether this function is inlined. pub inline: bool, } impl Function<'_> { /// End address of the entire function body, including inlined functions. /// /// This address points at the first instruction after the function body. pub fn end_address(&self) -> u64 { self.address.saturating_add(self.size) } } impl fmt::Debug for Function<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Function") .field("address", &format_args!("{:#x}", self.address)) .field("size", &format_args!("{:#x}", self.size)) .field("name", &self.name) .field( "compilation_dir", &String::from_utf8_lossy(self.compilation_dir), ) .field("lines", &self.lines) .field("inlinees", &self.inlinees) .field("inline", &self.inline) .finish() } } /// A dynamically dispatched iterator over items with the given lifetime. pub type DynIterator<'a, T> = Box<dyn Iterator<Item = T> + 'a>; /// A stateful session for interfacing with debug information. /// /// Debug sessions can be obtained via [`ObjectLike::debug_session`]. Since computing a session may /// be a costly operation, try to reuse the session as much as possible. /// /// ## Implementing DebugSession /// /// Reading debug information from object files usually requires loading multiple sections into /// memory and computing maps for quick random access to certain information. Since this can be /// quite a costly process, this is encapsulated into a `DebugSession`. The session may hold whatever /// data and caches may be necessary for efficiently interfacing with the debug info. 
/// /// All trait methods on a `DebugSession` receive `&mut self`, to allow mutation of internal cache /// structures. Lifetimes of returned types are tied to this session's lifetime, which allows /// borrowing data from the session. /// /// Examples of things to compute when building a debug session are: /// /// - Decompress debug information if it is stored with compression. /// - Build a symbol map for random access to public symbols. /// - Map string tables and other lookup tables. /// - Read headers of compilation units (compilands) to resolve cross-unit references. /// /// [`ObjectLike::debug_session`]: trait.ObjectLike.html#tymethod.debug_session pub trait DebugSession<'session> { /// The error returned when reading debug information fails. type Error; /// An iterator over all functions in this debug file. type FunctionIterator: Iterator<Item = Result<Function<'session>, Self::Error>>; /// An iterator over all source files referenced by this debug file. type FileIterator: Iterator<Item = Result<FileEntry<'session>, Self::Error>>; /// Returns an iterator over all functions in this debug file. /// /// Functions are iterated in the order they are declared in their compilation units. The /// functions yielded by this iterator include all inlinees and line records resolved. /// /// Note that the iterator holds a mutable borrow on the debug session, which allows it to use /// caches and optimize resources while resolving function and line information. fn functions(&'session self) -> Self::FunctionIterator; /// Returns an iterator over all source files referenced by this debug file. fn files(&'session self) -> Self::FileIterator; /// Looks up a
fmt
identifier_name
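To make the size-inference behavior documented in this row concrete, here is a small usage sketch built on the same data as the doc example above; `lookup`, `contains`, and the `From<Vec<_>>` sorting/dedup logic are the items defined in this file:

```rust
use symbolic_debuginfo::{Symbol, SymbolMap};

fn main() {
    // Same symbols as in the doc example: unsorted and without sizes.
    let map = SymbolMap::from(vec![
        Symbol { name: Some("A".into()), address: 0x4400, size: 0 },
        Symbol { name: Some("B".into()), address: 0x4200, size: 0 },
        Symbol { name: Some("C".into()), address: 0x4000, size: 0 },
    ]);

    // 0x4105 falls into [0x4000, 0x4200), the half-open range of "C".
    assert_eq!(map.lookup(0x4105).and_then(|s| s.name()), Some("C"));

    // The last symbol keeps size 0, so it covers any higher address.
    assert_eq!(map.lookup(0x9000).and_then(|s| s.name()), Some("A"));

    // Addresses below the first symbol are not covered at all.
    assert!(map.lookup(0x3fff).is_none());
}
```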
mod.rs
_codec::{Decode, Encode}; use sp_consensus_grandpa::{AuthorityId, AuthorityWeight, SetId}; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; use sp_runtime::EncodedJustification; pub use types::{AuthoritySet, AuthoritySetChange}; #[derive(Encode, Decode, Clone, PartialEq, Serialize, Deserialize)] pub struct BridgeInfo<T: Config> { #[serde(bound( serialize = "T::Header: ::serde::Serialize", deserialize = "T::Header: ::serde::de::DeserializeOwned" ))] last_finalized_block_header: T::Header, #[serde(with = "more::scale_bytes")] current_set: AuthoritySet, } impl<T: Config> BridgeInfo<T> { pub fn new(block_header: T::Header, validator_set: AuthoritySet) -> Self { BridgeInfo { last_finalized_block_header: block_header, current_set: validator_set, } } } type BridgeId = u64; pub trait Config: frame_system::Config<Hash = H256> { type Block: BlockT<Hash = H256, Header = Self::Header>; } impl Config for chain::Runtime { type Block = chain::Block; } #[derive(Encode, Decode, Clone, Serialize, Deserialize)] pub struct LightValidation<T: Config> { num_bridges: BridgeId, #[serde(bound( serialize = "T::Header: ::serde::Serialize", deserialize = "T::Header: ::serde::de::DeserializeOwned" ))] tracked_bridges: BTreeMap<BridgeId, BridgeInfo<T>>, } impl<T: Config> LightValidation<T> where NumberFor<T::Block>: AsPrimitive<usize>, { #[allow(clippy::new_without_default)] pub fn new() -> Self { LightValidation { num_bridges: 0, tracked_bridges: BTreeMap::new(), } } pub fn initialize_bridge( &mut self, block_header: T::Header, validator_set: AuthoritySet, proof: StorageProof, ) -> Result<BridgeId> { let state_root = block_header.state_root(); Self::check_validator_set_proof(state_root, proof, &validator_set.list, validator_set.id) .map_err(anyhow::Error::msg)?; let bridge_info = BridgeInfo::new(block_header, validator_set); let new_bridge_id = self.num_bridges + 1; self.tracked_bridges.insert(new_bridge_id, bridge_info); self.num_bridges = new_bridge_id; Ok(new_bridge_id) } /// Submits a sequence of block headers to the light client for validation /// /// The light client accepts a sequence of block headers, optionally with an authority set change /// in the last block. Without the authority set change, it assumes the authority set and the set /// id remain the same after submitting the blocks. One submission can have at most one authority /// set change (change.set_id == last_set_id + 1). 
pub fn submit_finalized_headers( &mut self, bridge_id: BridgeId, header: T::Header, ancestry_proof: Vec<T::Header>, grandpa_proof: EncodedJustification, authority_set_change: Option<AuthoritySetChange>, ) -> Result<()> { let bridge = self .tracked_bridges .get(&bridge_id) .ok_or_else(|| anyhow::Error::msg(Error::NoSuchBridgeExists))?; // Check that the new header is a descendant of the old header let last_header = &bridge.last_finalized_block_header; verify_ancestry(ancestry_proof, last_header.hash(), &header)?; let block_hash = header.hash(); let block_num = *header.number(); // Check that the header has been finalized let voters = &bridge.current_set; let voter_set = VoterSet::new(voters.list.clone()).unwrap(); let voter_set_id = voters.id; verify_grandpa_proof::<T::Block>( grandpa_proof, block_hash, block_num, voter_set_id, &voter_set, )?; match self.tracked_bridges.get_mut(&bridge_id) { Some(bridge_info) => { bridge_info.last_finalized_block_header = header; if let Some(change) = authority_set_change { // Check the validator set increment if change.authority_set.id != voter_set_id + 1 { return Err(anyhow::Error::msg(Error::UnexpectedValidatorSetId)); } // Check validator set change proof let state_root = bridge_info.last_finalized_block_header.state_root(); Self::check_validator_set_proof( state_root, change.authority_proof, &change.authority_set.list, change.authority_set.id, )?; // Commit bridge_info.current_set = AuthoritySet { list: change.authority_set.list, id: change.authority_set.id, } } } _ => panic!("We successfully got this bridge earlier, therefore it exists; qed"), }; Ok(()) } pub fn validate_storage_proof( &self, state_root: T::Hash,
items: &[(&[u8], &[u8])], // &[(key, value)] ) -> Result<()> { let checker = StorageProofChecker::<T::Hashing>::new(state_root, proof)?; for (k, v) in items { let actual_value = checker .read_value(k)? .ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?; if actual_value.as_slice() != *v { return Err(anyhow::Error::msg(Error::StorageValueMismatch)); } } Ok(()) } } #[derive(Debug)] pub enum Error { // InvalidStorageProof, // StorageRootMismatch, StorageValueUnavailable, // InvalidValidatorSetProof, ValidatorSetMismatch, InvalidAncestryProof, NoSuchBridgeExists, InvalidFinalityProof, // UnknownClientError, // HeaderAncestryMismatch, UnexpectedValidatorSetId, StorageValueMismatch, } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { // Error::StorageRootMismatch => write!(f, "storage root mismatch"), Error::StorageValueUnavailable => write!(f, "storage value unavailable"), Error::ValidatorSetMismatch => write!(f, "validator set mismatch"), Error::InvalidAncestryProof => write!(f, "invalid ancestry proof"), Error::NoSuchBridgeExists => write!(f, "no such bridge exists"), Error::InvalidFinalityProof => write!(f, "invalid finality proof"), // Error::HeaderAncestryMismatch => write!(f, "header ancestry mismatch"), Error::UnexpectedValidatorSetId => write!(f, "unexpected validator set id"), Error::StorageValueMismatch => write!(f, "storage value mismatch"), } } } impl From<JustificationError> for Error { fn from(e: JustificationError) -> Self { match e { JustificationError::BadJustification(msg) => { error!("InvalidFinalityProof(BadJustification({}))", msg); Error::InvalidFinalityProof } JustificationError::JustificationDecode => { error!("InvalidFinalityProof(JustificationDecode)"); Error::InvalidFinalityProof } } } } impl<T: Config> LightValidation<T> where NumberFor<T::Block>: AsPrimitive<usize>, { fn check_validator_set_proof( state_root: &T::Hash, proof: StorageProof, validator_set: &[(AuthorityId, AuthorityWeight)], _set_id: SetId, ) -> Result<()> { let checker = <StorageProofChecker<T::Hashing>>::new(*state_root, proof)?; // By encoding the given set we should have an easy way to compare // with the stuff we get out of storage via `read_value` let mut encoded_validator_set = validator_set.encode(); encoded_validator_set.insert(0, 1); // Add AUTHORITIES_VERSION == 1 let actual_validator_set = checker .read_value(b":grandpa_authorities")? .ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?; // TODO: check set_id // checker.read_value(grandpa::CurrentSetId.key()) if encoded_validator_set == actual_validator_set { Ok(()) } else { Err(anyhow::Error::msg(Error::ValidatorSetMismatch)) } } } // A naive way to check whether a `child` header is a descendant // of an `ancestor` header. For this it requires a proof which // is a chain of headers between (but not including) the `child` // and `ancestor`. This could be updated to use something like // Log2 Ancestors (#2053) in the future. 
fn verify_ancestry<H>(proof: Vec<H>, ancestor_hash: H::Hash, child: &H) -> Result<()> where H: Header<Hash = H256>, { { info!("ancestor_hash: {}", ancestor_hash); for h in proof.iter() { info!( "block {:?} - hash: {} parent: {}", h.number(), h.hash(), h.parent_hash() ); } info!( "child block {:?} - hash: {} parent: {}", child.number(), child.hash(), child.parent_hash() ); } let mut parent_hash = child.parent_hash(); if *parent_hash == ancestor_hash { return Ok(()); } // If we find that the header's parent hash matches our ancestor's hash we're done for header in proof.iter() { // Need to check that blocks are actually related if header.hash() != *parent_hash { break; } parent_hash = header.parent_hash(); if *parent_hash == ancestor_hash { return Ok(()); } } Err(anyhow::Error::msg(Error::InvalidAncestryProof)) } fn verify_grandpa_proof<B>( justification: EncodedJustification, hash: B::Hash, number: NumberFor<B>, set_id: u64, voters: &VoterSet<AuthorityId>, ) -> Result<()> where B: BlockT<Hash = H256>, NumberFor<B>: finality_grandpa::BlockNumberOps, { // We don't really care about the justification, as long as it's valid let _ = GrandpaJustification::<B>::decode_and_verify_finalizes( &justification, (hash, number), set_id, voters, ) .map_err(anyhow::Error::msg)?; Ok(()) } impl<T: Config> fmt::Debug for LightValidation<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "LightValidation {{ num_bridges: {}, tracked_bridges: {:?} }}", self.num_bridges, self.tracked_bridges ) } } impl<T: Config> fmt::Debug for BridgeInfo<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "BridgeInfo {{ last_finalized_block_header: {:?}, current_validator_set: {:?}, current_validator_set_id: {} }}", self.last_finalized_block_header, self.current_set.list, self.current_set.id) } } pub mod utils { use parity_scale_codec::Encode; /// Gets the prefix of a storage item pub fn storage_prefix(module: &str, storage: &str) -> Vec<u8> { let mut bytes = sp_core::twox_128(module.as_bytes()).to_vec(); bytes.extend(&sp_core::twox_128(storage.as_bytes())[..]); bytes } /// Calculates the Substrate storage key prefix for a StorageMap pub fn storage_map_prefix_twox_64_concat( module: &[u8], storage_item: &[u8], key: &(impl Encode + ?Sized), ) -> Vec<u8> { let mut bytes = sp_core::twox_128(module).to_vec(); bytes.extend(&sp_core::twox_128(storage_item)[..]); let encoded = key.encode(); bytes.extend(sp_core::twox_64(&encoded)); bytes.extend(&encoded); bytes } #[test] #[ignore = "for debug"] fn show_keys() { let modules = [ "System", "Timestamp", "RandomnessCollectiveFlip", "Utility", "Multisig", "Proxy", "Vesting", "Scheduler", "Preimage", "ParachainInfo", "ParachainSystem", "XcmpQueue", "CumulusXcm", "DmpQueue", "PolkadotXcm", "Balances", "TransactionPayment", "Authorship", "CollatorSelection", "Session", "Aura", "AuraExt", "Identity", "Democracy", "Council", "Treasury", "Bounties", "Lottery", "TechnicalCommittee", "TechnicalMembership", "PhragmenElection", "Tips", "ChildBounties", "ChainBridge", "XcmBridge", "XTransfer", "PhalaMq", "PhalaRegistry", "PhalaComputation", "PhalaStakePool", "Assets", "AssetsRegistry", "PhalaStakePoolv2", "PhalaVault", "PhalaWrappedBalances", "PhalaBasePool", "Uniques", "RmrkCore", "RmrkEquip", "RmrkMarket", "PWNftSale", "PWIncubation", ]; for module in modules.iter() { let key = storage_prefix(module, ""); println!("{module}: 0x{}", hex::encode(key)); } let storage_keys = [ "Collections", "Nfts", "Priorities", "Children", "Resources", "EquippableBases", 
"EquippableSlots", "Properties", "Lock", "DummyStorage", ]; for key in storage_keys.iter() { let prefix = storage_prefix("RmrkCore", key); println!("RmrkCore::{key}: 0x{}", hex::encode(prefix)); } /* System: 0x26aa394eea5630e07c48ae0c9558cef799e9d85137db46ef4bbea33613baafd5 Timestamp: 0xf0c365c3cf59d671eb72da0e7a4113c499e9d85137db46ef4bbea33613baafd5 RandomnessCollectiveFlip: 0xbd2a529379475088d3e29a918cd4787299e9d85137db46ef4bbea33613baafd5 Utility: 0xd5e1a2fa16732ce6906189438c0a82c699e9d85137db46ef4bbea33613baafd5 Multisig: 0x7474449cca95dc5d0c00e71735a6d17d99e9d85137db46ef4bbea33613baafd5 Proxy: 0x1809d78346727a0ef58c0fa03bafa32399e9d85137db46ef4bbea33613baafd5 Vesting: 0x5f27b51b5ec208ee9cb25b55d872824399e9d85137db46ef4bbea33613baafd5 Scheduler: 0x3db7a24cfdc9de785974746c14a99df999e9d85137db46ef4bbea33613baafd5 Preimage: 0xd8f314b7f4e6b095f0f8ee4656a4482599e9d85137db46ef4bbea33613baafd5 ParachainInfo: 0x0d715f2646c8f85767b5d2764bb2782699e9d85137db46ef4bbea33613baafd5 ParachainSystem: 0x45323df7cc47150b3930e2666b0aa31399e9d85137db46ef4bbea33613baafd5 XcmpQueue: 0x7b3237373ffdfeb1cab4222e3b520d6b99e9d85137db46ef4bbea33613baafd5 CumulusXcm: 0x79e2fe5d327165001f8232643023ed8b99e9d85137db46ef4bbea33613baafd5 DmpQueue: 0xcd5c1f6df63bc97f4a8ce37f14a50ca799e9d85137db46ef4bbea33613baafd5 PolkadotXcm: 0xe38f185207498abb5c213d0fb059b3d899e9d85137db46ef4bbea33613baafd5 Balances: 0xc2261276cc9d1f8598ea4b6a74b15c2f99e9d85137db46ef4bbea33613baafd5 TransactionPayment: 0x3f1467a096bcd71a5b6a0c8155e2081099e9d85137db46ef4bbea33613baafd5 Authorship: 0xd57bce545fb382c34570e5dfbf338f5e99e9d85137db46ef4bbea33613baafd5 CollatorSelection: 0x15464cac3378d46f113cd5b7a4d71c8499e9d85137db46ef4bbea33613baafd5 Session: 0xcec5070d609dd3497f72bde07fc96ba099e9d85137db46ef4bbea33613baafd5 Aura: 0x57f8dc2f5ab09467896f47300f04243899e9d85137db46ef4bbea33613baafd5 AuraExt: 0x3c311d57d4daf52904616cf69648081e99e9d85137db46ef4bbea33613baafd5 Identity: 0x2aeddc77fe58c98d50bd37f1b90840f999e9d85137db46ef4bbea33613baafd5 Democracy: 0xf2794c22e353e9a839f12faab03a911b99e9d85137db46ef4bbea33613baafd5 Council: 0xaebd463ed9925c488c112434d61debc099e9d85137db46ef4bbea33613baafd5 Treasury: 0x89d139e01a5eb2256f222e5fc5dbe6b399e9d85137db46ef4bbea33613baafd5 Bounties: 0xa37f719efab16103103a0c8c2c784ce199e9d85137db46ef4bbea33613baafd5 Lottery: 0xfbc9f53700f75f681f234e70fb7241eb99e9d85137db46ef4bbea33613baafd5 TechnicalCommittee: 0xed25f63942de25ac5253ba64b5eb64d199e9d85137db46ef4bbea33613baafd5 TechnicalMembership: 0x3a2d6c9353500637d8f8e3e0fa0bb1c599e9d85137db46ef4bbea33613baafd5 PhragmenElection: 0xe2e62dd81c48a88f73b6f6463555fd8e99e9d85137db46ef4bbea33613baafd5 Tips: 0x2c5de123c468aef7f3ac2ab3a76f87ce99e9d85137db46ef4bbea33613baafd5 ChildBounties: 0xedfb05b766f199ce00df85317e33050e99e9d85137db46ef4bbea33613baafd5 ChainBridge: 0x43cdcd39d5edb1d16e24fa028edde0de99e9d85137db46ef4bbea33613baafd5 XcmBridge: 0x9d0cdc3697970df81fa5fabe88fa03ea99e9d85137db46ef4bbea33613baafd5 XTransfer: 0xc0cf946351a2b7b37cc8f3086b3674a199e9d85137db46ef4bbea33613baafd5 PhalaMq: 0x2f039a6a7f13e94b9545257e54062a0499e9d85137db46ef4bbea33613baafd5 PhalaRegistry: 0x0d746931e7a6bfd47fbcccfd71984aef99e9d85137db46ef4bbea33613baafd5 PhalaComputation: 0
proof: StorageProof,
random_line_split
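As a usage sketch for the `utils` helpers in the row above: a `StorageMap` entry under the `Twox64Concat` hasher lives at `twox128(pallet) ++ twox128(item) ++ twox64(scale(key)) ++ scale(key)`. The pallet and item names below are hypothetical placeholders, and the snippet assumes it runs inside this crate next to the `utils` module:

```rust
fn main() {
    // Plain prefix of a storage item: twox128("Example") ++ twox128("Values").
    let prefix = utils::storage_prefix("Example", "Values");
    println!("Example::Values prefix: 0x{}", hex::encode(&prefix));

    // Full key of one map entry for a u64 key under Twox64Concat:
    // prefix ++ twox64(scale(42u64)) ++ scale(42u64).
    let full_key = utils::storage_map_prefix_twox_64_concat(b"Example", b"Values", &42u64);
    println!("Example::Values[42]: 0x{}", hex::encode(&full_key));
}
```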
mod.rs
::{Decode, Encode}; use sp_consensus_grandpa::{AuthorityId, AuthorityWeight, SetId}; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; use sp_runtime::EncodedJustification; pub use types::{AuthoritySet, AuthoritySetChange}; #[derive(Encode, Decode, Clone, PartialEq, Serialize, Deserialize)] pub struct BridgeInfo<T: Config> { #[serde(bound( serialize = "T::Header: ::serde::Serialize", deserialize = "T::Header: ::serde::de::DeserializeOwned" ))] last_finalized_block_header: T::Header, #[serde(with = "more::scale_bytes")] current_set: AuthoritySet, } impl<T: Config> BridgeInfo<T> { pub fn new(block_header: T::Header, validator_set: AuthoritySet) -> Self
} type BridgeId = u64; pub trait Config: frame_system::Config<Hash = H256> { type Block: BlockT<Hash = H256, Header = Self::Header>; } impl Config for chain::Runtime { type Block = chain::Block; } #[derive(Encode, Decode, Clone, Serialize, Deserialize)] pub struct LightValidation<T: Config> { num_bridges: BridgeId, #[serde(bound( serialize = "T::Header: ::serde::Serialize", deserialize = "T::Header: ::serde::de::DeserializeOwned" ))] tracked_bridges: BTreeMap<BridgeId, BridgeInfo<T>>, } impl<T: Config> LightValidation<T> where NumberFor<T::Block>: AsPrimitive<usize>, { #[allow(clippy::new_without_default)] pub fn new() -> Self { LightValidation { num_bridges: 0, tracked_bridges: BTreeMap::new(), } } pub fn initialize_bridge( &mut self, block_header: T::Header, validator_set: AuthoritySet, proof: StorageProof, ) -> Result<BridgeId> { let state_root = block_header.state_root(); Self::check_validator_set_proof(state_root, proof, &validator_set.list, validator_set.id) .map_err(anyhow::Error::msg)?; let bridge_info = BridgeInfo::new(block_header, validator_set); let new_bridge_id = self.num_bridges + 1; self.tracked_bridges.insert(new_bridge_id, bridge_info); self.num_bridges = new_bridge_id; Ok(new_bridge_id) } /// Submits a sequence of block headers to the light client for validation /// /// The light client accepts a sequence of block headers, optionally with an authority set change /// in the last block. Without the authority set change, it assumes the authority set and the set /// id remain the same after submitting the blocks. One submission can have at most one authority /// set change (change.set_id == last_set_id + 1). pub fn submit_finalized_headers( &mut self, bridge_id: BridgeId, header: T::Header, ancestry_proof: Vec<T::Header>, grandpa_proof: EncodedJustification, authority_set_change: Option<AuthoritySetChange>, ) -> Result<()> { let bridge = self .tracked_bridges .get(&bridge_id) .ok_or_else(|| anyhow::Error::msg(Error::NoSuchBridgeExists))?; // Check that the new header is a descendant of the old header let last_header = &bridge.last_finalized_block_header; verify_ancestry(ancestry_proof, last_header.hash(), &header)?; let block_hash = header.hash(); let block_num = *header.number(); // Check that the header has been finalized let voters = &bridge.current_set; let voter_set = VoterSet::new(voters.list.clone()).unwrap(); let voter_set_id = voters.id; verify_grandpa_proof::<T::Block>( grandpa_proof, block_hash, block_num, voter_set_id, &voter_set, )?; match self.tracked_bridges.get_mut(&bridge_id) { Some(bridge_info) => { bridge_info.last_finalized_block_header = header; if let Some(change) = authority_set_change { // Check the validator set increment if change.authority_set.id != voter_set_id + 1 { return Err(anyhow::Error::msg(Error::UnexpectedValidatorSetId)); } // Check validator set change proof let state_root = bridge_info.last_finalized_block_header.state_root(); Self::check_validator_set_proof( state_root, change.authority_proof, &change.authority_set.list, change.authority_set.id, )?; // Commit bridge_info.current_set = AuthoritySet { list: change.authority_set.list, id: change.authority_set.id, } } } _ => panic!("We successfully got this bridge earlier, therefore it exists; qed"), }; Ok(()) } pub fn validate_storage_proof( &self, state_root: T::Hash, proof: StorageProof, items: &[(&[u8], &[u8])], // &[(key, value)] ) -> Result<()> { let checker = StorageProofChecker::<T::Hashing>::new(state_root, proof)?; for (k, v) in items { let actual_value = checker .read_value(k)? 
.ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?; if actual_value.as_slice() != *v { return Err(anyhow::Error::msg(Error::StorageValueMismatch)); } } Ok(()) } } #[derive(Debug)] pub enum Error { // InvalidStorageProof, // StorageRootMismatch, StorageValueUnavailable, // InvalidValidatorSetProof, ValidatorSetMismatch, InvalidAncestryProof, NoSuchBridgeExists, InvalidFinalityProof, // UnknownClientError, // HeaderAncestryMismatch, UnexpectedValidatorSetId, StorageValueMismatch, } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { // Error::StorageRootMismatch => write!(f, "storage root mismatch"), Error::StorageValueUnavailable => write!(f, "storage value unavailable"), Error::ValidatorSetMismatch => write!(f, "validator set mismatch"), Error::InvalidAncestryProof => write!(f, "invalid ancestry proof"), Error::NoSuchBridgeExists => write!(f, "no such bridge exists"), Error::InvalidFinalityProof => write!(f, "invalid finality proof"), // Error::HeaderAncestryMismatch => write!(f, "header ancestry mismatch"), Error::UnexpectedValidatorSetId => write!(f, "unexpected validator set id"), Error::StorageValueMismatch => write!(f, "storage value mismatch"), } } } impl From<JustificationError> for Error { fn from(e: JustificationError) -> Self { match e { JustificationError::BadJustification(msg) => { error!("InvalidFinalityProof(BadJustification({}))", msg); Error::InvalidFinalityProof } JustificationError::JustificationDecode => { error!("InvalidFinalityProof(JustificationDecode)"); Error::InvalidFinalityProof } } } } impl<T: Config> LightValidation<T> where NumberFor<T::Block>: AsPrimitive<usize>, { fn check_validator_set_proof( state_root: &T::Hash, proof: StorageProof, validator_set: &[(AuthorityId, AuthorityWeight)], _set_id: SetId, ) -> Result<()> { let checker = <StorageProofChecker<T::Hashing>>::new(*state_root, proof)?; // By encoding the given set we should have an easy way to compare // with the stuff we get out of storage via `read_value` let mut encoded_validator_set = validator_set.encode(); encoded_validator_set.insert(0, 1); // Add AUTHORITIES_VERSION == 1 let actual_validator_set = checker .read_value(b":grandpa_authorities")? .ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?; // TODO: check set_id // checker.read_value(grandpa::CurrentSetId.key()) if encoded_validator_set == actual_validator_set { Ok(()) } else { Err(anyhow::Error::msg(Error::ValidatorSetMismatch)) } } } // A naive way to check whether a `child` header is a descendant // of an `ancestor` header. For this it requires a proof which // is a chain of headers between (but not including) the `child` // and `ancestor`. This could be updated to use something like // Log2 Ancestors (#2053) in the future. 
fn verify_ancestry<H>(proof: Vec<H>, ancestor_hash: H::Hash, child: &H) -> Result<()> where H: Header<Hash = H256>, { { info!("ancestor_hash: {}", ancestor_hash); for h in proof.iter() { info!( "block {:?} - hash: {} parent: {}", h.number(), h.hash(), h.parent_hash() ); } info!( "child block {:?} - hash: {} parent: {}", child.number(), child.hash(), child.parent_hash() ); } let mut parent_hash = child.parent_hash(); if *parent_hash == ancestor_hash { return Ok(()); } // If we find that the header's parent hash matches our ancestor's hash we're done for header in proof.iter() { // Need to check that blocks are actually related if header.hash() != *parent_hash { break; } parent_hash = header.parent_hash(); if *parent_hash == ancestor_hash { return Ok(()); } } Err(anyhow::Error::msg(Error::InvalidAncestryProof)) } fn verify_grandpa_proof<B>( justification: EncodedJustification, hash: B::Hash, number: NumberFor<B>, set_id: u64, voters: &VoterSet<AuthorityId>, ) -> Result<()> where B: BlockT<Hash = H256>, NumberFor<B>: finality_grandpa::BlockNumberOps, { // We don't really care about the justification, as long as it's valid let _ = GrandpaJustification::<B>::decode_and_verify_finalizes( &justification, (hash, number), set_id, voters, ) .map_err(anyhow::Error::msg)?; Ok(()) } impl<T: Config> fmt::Debug for LightValidation<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "LightValidation {{ num_bridges: {}, tracked_bridges: {:?} }}", self.num_bridges, self.tracked_bridges ) } } impl<T: Config> fmt::Debug for BridgeInfo<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "BridgeInfo {{ last_finalized_block_header: {:?}, current_validator_set: {:?}, current_validator_set_id: {} }}", self.last_finalized_block_header, self.current_set.list, self.current_set.id) } } pub mod utils { use parity_scale_codec::Encode; /// Gets the prefix of a storage item pub fn storage_prefix(module: &str, storage: &str) -> Vec<u8> { let mut bytes = sp_core::twox_128(module.as_bytes()).to_vec(); bytes.extend(&sp_core::twox_128(storage.as_bytes())[..]); bytes } /// Calculates the Substrate storage key prefix for a StorageMap pub fn storage_map_prefix_twox_64_concat( module: &[u8], storage_item: &[u8], key: &(impl Encode + ?Sized), ) -> Vec<u8> { let mut bytes = sp_core::twox_128(module).to_vec(); bytes.extend(&sp_core::twox_128(storage_item)[..]); let encoded = key.encode(); bytes.extend(sp_core::twox_64(&encoded)); bytes.extend(&encoded); bytes } #[test] #[ignore = "for debug"] fn show_keys() { let modules = [ "System", "Timestamp", "RandomnessCollectiveFlip", "Utility", "Multisig", "Proxy", "Vesting", "Scheduler", "Preimage", "ParachainInfo", "ParachainSystem", "XcmpQueue", "CumulusXcm", "DmpQueue", "PolkadotXcm", "Balances", "TransactionPayment", "Authorship", "CollatorSelection", "Session", "Aura", "AuraExt", "Identity", "Democracy", "Council", "Treasury", "Bounties", "Lottery", "TechnicalCommittee", "TechnicalMembership", "PhragmenElection", "Tips", "ChildBounties", "ChainBridge", "XcmBridge", "XTransfer", "PhalaMq", "PhalaRegistry", "PhalaComputation", "PhalaStakePool", "Assets", "AssetsRegistry", "PhalaStakePoolv2", "PhalaVault", "PhalaWrappedBalances", "PhalaBasePool", "Uniques", "RmrkCore", "RmrkEquip", "RmrkMarket", "PWNftSale", "PWIncubation", ]; for module in modules.iter() { let key = storage_prefix(module, ""); println!("{module}: 0x{}", hex::encode(key)); } let storage_keys = [ "Collections", "Nfts", "Priorities", "Children", "Resources", "EquippableBases", 
"EquippableSlots", "Properties", "Lock", "DummyStorage", ]; for key in storage_keys.iter() { let prefix = storage_prefix("RmrkCore", key); println!("RmrkCore::{key}: 0x{}", hex::encode(prefix)); } /* System: 0x26aa394eea5630e07c48ae0c9558cef799e9d85137db46ef4bbea33613baafd5 Timestamp: 0xf0c365c3cf59d671eb72da0e7a4113c499e9d85137db46ef4bbea33613baafd5 RandomnessCollectiveFlip: 0xbd2a529379475088d3e29a918cd4787299e9d85137db46ef4bbea33613baafd5 Utility: 0xd5e1a2fa16732ce6906189438c0a82c699e9d85137db46ef4bbea33613baafd5 Multisig: 0x7474449cca95dc5d0c00e71735a6d17d99e9d85137db46ef4bbea33613baafd5 Proxy: 0x1809d78346727a0ef58c0fa03bafa32399e9d85137db46ef4bbea33613baafd5 Vesting: 0x5f27b51b5ec208ee9cb25b55d872824399e9d85137db46ef4bbea33613baafd5 Scheduler: 0x3db7a24cfdc9de785974746c14a99df999e9d85137db46ef4bbea33613baafd5 Preimage: 0xd8f314b7f4e6b095f0f8ee4656a4482599e9d85137db46ef4bbea33613baafd5 ParachainInfo: 0x0d715f2646c8f85767b5d2764bb2782699e9d85137db46ef4bbea33613baafd5 ParachainSystem: 0x45323df7cc47150b3930e2666b0aa31399e9d85137db46ef4bbea33613baafd5 XcmpQueue: 0x7b3237373ffdfeb1cab4222e3b520d6b99e9d85137db46ef4bbea33613baafd5 CumulusXcm: 0x79e2fe5d327165001f8232643023ed8b99e9d85137db46ef4bbea33613baafd5 DmpQueue: 0xcd5c1f6df63bc97f4a8ce37f14a50ca799e9d85137db46ef4bbea33613baafd5 PolkadotXcm: 0xe38f185207498abb5c213d0fb059b3d899e9d85137db46ef4bbea33613baafd5 Balances: 0xc2261276cc9d1f8598ea4b6a74b15c2f99e9d85137db46ef4bbea33613baafd5 TransactionPayment: 0x3f1467a096bcd71a5b6a0c8155e2081099e9d85137db46ef4bbea33613baafd5 Authorship: 0xd57bce545fb382c34570e5dfbf338f5e99e9d85137db46ef4bbea33613baafd5 CollatorSelection: 0x15464cac3378d46f113cd5b7a4d71c8499e9d85137db46ef4bbea33613baafd5 Session: 0xcec5070d609dd3497f72bde07fc96ba099e9d85137db46ef4bbea33613baafd5 Aura: 0x57f8dc2f5ab09467896f47300f04243899e9d85137db46ef4bbea33613baafd5 AuraExt: 0x3c311d57d4daf52904616cf69648081e99e9d85137db46ef4bbea33613baafd5 Identity: 0x2aeddc77fe58c98d50bd37f1b90840f999e9d85137db46ef4bbea33613baafd5 Democracy: 0xf2794c22e353e9a839f12faab03a911b99e9d85137db46ef4bbea33613baafd5 Council: 0xaebd463ed9925c488c112434d61debc099e9d85137db46ef4bbea33613baafd5 Treasury: 0x89d139e01a5eb2256f222e5fc5dbe6b399e9d85137db46ef4bbea33613baafd5 Bounties: 0xa37f719efab16103103a0c8c2c784ce199e9d85137db46ef4bbea33613baafd5 Lottery: 0xfbc9f53700f75f681f234e70fb7241eb99e9d85137db46ef4bbea33613baafd5 TechnicalCommittee: 0xed25f63942de25ac5253ba64b5eb64d199e9d85137db46ef4bbea33613baafd5 TechnicalMembership: 0x3a2d6c9353500637d8f8e3e0fa0bb1c599e9d85137db46ef4bbea33613baafd5 PhragmenElection: 0xe2e62dd81c48a88f73b6f6463555fd8e99e9d85137db46ef4bbea33613baafd5 Tips: 0x2c5de123c468aef7f3ac2ab3a76f87ce99e9d85137db46ef4bbea33613baafd5 ChildBounties: 0xedfb05b766f199ce00df85317e33050e99e9d85137db46ef4bbea33613baafd5 ChainBridge: 0x43cdcd39d5edb1d16e24fa028edde0de99e9d85137db46ef4bbea33613baafd5 XcmBridge: 0x9d0cdc3697970df81fa5fabe88fa03ea99e9d85137db46ef4bbea33613baafd5 XTransfer: 0xc0cf946351a2b7b37cc8f3086b3674a199e9d85137db46ef4bbea33613baafd5 PhalaMq: 0x2f039a6a7f13e94b9545257e54062a0499e9d85137db46ef4bbea33613baafd5 PhalaRegistry: 0x0d746931e7a6bfd47fbcccfd71984aef99e9d85137db46ef4bbea33613baafd5 PhalaComputation:
{ BridgeInfo { last_finalized_block_header: block_header, current_set: validator_set, } }
identifier_body
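The ancestry walk performed by `verify_ancestry` in the row above can be restated without Substrate types. A self-contained sketch, using `(hash, parent_hash)` pairs in place of real headers, ordered from the child's parent back toward the ancestor:

```rust
/// Returns true if `ancestor` is reachable from `child_parent` by following
/// the proof headers, where each proof entry is (header_hash, parent_hash).
fn is_descendant(
    proof: &[([u8; 32], [u8; 32])],
    ancestor: [u8; 32],
    child_parent: [u8; 32],
) -> bool {
    let mut expected = child_parent;
    if expected == ancestor {
        return true; // the child is a direct descendant of the ancestor
    }
    for (hash, parent) in proof {
        // Each proof header must be exactly the one we expect next,
        // otherwise the chain is broken and the proof is invalid.
        if *hash != expected {
            return false;
        }
        expected = *parent;
        if expected == ancestor {
            return true;
        }
    }
    false // ran out of proof headers before reaching the ancestor
}
```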
mod.rs
::{Decode, Encode}; use sp_consensus_grandpa::{AuthorityId, AuthorityWeight, SetId}; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; use sp_runtime::EncodedJustification; pub use types::{AuthoritySet, AuthoritySetChange}; #[derive(Encode, Decode, Clone, PartialEq, Serialize, Deserialize)] pub struct BridgeInfo<T: Config> { #[serde(bound( serialize = "T::Header: ::serde::Serialize", deserialize = "T::Header: ::serde::de::DeserializeOwned" ))] last_finalized_block_header: T::Header, #[serde(with = "more::scale_bytes")] current_set: AuthoritySet, } impl<T: Config> BridgeInfo<T> { pub fn new(block_header: T::Header, validator_set: AuthoritySet) -> Self { BridgeInfo { last_finalized_block_header: block_header, current_set: validator_set, } } } type BridgeId = u64; pub trait Config: frame_system::Config<Hash = H256> { type Block: BlockT<Hash = H256, Header = Self::Header>; } impl Config for chain::Runtime { type Block = chain::Block; } #[derive(Encode, Decode, Clone, Serialize, Deserialize)] pub struct LightValidation<T: Config> { num_bridges: BridgeId, #[serde(bound( serialize = "T::Header: ::serde::Serialize", deserialize = "T::Header: ::serde::de::DeserializeOwned" ))] tracked_bridges: BTreeMap<BridgeId, BridgeInfo<T>>, } impl<T: Config> LightValidation<T> where NumberFor<T::Block>: AsPrimitive<usize>, { #[allow(clippy::new_without_default)] pub fn new() -> Self { LightValidation { num_bridges: 0, tracked_bridges: BTreeMap::new(), } } pub fn
( &mut self, block_header: T::Header, validator_set: AuthoritySet, proof: StorageProof, ) -> Result<BridgeId> { let state_root = block_header.state_root(); Self::check_validator_set_proof(state_root, proof, &validator_set.list, validator_set.id) .map_err(anyhow::Error::msg)?; let bridge_info = BridgeInfo::new(block_header, validator_set); let new_bridge_id = self.num_bridges + 1; self.tracked_bridges.insert(new_bridge_id, bridge_info); self.num_bridges = new_bridge_id; Ok(new_bridge_id) } /// Submits a sequence of block headers to the light client for validation /// /// The light client accepts a sequence of block headers, optionally with an authority set change /// in the last block. Without the authority set change, it assumes the authority set and the set /// id remain the same after submitting the blocks. One submission can have at most one authority /// set change (change.set_id == last_set_id + 1). pub fn submit_finalized_headers( &mut self, bridge_id: BridgeId, header: T::Header, ancestry_proof: Vec<T::Header>, grandpa_proof: EncodedJustification, authority_set_change: Option<AuthoritySetChange>, ) -> Result<()> { let bridge = self .tracked_bridges .get(&bridge_id) .ok_or_else(|| anyhow::Error::msg(Error::NoSuchBridgeExists))?; // Check that the new header is a descendant of the old header let last_header = &bridge.last_finalized_block_header; verify_ancestry(ancestry_proof, last_header.hash(), &header)?; let block_hash = header.hash(); let block_num = *header.number(); // Check that the header has been finalized let voters = &bridge.current_set; let voter_set = VoterSet::new(voters.list.clone()).unwrap(); let voter_set_id = voters.id; verify_grandpa_proof::<T::Block>( grandpa_proof, block_hash, block_num, voter_set_id, &voter_set, )?; match self.tracked_bridges.get_mut(&bridge_id) { Some(bridge_info) => { bridge_info.last_finalized_block_header = header; if let Some(change) = authority_set_change { // Check the validator set increment if change.authority_set.id != voter_set_id + 1 { return Err(anyhow::Error::msg(Error::UnexpectedValidatorSetId)); } // Check validator set change proof let state_root = bridge_info.last_finalized_block_header.state_root(); Self::check_validator_set_proof( state_root, change.authority_proof, &change.authority_set.list, change.authority_set.id, )?; // Commit bridge_info.current_set = AuthoritySet { list: change.authority_set.list, id: change.authority_set.id, } } } _ => panic!("We successfully got this bridge earlier, therefore it exists; qed"), }; Ok(()) } pub fn validate_storage_proof( &self, state_root: T::Hash, proof: StorageProof, items: &[(&[u8], &[u8])], // &[(key, value)] ) -> Result<()> { let checker = StorageProofChecker::<T::Hashing>::new(state_root, proof)?; for (k, v) in items { let actual_value = checker .read_value(k)? 
.ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?; if actual_value.as_slice() != *v { return Err(anyhow::Error::msg(Error::StorageValueMismatch)); } } Ok(()) } } #[derive(Debug)] pub enum Error { // InvalidStorageProof, // StorageRootMismatch, StorageValueUnavailable, // InvalidValidatorSetProof, ValidatorSetMismatch, InvalidAncestryProof, NoSuchBridgeExists, InvalidFinalityProof, // UnknownClientError, // HeaderAncestryMismatch, UnexpectedValidatorSetId, StorageValueMismatch, } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { // Error::StorageRootMismatch => write!(f, "storage root mismatch"), Error::StorageValueUnavailable => write!(f, "storage value unavailable"), Error::ValidatorSetMismatch => write!(f, "validator set mismatch"), Error::InvalidAncestryProof => write!(f, "invalid ancestry proof"), Error::NoSuchBridgeExists => write!(f, "no such bridge exists"), Error::InvalidFinalityProof => write!(f, "invalid finality proof"), // Error::HeaderAncestryMismatch => write!(f, "header ancestry mismatch"), Error::UnexpectedValidatorSetId => write!(f, "unexpected validator set id"), Error::StorageValueMismatch => write!(f, "storage value mismatch"), } } } impl From<JustificationError> for Error { fn from(e: JustificationError) -> Self { match e { JustificationError::BadJustification(msg) => { error!("InvalidFinalityProof(BadJustification({}))", msg); Error::InvalidFinalityProof } JustificationError::JustificationDecode => { error!("InvalidFinalityProof(JustificationDecode)"); Error::InvalidFinalityProof } } } } impl<T: Config> LightValidation<T> where NumberFor<T::Block>: AsPrimitive<usize>, { fn check_validator_set_proof( state_root: &T::Hash, proof: StorageProof, validator_set: &[(AuthorityId, AuthorityWeight)], _set_id: SetId, ) -> Result<()> { let checker = <StorageProofChecker<T::Hashing>>::new(*state_root, proof)?; // By encoding the given set we should have an easy way to compare // with the stuff we get out of storage via `read_value` let mut encoded_validator_set = validator_set.encode(); encoded_validator_set.insert(0, 1); // Add AUTHORITIES_VERSION == 1 let actual_validator_set = checker .read_value(b":grandpa_authorities")? .ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?; // TODO: check set_id // checker.read_value(grandpa::CurrentSetId.key()) if encoded_validator_set == actual_validator_set { Ok(()) } else { Err(anyhow::Error::msg(Error::ValidatorSetMismatch)) } } } // A naive way to check whether a `child` header is a descendant // of an `ancestor` header. For this it requires a proof which // is a chain of headers between (but not including) the `child` // and `ancestor`. This could be updated to use something like // Log2 Ancestors (#2053) in the future. 
fn verify_ancestry<H>(proof: Vec<H>, ancestor_hash: H::Hash, child: &H) -> Result<()> where H: Header<Hash = H256>, { { info!("ancestor_hash: {}", ancestor_hash); for h in proof.iter() { info!( "block {:?} - hash: {} parent: {}", h.number(), h.hash(), h.parent_hash() ); } info!( "child block {:?} - hash: {} parent: {}", child.number(), child.hash(), child.parent_hash() ); } let mut parent_hash = child.parent_hash(); if *parent_hash == ancestor_hash { return Ok(()); } // If we find that the header's parent hash matches our ancestor's hash we're done for header in proof.iter() { // Need to check that blocks are actually related if header.hash() != *parent_hash { break; } parent_hash = header.parent_hash(); if *parent_hash == ancestor_hash { return Ok(()); } } Err(anyhow::Error::msg(Error::InvalidAncestryProof)) } fn verify_grandpa_proof<B>( justification: EncodedJustification, hash: B::Hash, number: NumberFor<B>, set_id: u64, voters: &VoterSet<AuthorityId>, ) -> Result<()> where B: BlockT<Hash = H256>, NumberFor<B>: finality_grandpa::BlockNumberOps, { // We don't really care about the justification, as long as it's valid let _ = GrandpaJustification::<B>::decode_and_verify_finalizes( &justification, (hash, number), set_id, voters, ) .map_err(anyhow::Error::msg)?; Ok(()) } impl<T: Config> fmt::Debug for LightValidation<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "LightValidation {{ num_bridges: {}, tracked_bridges: {:?} }}", self.num_bridges, self.tracked_bridges ) } } impl<T: Config> fmt::Debug for BridgeInfo<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "BridgeInfo {{ last_finalized_block_header: {:?}, current_validator_set: {:?}, current_validator_set_id: {} }}", self.last_finalized_block_header, self.current_set.list, self.current_set.id) } } pub mod utils { use parity_scale_codec::Encode; /// Gets the prefix of a storage item pub fn storage_prefix(module: &str, storage: &str) -> Vec<u8> { let mut bytes = sp_core::twox_128(module.as_bytes()).to_vec(); bytes.extend(&sp_core::twox_128(storage.as_bytes())[..]); bytes } /// Calculates the Substrate storage key prefix for a StorageMap pub fn storage_map_prefix_twox_64_concat( module: &[u8], storage_item: &[u8], key: &(impl Encode + ?Sized), ) -> Vec<u8> { let mut bytes = sp_core::twox_128(module).to_vec(); bytes.extend(&sp_core::twox_128(storage_item)[..]); let encoded = key.encode(); bytes.extend(sp_core::twox_64(&encoded)); bytes.extend(&encoded); bytes } #[test] #[ignore = "for debug"] fn show_keys() { let modules = [ "System", "Timestamp", "RandomnessCollectiveFlip", "Utility", "Multisig", "Proxy", "Vesting", "Scheduler", "Preimage", "ParachainInfo", "ParachainSystem", "XcmpQueue", "CumulusXcm", "DmpQueue", "PolkadotXcm", "Balances", "TransactionPayment", "Authorship", "CollatorSelection", "Session", "Aura", "AuraExt", "Identity", "Democracy", "Council", "Treasury", "Bounties", "Lottery", "TechnicalCommittee", "TechnicalMembership", "PhragmenElection", "Tips", "ChildBounties", "ChainBridge", "XcmBridge", "XTransfer", "PhalaMq", "PhalaRegistry", "PhalaComputation", "PhalaStakePool", "Assets", "AssetsRegistry", "PhalaStakePoolv2", "PhalaVault", "PhalaWrappedBalances", "PhalaBasePool", "Uniques", "RmrkCore", "RmrkEquip", "RmrkMarket", "PWNftSale", "PWIncubation", ]; for module in modules.iter() { let key = storage_prefix(module, ""); println!("{module}: 0x{}", hex::encode(key)); } let storage_keys = [ "Collections", "Nfts", "Priorities", "Children", "Resources", "EquippableBases", 
"EquippableSlots", "Properties", "Lock", "DummyStorage", ]; for key in storage_keys.iter() { let prefix = storage_prefix("RmrkCore", key); println!("RmrkCore::{key}: 0x{}", hex::encode(prefix)); } /* System: 0x26aa394eea5630e07c48ae0c9558cef799e9d85137db46ef4bbea33613baafd5 Timestamp: 0xf0c365c3cf59d671eb72da0e7a4113c499e9d85137db46ef4bbea33613baafd5 RandomnessCollectiveFlip: 0xbd2a529379475088d3e29a918cd4787299e9d85137db46ef4bbea33613baafd5 Utility: 0xd5e1a2fa16732ce6906189438c0a82c699e9d85137db46ef4bbea33613baafd5 Multisig: 0x7474449cca95dc5d0c00e71735a6d17d99e9d85137db46ef4bbea33613baafd5 Proxy: 0x1809d78346727a0ef58c0fa03bafa32399e9d85137db46ef4bbea33613baafd5 Vesting: 0x5f27b51b5ec208ee9cb25b55d872824399e9d85137db46ef4bbea33613baafd5 Scheduler: 0x3db7a24cfdc9de785974746c14a99df999e9d85137db46ef4bbea33613baafd5 Preimage: 0xd8f314b7f4e6b095f0f8ee4656a4482599e9d85137db46ef4bbea33613baafd5 ParachainInfo: 0x0d715f2646c8f85767b5d2764bb2782699e9d85137db46ef4bbea33613baafd5 ParachainSystem: 0x45323df7cc47150b3930e2666b0aa31399e9d85137db46ef4bbea33613baafd5 XcmpQueue: 0x7b3237373ffdfeb1cab4222e3b520d6b99e9d85137db46ef4bbea33613baafd5 CumulusXcm: 0x79e2fe5d327165001f8232643023ed8b99e9d85137db46ef4bbea33613baafd5 DmpQueue: 0xcd5c1f6df63bc97f4a8ce37f14a50ca799e9d85137db46ef4bbea33613baafd5 PolkadotXcm: 0xe38f185207498abb5c213d0fb059b3d899e9d85137db46ef4bbea33613baafd5 Balances: 0xc2261276cc9d1f8598ea4b6a74b15c2f99e9d85137db46ef4bbea33613baafd5 TransactionPayment: 0x3f1467a096bcd71a5b6a0c8155e2081099e9d85137db46ef4bbea33613baafd5 Authorship: 0xd57bce545fb382c34570e5dfbf338f5e99e9d85137db46ef4bbea33613baafd5 CollatorSelection: 0x15464cac3378d46f113cd5b7a4d71c8499e9d85137db46ef4bbea33613baafd5 Session: 0xcec5070d609dd3497f72bde07fc96ba099e9d85137db46ef4bbea33613baafd5 Aura: 0x57f8dc2f5ab09467896f47300f04243899e9d85137db46ef4bbea33613baafd5 AuraExt: 0x3c311d57d4daf52904616cf69648081e99e9d85137db46ef4bbea33613baafd5 Identity: 0x2aeddc77fe58c98d50bd37f1b90840f999e9d85137db46ef4bbea33613baafd5 Democracy: 0xf2794c22e353e9a839f12faab03a911b99e9d85137db46ef4bbea33613baafd5 Council: 0xaebd463ed9925c488c112434d61debc099e9d85137db46ef4bbea33613baafd5 Treasury: 0x89d139e01a5eb2256f222e5fc5dbe6b399e9d85137db46ef4bbea33613baafd5 Bounties: 0xa37f719efab16103103a0c8c2c784ce199e9d85137db46ef4bbea33613baafd5 Lottery: 0xfbc9f53700f75f681f234e70fb7241eb99e9d85137db46ef4bbea33613baafd5 TechnicalCommittee: 0xed25f63942de25ac5253ba64b5eb64d199e9d85137db46ef4bbea33613baafd5 TechnicalMembership: 0x3a2d6c9353500637d8f8e3e0fa0bb1c599e9d85137db46ef4bbea33613baafd5 PhragmenElection: 0xe2e62dd81c48a88f73b6f6463555fd8e99e9d85137db46ef4bbea33613baafd5 Tips: 0x2c5de123c468aef7f3ac2ab3a76f87ce99e9d85137db46ef4bbea33613baafd5 ChildBounties: 0xedfb05b766f199ce00df85317e33050e99e9d85137db46ef4bbea33613baafd5 ChainBridge: 0x43cdcd39d5edb1d16e24fa028edde0de99e9d85137db46ef4bbea33613baafd5 XcmBridge: 0x9d0cdc3697970df81fa5fabe88fa03ea99e9d85137db46ef4bbea33613baafd5 XTransfer: 0xc0cf946351a2b7b37cc8f3086b3674a199e9d85137db46ef4bbea33613baafd5 PhalaMq: 0x2f039a6a7f13e94b9545257e54062a0499e9d85137db46ef4bbea33613baafd5 PhalaRegistry: 0x0d746931e7a6bfd47fbcccfd71984aef99e9d85137db46ef4bbea33613baafd5 PhalaComputation:
initialize_bridge
identifier_name
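// A minimal sketch of the naive ancestry walk in verify_ancestry above, using
// a toy header type instead of the Substrate Header trait: follow parent
// hashes through the proof headers until the ancestor hash is reached. The
// proof holds the headers strictly between the child and the ancestor,
// ordered from the child's parent downward.
#[derive(Clone)]
struct ToyHeader {
    hash: u64,
    parent: u64,
}

fn toy_verify_ancestry(proof: &[ToyHeader], ancestor: u64, child: &ToyHeader) -> bool {
    let mut parent = child.parent;
    if parent == ancestor {
        return true;
    }
    for h in proof {
        // The proof must be a contiguous chain starting at the child's parent.
        if h.hash != parent {
            return false;
        }
        parent = h.parent;
        if parent == ancestor {
            return true;
        }
    }
    false
}

// And a small usage sketch for utils::storage_prefix (assuming the sp-core
// and hex crates, both already used above): the prefix of a storage item is
// twox_128(module) followed by twox_128(item), e.g. for System::Account.
fn show_account_prefix() {
    let mut key = sp_core::twox_128(b"System").to_vec();
    key.extend(&sp_core::twox_128(b"Account")[..]);
    println!("System::Account prefix: 0x{}", hex::encode(key));
}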
codegen.rs
use super::*;
use crate::ops::cast::cast;
use crate::ops::math::add;
use crate::ops::matmul::lir_unary::{
    AddMatMulGeometry, LirMatMulUnary, MapOutputAxisToInput, ProtoFusedSpec,
};
use crate::ops::matmul::mir_quant::{
    combine_scales, compensate_zero_points, requant, wire_offset_u8_as_i8,
};
use crate::ops::matmul::pack::MatMatMulPack;
use crate::ops::nn::{Reduce, Reducer};

pub enum AxesOrPatch<'a> {
    Axes(&'a Axis, &'a Axis, &'a Axis),
    Patch(TypedModelPatch),
    NotAMatMul(&'a Axis),
}

pub(crate) fn codegen(
    op: &EinSum,
    model: &TypedModel,
    node: &TypedNode,
) -> TractResult<Option<TypedModelPatch>> {
    if (op.q_params.is_none() && node.inputs.len() != 2)
        || (op.q_params.is_some() && node.inputs.len() != 9)
    {
        return Ok(None);
    }
    let (m_axis, k_axis, n_axis) = match ensure_mkn_axes(op, model, node)? {
        AxesOrPatch::Axes(m, k, n) => (m, k, n),
        AxesOrPatch::Patch(p) => return Ok(Some(p)),
        AxesOrPatch::NotAMatMul(_) => return Ok(None),
    };
    if op.q_params.is_none() {
        lir_mat_mul_unary(op, model, node, (m_axis, k_axis, n_axis))
            .context("Translating to LirMatMul")
    } else {
        dequant(op, model, node, (m_axis, k_axis, n_axis)).context("Dequantize")
    }
}

pub(super) fn ensure_mkn_axes<'a>(
    op: &'a EinSum,
    model: &TypedModel,
    node: &TypedNode,
) -> TractResult<AxesOrPatch<'a>> {
    let input_facts = model.node_input_facts(node.id)?;
    let input_shapes: TVec<&[TDim]> = input_facts.iter().map(|f| &*f.shape).collect();
    let output_shape = super::eval::output_shape(&op.axes, &input_shapes);
    let candidate_k_axes: TVec<&Axis> = op
        .axes
        .iter_all_axes()
        // Filter the possible candidates: a k axis must appear exactly once in
        // each input, not appear in the output, and have matching dimensions.
        .filter(|a| {
            a.inputs[0].len() == 1
                && a.inputs[1].len() == 1
                && a.outputs[0].len() == 0
                && input_facts[0].shape[a.inputs[0][0]] == input_facts[1].shape[a.inputs[1][0]]
        })
        .collect();
    let non_trivial_k_axis = candidate_k_axes
        .iter()
        .filter(|a| input_facts[0].shape[a.inputs[0][0]] > 1.to_dim())
        .collect::<TVec<_>>();
    let k_axis = if non_trivial_k_axis.len() > 1 {
        // TODO: handle the case of multiple consecutive k axes appearing in
        // the same order in both inputs.
        bail!("Multiple k-axis candidates found");
    } else {
        non_trivial_k_axis.get(0).copied().or_else(|| candidate_k_axes.get(0)).copied()
    };
    let Some(k_axis) = k_axis else {
        return Ok(AxesOrPatch::Patch(inject_k_axis(op, model, node)?));
    };
    let m_axis = op
        .axes
        .iter_all_axes()
        .filter(|a| {
            a.inputs[0].len() == 1
                && (a.inputs[1].len() == 0 || input_facts[1].shape[a.inputs[1][0]].is_one())
                && a.outputs[0].len() == 1
        })
        .max_by_key(|a| &output_shape[a.outputs[0][0]]);
    let Some(m_axis) = m_axis else {
        return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(op, model, node, false, &[k_axis])?));
    };
    let n_axis = op
        .axes
        .iter_all_axes()
        .filter(|a| {
            (a.inputs[0].len() == 0 || input_facts[0].shape[a.inputs[0][0]].is_one())
                && a.inputs[1].len() == 1
                && a.outputs[0].len() == 1
        })
        .max_by_key(|a| &output_shape[a.outputs[0][0]]);
    let Some(n_axis) = n_axis else {
        return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(
            op,
            model,
            node,
            true,
            &[k_axis, m_axis],
        )?));
    };
    for axis in op.axes.iter_all_axes() {
        let one = TDim::one();
        let in_left =
            axis.inputs[0].first().map(|pos| &input_facts[0].shape[*pos]).unwrap_or(&one) != &one;
        let in_right =
            axis.inputs[1].first().map(|pos| &input_facts[1].shape[*pos]).unwrap_or(&one) != &one;
        let in_out =
            axis.outputs[0].first().map(|pos| &output_shape[*pos]).unwrap_or(&one) != &one;
        if (in_left ^ in_right) && !in_out {
            return Ok(AxesOrPatch::NotAMatMul(axis));
        }
    }
    Ok(AxesOrPatch::Axes(m_axis, k_axis, n_axis))
}

pub(super) fn inject_k_axis(
    op: &EinSum,
    model: &TypedModel,
    node: &TypedNode,
) -> TractResult<TypedModelPatch> {
    let mut new_axes = op.axes.clone();
    let name = &node.name;
    let mut patch = TypedModelPatch::new("inject k axis");
    let mut wire = patch.taps(model, &node.inputs)?;
    let repr = new_axes.available_label();
    new_axes = new_axes.with_extra_axis(repr, InOut::In(0), 0)?.with_extra_axis_occurency(
        repr,
        InOut::In(1),
        0,
    )?;
    wire[0] = patch.wire_node(format!("{name}.add_k.0"), AxisOp::Add(0), &[wire[0]])?[0];
    wire[1] = patch.wire_node(format!("{name}.add_k.1"), AxisOp::Add(0), &[wire[1]])?[0];
    wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?;
    patch.shunt_outside(model, node.id.into(), wire[0])?;
    Ok(patch)
}

pub(super) fn inject_m_or_n_axis(
    op: &EinSum,
    model: &TypedModel,
    node: &TypedNode,
    is_n: bool,
    exclude: &[&Axis],
) -> TractResult<TypedModelPatch> {
    let input_to_fix = is_n as usize;
    let label = if is_n { "n" } else { "m" };
    let input_facts = model.node_input_facts(node.id)?;
    let quasi_m_or_n_axis = op.axes.iter_all_axes().filter(|a| !exclude.contains(a)).find(|a| {
        (a.inputs[1 - input_to_fix].len() == 0
            || input_facts[1 - input_to_fix].shape[a.inputs[1 - input_to_fix][0]].is_one())
            && (a.inputs[input_to_fix].len() == 1 || a.outputs[0].len() == 1)
    });
    let name = &node.name;
    let mut patch = TypedModelPatch::new("Injecting m or n axis");
    let mut wire = patch.taps(model, &node.inputs)?;
    if let Some(axis) = quasi_m_or_n_axis {
        if axis.inputs[input_to_fix].len() == 1 {
            let new_axes =
                op.axes.clone().with_extra_axis('$', InOut::Out(0), 0)?.linking(axis.repr, '$')?;
            wire = patch.wire_node(
                format!("{name}.einsum"),
                EinSum { axes: new_axes, ..op.clone() },
                &wire,
            )?;
            wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?;
        } else {
            let new_axes = op
                .axes
                .clone()
                .with_extra_axis('$', InOut::In(input_to_fix), 0)?
                .linking(axis.repr, '$')?;
            wire[input_to_fix] = patch.wire_node(
                format!("{name}.add_{label}"),
                AxisOp::Add(0),
                &[wire[input_to_fix]],
            )?[0];
            wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?;
        }
    } else {
        let repr = op.axes.available_label();
        let new_axes = op
            .axes
            .clone()
            .with_extra_axis(repr, InOut::In(input_to_fix), 0)?
            .with_extra_axis('$', InOut::Out(0), 0)?
            .linking(repr, '$')?;
        wire[input_to_fix] = patch.wire_node(
            format!("{name}.add_{label}"),
            AxisOp::Add(0),
            &[wire[input_to_fix]],
        )?[0];
        wire = patch.wire_node(
            format!("{name}.einsum"),
            EinSum { axes: new_axes, ..op.clone() },
            &wire,
        )?;
        wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?;
    }
    patch.shunt_outside(model, node.id.into(), wire[0])?;
    Ok(patch)
}

fn wire_axes_fix(
    patch: &mut TypedModelPatch,
    name: &str,
    var: &str,
    mapping: &AxesMapping,
    mut outlet: TVec<OutletId>,
) -> TractResult<TVec<OutletId>> {
    for (ix, axis_op) in mapping.translate_to_axis_ops()?.into_iter().enumerate() {
        outlet = patch.wire_node(format!("{name}.fix_{var}.{ix}"), axis_op, &outlet)?;
    }
    Ok(outlet)
}

fn dequant(
    op: &EinSum,
    model: &TypedModel,
    node: &TypedNode,
    (_, k_axis, _): (&Axis, &Axis, &Axis),
) -> TractResult<Option<TypedModelPatch>> {
    let name = &node.name;
    let mut patch = TypedModelPatch::new("Dequantizing einsum");
    let taps = patch.taps(model, &node.inputs)?;
    let [a, b, bias, mut a0, mut a_scale, mut b0, b_scale, c0, c_scale] = *taps else {
        bail!("Expect exactly 9 inputs")
    };
    if !patch.outlet_fact(a_scale)?.shape.volume().is_one() {
        let q_axis_in_output = op.axes.axis((InOut::In(4), 0))?.outputs[0][0];
        let output_rank = node.outputs[0].fact.rank();
        for i in 1..(output_rank - q_axis_in_output) {
            a_scale = patch.wire_node(
                format!("{name}.a_scale_axis_fix_{i}"),
                AxisOp::Add(i),
                &[a_scale],
            )?[0];
        }
    }
    let a = wire_offset_u8_as_i8(&mut patch, &node.name, a, "a", &mut a0, "a0")?;
    let b = wire_offset_u8_as_i8(&mut patch, &node.name, b, "b", &mut b0, "b0")?;
    let mut output = patch.wire_node(
        &node.name,
        EinSum {
            q_params: None,
            axes: op.axes.extract_sub_mapping(&[0, 1], &[0])?,
            operating_dt: op.operating_dt,
        },
        &[a, b],
    )?;
    let a_i32 = patch.wire_node(format!("{name}.a_as_i32"), cast(i32::datum_type()), &[a])?[0];
    let b_i32 = patch.wire_node(format!("{name}.b_as_i32"), cast(i32::datum_type()), &[b])?[0];
    let sum_a = patch.wire_node(
        format!("{name}.sum_a"),
        Reduce::new(tvec!(k_axis.inputs[0][0]), Reducer::Sum),
        &[a_i32],
    )?;
    let sum_b = patch.wire_node(
        format!("{name}.sum_b"),
        Reduce::new(tvec!(k_axis.inputs[1][0]), Reducer::Sum),
        &[b_i32],
    )?;
    let sum_a =
        wire_axes_fix(&mut patch, name, "sum_a", &op.axes.extract_sub_mapping(&[0], &[0])?, sum_a)?;
    let sum_b =
        wire_axes_fix(&mut patch, name, "sum_b", &op.axes.extract_sub_mapping(&[1], &[0])?, sum_b)?;
    let bias = tvec!(bias);
    let bias =
        wire_axes_fix(&mut patch, name, "bias", &op.axes.extract_sub_mapping(&[2], &[0])?, bias)?;
    let abc_scale = combine_scales(&mut patch, name, a_scale, b_scale, c_scale)?;
    output = patch.wire_node(format!("{name}.add_bias"), add(), &[output[0], bias[0]])?;
    let k = model.outlet_fact(node.inputs[0])?.shape[k_axis.inputs[0][0]].clone();
    let output = compensate_zero_points(&mut patch, name, output[0], k, a0, b0, sum_a[0], sum_b[0])
        .context("Zero point compensation")?;
    let output = requant(&mut patch, name, output, op.q_params.unwrap(), abc_scale, c0)?;
    patch.shunt_outside(model, node.id.into(), output)?;
    Ok(Some(patch))
}

fn lir_mat_mul_unary(
    op: &EinSum,
    model: &TypedModel,
    node: &TypedNode,
    (m_axis, k_axis, n_axis): (&Axis, &Axis, &Axis),
) -> TractResult<Option<TypedModelPatch>> {
    let input_facts = model.node_input_facts(node.id)?;
    let a_m = m_axis.inputs[0][0];
    let a_k = k_axis.inputs[0][0];
    let b_n = n_axis.inputs[1][0];
    let b_k = k_axis.inputs[1][0];
    let c_m = m_axis.outputs[0][0];
    let c_n = n_axis.outputs[0][0];
    let m = &input_facts[0].shape[a_m];
    let k = &input_facts[0].shape[a_k];
    let n = &input_facts[1].shape[b_n];
    if m < n {
        // Swap the inputs so that the larger dimension ends up as m.
        let expr = op
            .axes
            .iter_all_axes()
            .map(|axis| {
                let mut axis = axis.clone();
                axis.inputs.swap(0, 1);
                axis
            })
            .collect::<TVec<Axis>>();
        return TypedModelPatch::replace_single_op(
            model,
            node,
            &[node.inputs[1], node.inputs[0]],
            EinSum { axes: AxesMapping::new(node.inputs.len(), 1, expr)?, ..op.clone() },
        )
        .map(Some);
    }
    let a_dt = input_facts[0].datum_type;
    let b_dt = input_facts[1].datum_type;
    let dt = op.operating_dt;
    let mmm = tract_linalg::ops()
        .mmm(a_dt, b_dt, dt, m.to_usize().ok(), k.to_usize().ok(), n.to_usize().ok())
        .unwrap();
    let name = &node.name;
    let mut patch = TypedModelPatch::new("Einsum to LirMatMulUnary");
    let a = patch.tap_model(model, node.inputs[0])?;
    let b = patch.tap_model(model, node.inputs[1])?;
    let pack_a = MatMatMulPack { packer: mmm.a_pack(), k_axis: a_k, mn_axis: a_m };
    let pack_b = MatMatMulPack { packer: mmm.b_pack(), k_axis: b_k, mn_axis: b_n };
    let pa = patch.wire_node(format!("{name}.pack_a"), pack_a, &[a])?[0];
    let pb = patch.wire_node(format!("{name}.pack_b"), pack_b, &[b])?[0];
    let mut c_to_a_axis_mapping = tvec!();
    let mut c_to_b_axis_mapping = tvec!();
    for axis in op.axes.iter_all_axes().filter(|&axis| ![m_axis, k_axis, n_axis].contains(&axis)) {
        if let (&[c], &[a]) = (&*axis.outputs[0], &*axis.inputs[0]) {
            if input_facts[0].shape[a] != 1.to_dim() {
                let a = a - (a > a_m) as usize - (a > a_k) as usize;
                c_to_a_axis_mapping.push((c, a));
            }
        }
        if let (&[c], &[b]) = (&*axis.outputs[0], &*axis.inputs[1]) {
            if input_facts[1].shape[b] != 1.to_dim() {
                let b = b - (b > b_n) as usize - (b > b_k) as usize;
                c_to_b_axis_mapping.push((c, b));
            }
        }
    }
    let c_fact = op.output_facts(&input_facts)?.remove(0);
    let geo = AddMatMulGeometry {
        k: k.to_dim(),
        a_storage: None,
        b_storage: None,
        mmm: mmm.clone(),
        c_to_a_axis_mapping: MapOutputAxisToInput(c_to_a_axis_mapping),
        c_to_b_axis_mapping: MapOutputAxisToInput(c_to_b_axis_mapping),
    };
    let output = unsafe { mmm.c_view(c_m, c_n) };
    let lir = LirMatMulUnary::new(
        mmm,
        c_fact,
        c_m,
        c_n,
        vec![ProtoFusedSpec::AddMatMul(geo, 0, 1), ProtoFusedSpec::Store(output)],
    )
    .context("Creating LirMatMulUnary")?;
    let output = patch.wire_node(name, lir, &[pa, pb])?[0];
    patch.shunt_outside(model, node.id.into(), output)?;
    Ok(Some(patch))
}
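// A numeric sketch (toy values) of the zero-point compensation identity that
// dequant wires up through compensate_zero_points:
//     sum_k (a - a0) * (b - b0)
//   = sum_k a*b - a0 * sum_k b - b0 * sum_k a + K * a0 * b0
// so the quantized product can be computed as a raw integer matmul plus
// correction terms built from the row and column sums.
fn main() {
    let a: [i32; 3] = [12, 7, 9]; // one row of A, already widened to i32
    let b: [i32; 3] = [3, 5, 8]; // one column of B
    let (a0, b0) = (10, 4); // zero points
    let k = a.len() as i32;

    // Reference path: subtract the zero points first, then accumulate.
    let reference: i32 = a.iter().zip(&b).map(|(x, y)| (x - a0) * (y - b0)).sum();

    // Compensated path: raw matmul plus the correction terms.
    let raw: i32 = a.iter().zip(&b).map(|(x, y)| x * y).sum();
    let (sum_a, sum_b): (i32, i32) = (a.iter().sum(), b.iter().sum());
    let compensated = raw - a0 * sum_b - b0 * sum_a + k * a0 * b0;

    assert_eq!(reference, compensated); // both give -9 for these values
    println!("zero-point compensation checks out: {reference}");
}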
dequant
identifier_name
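// A self-contained sketch (toy presence flags, not tract's AxesMapping API) of
// the axis classification rule behind ensure_mkn_axes: for a binary einsum,
// k appears in both inputs but not in the output, m appears in input A and
// the output only, and n appears in input B and the output only.
#[derive(Debug, PartialEq)]
enum Role {
    M,
    K,
    N,
    Other,
}

// Presence flags (in_a, in_b, in_out) for one axis, with size > 1 assumed.
fn classify(in_a: bool, in_b: bool, in_out: bool) -> Role {
    match (in_a, in_b, in_out) {
        (true, true, false) => Role::K,
        (true, false, true) => Role::M,
        (false, true, true) => Role::N,
        _ => Role::Other,
    }
}

fn main() {
    // "mk,kn->mn": m -> M, k -> K, n -> N.
    assert_eq!(classify(true, false, true), Role::M);
    assert_eq!(classify(true, true, false), Role::K);
    assert_eq!(classify(false, true, true), Role::N);
    // An axis present in only one input and absent from the output is the
    // situation ensure_mkn_axes reports as AxesOrPatch::NotAMatMul.
    assert_eq!(classify(true, false, false), Role::Other);
    println!("axis classification ok");
}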
game.rs
use std::convert::TryFrom;
use hdk::{
    utils,
    entry_definition::ValidatingEntryType,
    error::{ZomeApiResult, ZomeApiError},
    holochain_persistence_api::{
        cas::content::{AddressableContent, Address},
    },
    holochain_json_api::{
        error::JsonError,
        json::JsonString,
    },
    holochain_core_types::{
        dna::entry_types::Sharing,
        validation::EntryValidationData,
        entry::Entry,
        link::LinkMatch,
    }
};

use crate::game_move::Move;
use crate::GameState;

#[derive(Clone, Debug, Serialize, Deserialize, DefaultJson)]
pub struct Game {
    pub player_1: Address,
    pub player_2: Address,
    pub created_at: u32,
}

/*=====================================
=            DHT Functions            =
=====================================*/

/// Traverse the linked list rooted at a game to find all the moves
pub fn get_moves(game_address: &Address) -> ZomeApiResult<Vec<Move>> {
    match hdk::get_links(game_address, LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() {
        /* get_links returns a ZomeApiResult<GetLinksResult>.
         * It retrieves the entries that are linked to the address given as its first argument.
         * Since ZomeApiResult is a Result<T, ZomeApiError> (where T in this case is GetLinksResult),
         * we can use the `?` sugar to propagate the ZomeApiError on failure and unwrap the T when get_links succeeds.
         * GetLinksResult has a method called addresses() which returns a vector of Addresses.
         * into_iter() moves the values out of that vector into an Iterator,
         * and next() returns the next value of the Iterator (starting at index 0) as an Option<Self::Item>.
         * Since next() returns an Option<Self::Item>, we can use the match operator to handle all possible values of Option<Self::Item>.
         */
        Some(first_move) => {
            let mut move_addresses = vec![first_move];
            let mut more = true;
            while more {
                more = match hdk::get_links(move_addresses.last().unwrap(), LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() {
                    Some(addr) => {
                        move_addresses.push(addr.clone());
                        true
                    },
                    None => {
                        false
                    },
                }
            }
            /* In this match operator, we first handle Some(first_move). The name is first_move because
             * the Game entry is always linked to the first move made by Player 2.
             * So we store this first move in a vector bound to the variable move_addresses.
             * Then we use a while loop to collect all the game_move entries that are linked, starting from first_move.
             * While more is true, we get the entries linked to the first move, then the next move, and so on,
             * until we have walked all the linked moves. The way this works is: as the first argument of get_links,
             * we take the very last element of the move_addresses vector using last(), which returns an Option<&T>.
             * Since we want the Address itself wrapped in that Option<&T>, we use unwrap() to get the value of the Address.
             * This way the last address stored in move_addresses is always our first argument to get_links.
             * Then, as above, we move the values from the vector of addresses into an Iterator and take the value with next().
             * We run the match operator again, storing the address in move_addresses with push(), then returning true to run the loop again.
             * Since next() returns None when there is no more value to be retrieved from the Iterator, we return false in the None arm,
             * so that the loop ends after we get all the moves that are linked together.
             */
            let moves: Vec<Move> = move_addresses.iter().map(|addr| {
                let move_entry = hdk::get_entry(addr).unwrap().unwrap();
                if let Entry::App(_, move_struct) = move_entry {
                    Move::try_from(move_struct).expect("Entry at address is type other than Move")
                } else {
                    panic!("Not an app entry!")
                }
            }).collect();
            /* Now that we have a vector of addresses for all the connected moves, we retrieve the data itself, which can
             * be found at the Addresses we collected. First, we create a variable named moves of type Vec<Move>. We
             * call the iter() method on move_addresses (note that we use iter() instead of into_iter() because we don't want
             * to move the values out of move_addresses, but rather borrow references to the addresses it holds) and then
             * use the map() method provided by Iterator. map() takes a closure and creates an iterator which calls that closure on each element.
             * The closure takes addr as an argument. It creates a variable named move_entry by calling the
             * get_entry method, which takes an Address (HashString) and returns ZomeApiResult<Option<Entry>>. We unwrap it twice to
             * retrieve the Entry itself. We then use if let to match move_entry against the Entry::App variant. This is because the Entry
             * enum has several variants and we need to make sure that the entry found at this address is an App variant. If not,
             * we panic in the else branch, saying that it is not an app entry. If it is an app entry, we use the try_from method
             * to convert the Entry::App, which we assume holds the Move struct in the second element of the App
             * variant (here named move_struct) as its AppEntryValue, into an actual Move struct. If try_from fails, we throw an error
             * saying the Entry at the given address is a type other than Move. After the closure has been called on all addresses in move_addresses,
             * we use collect() to turn the results into a Vec<Move>. collect() knows that the items should be collected into a Vec<Move>,
             * since that is the declared type of moves.
             */
            Ok(moves)
        },
        None => {
            Ok(Vec::new())
        }
    }
}

pub fn get_state(game_address: &Address) -> ZomeApiResult<GameState> {
    let moves = get_moves(game_address)?;
    let game = get_game(game_address)?;
    let new_state = moves.iter().fold(GameState::initial(), |state, new_move| state.evolve(game.clone(), new_move));
    Ok(new_state)
    /* get_state takes the address of the game as a parameter and returns a ZomeApiResult<GameState>. This is a reducer function.
     * First we create a variable named moves and call get_moves with game_address as the argument.
     * Thanks to the `?` operator on the get_moves() call, it yields the value T of Result<T, ZomeApiError> if nothing goes wrong;
     * T in this case is Vec<Move>, which is therefore the type of the moves variable. Next we create the game variable and call get_game
     * with game_address as its argument. get_game also returns a ZomeApiResult with Game as the success value, so we
     * use `?` again to get the Game struct if no error occurs. With moves and game holding the values we need, we create
     * a variable named new_state and call iter() on moves to turn it into an Iterator, so that we can call the fold() method on it.
     * fold() takes two arguments: an initial value, and a closure with two arguments: an 'accumulator', and an element.
     * The closure returns the value that the accumulator should have for the next iteration.
     * In this case, the initial value is an empty GameState created with the initial() function we associated with GameState.
     * The accumulator, named state, holds that initial value (the empty GameState), and new_move is each Move stored in moves.
     * We then call the evolve() method we associated with GameState in state.rs. evolve takes self, a Game struct, and a &Move,
     * so we pass a clone of game as the first argument and a reference to each move as new_move (automatically a reference,
     * since the closure passed to fold implements FnMut). This evolve method applies every Move in moves to the
     * GameState, and the result is stored in new_state, which we then return as Ok(new_state).
     */
}

pub fn get_game(game_address: &Address) -> ZomeApiResult<Game> {
    utils::get_as_type(game_address.to_owned())
    /* get_as_type loads an entry from the address given in the argument and converts it to a given type, wrapped in ZomeApiResult. In this case,
     * Rust infers that the type is Game, since that is the return value of the get_game function, so the entry loaded from the
     * given address is converted to ZomeApiResult<Game>.
     */
}

/*=====  End of DHT Functions  ======*/

/*=============================================
=            Local chain functions            =
=============================================*/

pub fn get_game_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Game> {
    local_chain
        .iter()
        .filter(|entry| {
            entry.address() == game_address.to_owned()
        })
        .filter_map(|entry| {
            if let Entry::App(_, entry_data) = entry {
                Some(Game::try_from(entry_data.clone()).unwrap())
            } else {
                None
            }
        })
        .next()
        .ok_or(ZomeApiError::HashNotFound)
    /* get_game_local_chain() takes all the Entries in the local_chain as well as the address of the game, and returns a ZomeApiResult<Game>.
     * We call the iter() method on the local_chain so that we can call the filter() method. filter() creates an iterator
     * which uses a closure to determine whether an element should be yielded. The closure must return true or false: if it returns
     * true for an element, filter() yields that element; if it returns false, it simply runs the same closure on the next element.
     * Here, filter's closure checks whether the address of each element found in the local_chain is equal to the game_address,
     * using the address() method provided for the Entry type in the hdk. We need to clone
     * the game_address because we are passing a reference in the parameter and we can't compare a reference to an actual value (not 100% sure,
     * correct me if I'm wrong). If the address of the entry matches the game_address passed in the parameter, we yield that entry.
     * After getting all the elements that have the address game_address, we apply the filter_map() method, which filters and then maps.
     * filter_map() takes a closure as an argument which has to return Option<T>. If the closure returns Some(element), the
     * element is yielded. If the closure returns None, it is skipped and the closure is tried on the next element of the local_chain. Inside the closure,
     * we use if let to make sure that each element is an Entry::App variant. If not, we return None; if it is, we use the try_from()
     * method on the entry_data found in the Entry::App and convert it to the Game struct, because at this point we are sure that the element
     * is an Entry::App variant that holds the Game struct as its AppEntryValue.
/*=============================================
=            Local chain functions            =
=============================================*/

pub fn get_game_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Game> {
    local_chain
        .iter()
        .filter(|entry| {
            entry.address() == game_address.to_owned()
        })
        .filter_map(|entry| {
            if let Entry::App(_, entry_data) = entry {
                Some(Game::try_from(entry_data.clone()).unwrap())
            } else {
                None
            }
        })
        .next()
        .ok_or(ZomeApiError::HashNotFound)
    /* get_game_local_chain() receives all the Entry values in the local_chain as well as the address of the game, and
     * returns ZomeApiResult<Game>. We call iter() on local_chain so that we can call filter(). filter() creates an
     * iterator which uses a closure to decide whether an element should be yielded: the closure must return true or
     * false, and filter() yields the element when the closure returns true; otherwise it runs the closure on the next
     * element. Here, filter's closure checks whether the address of each element in local_chain equals game_address,
     * using the address() method the hdk provides for the Entry type. We clone game_address (via to_owned()) because
     * the parameter is a reference and we cannot compare a reference against an owned value directly. If an entry's
     * address matches the game_address passed in the parameter, filter() yields that entry. Over the matching
     * elements, we then apply filter_map(), which filters and maps in one pass. filter_map() takes a closure that must
     * return Option<T>: when the closure returns Some(element), the element is yielded; when it returns None, the
     * element is skipped and the closure runs on the next one. Inside the closure, we use if let to make sure each
     * element is an Entry::App variant; if not, we return None. If it is, we call try_from() on the entry_data found
     * in the Entry::App and convert it to the Game struct, since at this point we are confident the element is an
     * Entry::App holding the Game struct as its AppEntryValue. try_from returns Result<Self, Self::Error>, so we use
     * unwrap to get the Self value, which here is Game. Because we expect only one match for the given game_address,
     * we use next() to take that single element. next() returns Option<T>, so we use ok_or() to turn the Option<T>
     * into Result<T, E>, with E being ZomeApiError::HashNotFound, indicating that the game_address provided in the
     * parameter did not match any entry in the local_chain. We return ZomeApiError::HashNotFound because ZomeApiResult
     * expects some variant of ZomeApiError as its error value.
     */
}
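// get_game_local_chain above leans on filter_map to select one enum variant and convert it in a
// single pass. The test below shows that pattern on a made-up Item enum; all names here are
// invented for the illustration.
#[cfg(test)]
mod filter_map_example {
    enum Item {
        Wanted(u32),
        Other,
    }

    #[test]
    fn filter_map_selects_and_converts_in_one_pass() {
        let items = vec![Item::Wanted(1), Item::Other, Item::Wanted(2)];
        let wanted: Vec<u32> = items
            .iter()
            .filter_map(|item| {
                if let Item::Wanted(n) = item { Some(*n) } else { None }
            })
            .collect();
        assert_eq!(wanted, vec![1, 2]);
    }
}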
pub fn get_moves_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Vec<Move>> {
    Ok(local_chain
        .iter()
        .filter_map(|entry| {
            if let Entry::App(entry_type, entry_data) = entry {
                if entry_type.to_string() == "move" {
                    Some(Move::try_from(entry_data.clone()).unwrap())
                } else {
                    None
                }
            } else {
                None
            }
        })
        .filter(|game_move| {
            game_move.game == game_address.to_owned()
        })
        .rev()
        .collect())
    /* This one is similar to get_game_local_chain. It takes the local_chain entries and the game_address as parameters
     * and returns a vector of Move wrapped in ZomeApiResult. We call iter() again, then use filter_map() to narrow the
     * local chain entries down to Entry::App variants: if the entry_type (the first element of the App variant) equals
     * "move", we convert the entry with try_from and wrap it in Some(); otherwise we return None, and we also return
     * None for anything that is not an Entry::App variant at all. Having collected every entry whose entry_type is
     * "move", we still need to keep only the moves that belong to the game at the given game_address. That is what the
     * following filter() is for: it checks whether the game field of each "move" entry equals the game_address passed
     * in the parameter. Finally we call rev() to reverse the iteration order before collect() gathers the results into
     * Vec<Move>. // To verify: why use rev()? Presumably because the local chain is traversed newest-first, so
     * reversing restores chronological move order, but this should be confirmed.
     */
}

pub fn get_state_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<GameState> {
    let moves = get_moves_local_chain(local_chain.clone(), game_address)?;
    let game = get_game_local_chain(local_chain, game_address)?;
    let new_state = moves.iter().fold(GameState::initial(), move |state, new_move| state.evolve(game.clone(), new_move));
    Ok(new_state)
    /* get_state_local_chain is similar to the get_state function. It takes local_chain and game_address as parameters
     * and returns the GameState. We first fetch all the moves associated with the given game_address using
     * get_moves_local_chain and store them in the moves variable. Then we fetch the Game struct found at game_address
     * using get_game_local_chain. We create new_state by calling iter() on moves and then fold(): fold() starts from
     * an empty GameState and calls the evolve method on the accumulated state (stored in state) once per Move. The
     * move keyword before the closure means the closure takes ownership of the variables it captures rather than
     * borrowing them. To verify: it is not entirely clear which captured variables are being moved here rather than
     * borrowed (most likely game, which the closure captures from the enclosing scope).
     */
}

/*===== End of Local chain functions ======*/

pub fn definition() -> ValidatingEntryType {
    entry!(
        name: "game",
        description: "Represents an occurrence of a game between several agents",
        sharing: Sharing::Public,
        validation_package: || {
            hdk::ValidationPackageDefinition::Entry
        },
        validation: |validation_data: hdk::EntryValidationData<Game>| {
            match validation_data {
                EntryValidationData::Create { entry, validation_data: _ } => {
                    // Note: the original wrote `let game = entry as Game;`, but `as` casts do not
                    // apply to struct types; `entry` is already a Game, so a plain binding suffices.
                    let game: Game = entry;
                    if game.player_1 == game.player_2 {
                        return Err("Player 1 and Player 2 must be different agents.".into())
                    }
                    Ok(())
                },
                _ => {
                    Err("Cannot modify or delete a game".into())
                }
            }
        }
    )
}
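// A tiny sketch of the create-validation rule above, checked without the hdk machinery: a game is
// valid only if the two players differ. The plain-string "addresses" are stand-ins invented for
// this illustration.
#[cfg(test)]
mod validation_rule_example {
    fn validate(player_1: &str, player_2: &str) -> Result<(), String> {
        if player_1 == player_2 {
            return Err("Player 1 and Player 2 must be different agents.".into());
        }
        Ok(())
    }

    #[test]
    fn rejects_a_game_where_both_players_are_the_same_agent() {
        assert!(validate("alice", "alice").is_err());
        assert!(validate("alice", "bob").is_ok());
    }
}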
server.rs
use crate::adapter::Adapter;
use crate::socket::{
    subscribe_socket_to_transport_events, Callback, Socket, SocketCloseReason, SocketEvent,
};
use crate::transport::{Transport, TransportCreateData, TransportKind};
use crate::util::{HttpMethod, RequestContext, SendPacketError, ServerError, SetCookie};
use dashmap::DashMap;
use engine_io_parser::packet::{Packet, PacketData};
use std::sync::Arc;
use std::sync::Mutex;
use tokio::sync::{broadcast, mpsc};
use uuid::Uuid;

pub const BUFFER_CONST: usize = 32;

#[derive(Debug, Clone, PartialEq)]
pub struct ServerOptions {
    pub ping_timeout: u32,
    pub ping_interval: u32,
    pub upgrade_timeout: u32,
    pub transports: Vec<TransportKind>,
    pub allow_upgrades: bool,
    pub initial_packet: Option<Packet>,
    // TODO: implement this
    // pub allow_request: Option<Box<dyn (Fn() -> bool) + Send + 'static>>,
    pub cookie: Option<CookieOptions>,
    // TODO: node ws-specific options:
    // - maxHttpBufferSize
    // - perMessageDeflate
    // - httpCompression
    // - cors
    pub buffer_factor: usize,
}

#[derive(Debug, Clone, PartialEq)]
pub struct CookieOptions {
    pub name: String,
    pub path: String,
    pub http_only: bool,
}

#[derive(Debug, Clone)]
pub struct EventSenders {
    /// Event sender to the external owner of this server
    server: bmrng::RequestSender<ServerEvent, Packet>,
    // server: broadcast::Sender<ServerEvent>,
    /// Event sender to Socket instances. Cloned and passed over
    client: mpsc::Sender<SocketEvent>,
}

#[derive(Debug)]
pub enum ServerState {
    Unsubscribed {
        socket_event_receiver: mpsc::Receiver<SocketEvent>,
        engine_event_receiver: bmrng::RequestReceiver<ServerEvent, Packet>,
    },
    Subscribed,
}

pub struct Server<A: 'static + Adapter> {
    state: Arc<Mutex<ServerState>>,
    // TODO: don't use a mutex here, instead have an internal socket state
    clients: Arc<DashMap<String, Arc<Socket<A>>>>,
    event_senders: EventSenders,
    // TODO: ping timeout handler EngineIoSocketTimeoutHandler
    pub options: ServerOptions,
}

impl Default for ServerOptions {
    fn default() -> Self {
        ServerOptions {
            ping_timeout: 5000,
            ping_interval: 25000,
            upgrade_timeout: 10000,
            transports: vec![TransportKind::WebSocket, TransportKind::Polling],
            allow_upgrades: true,
            initial_packet: None,
            cookie: Some(CookieOptions::default()),
            // allow_request: None,
            buffer_factor: 2,
        }
    }
}

impl Default for CookieOptions {
    fn default() -> Self {
        CookieOptions {
            name: "io".to_owned(),
            path: "/".to_owned(),
            http_only: true,
        }
    }
}

#[derive(Display, Debug, Clone, PartialEq)]
pub enum ServerEvent {
    /// Socket ID
    Connection {
        connection_id: String,
    },
    Close {
        connection_id: String,
        reason: SocketCloseReason,
    },
    Flush {
        connection_id: String,
    },
    Drain {
        connection_id: String,
    },
    Message {
        connection_id: String,
        context: Arc<RequestContext>,
        data: PacketData,
    },
    Error {
        connection_id: String,
    },
}

impl<A: 'static + Adapter> Server<A> {
    pub fn new(options: ServerOptions) -> Self {
        // To listen to events from socket instances
        let (client_event_sender, client_event_receiver) =
            mpsc::channel(options.buffer_factor * BUFFER_CONST);
        // To send events to the owner of this Server instance
        let (server_event_sender, server_event_receiver) =
            bmrng::channel(options.buffer_factor * BUFFER_CONST);
        Server {
            state: Arc::new(Mutex::new(ServerState::Unsubscribed {
                socket_event_receiver: client_event_receiver,
                engine_event_receiver: server_event_receiver,
            })),
            clients: Arc::new(DashMap::new()),
            event_senders: EventSenders {
                server: server_event_sender,
                client: client_event_sender,
            },
            options,
        }
    }

    pub fn subscribe(&self) -> bmrng::RequestReceiver<ServerEvent, Packet> {
        self.try_subscribe()
            .expect("Already subscribed to engine_io_server::Server")
    }
    pub fn try_subscribe(
        &self,
    ) -> Result<bmrng::RequestReceiver<ServerEvent, Packet>, AlreadySubscribedError> {
        let mut state = self.state.lock().unwrap();
        let old_state = std::mem::replace(&mut *state, ServerState::Subscribed);
        match old_state {
            ServerState::Subscribed => Err(AlreadySubscribedError),
            ServerState::Unsubscribed {
                socket_event_receiver,
                engine_event_receiver,
            } => {
                // First time calling subscribe; also start listening to events from `Socket` instances
                self.subscribe_to_socket_events(socket_event_receiver);
                Ok(engine_event_receiver)
            }
        }
        // TODO: handle shutdown properly by receiving a shutdown signal
        // and sending it to socket instances.
    }

    pub async fn close(&self) {
        // TODO: consider sending signals or dropping channels instead of closing them like this?
        // TODO: or drop the whole thing. The server, the sockets, everything.
        todo!();
        // for socket in self.clients.iter() {
        //     socket.value().close(true);
        // }
    }

    pub async fn close_socket(&self, connection_id: &str) {
        if let Some((_key, socket)) = self.clients.remove(connection_id) {
            // TODO: convert this to drop
            todo!();
            // socket.close(true);
        }
    }

    // TODO: consider converting ack callbacks into optional async Results?
    // `connection_id` is an owned string just because of a Rust compiler issue.
    pub async fn send_packet_with_ack(
        &self,
        connection_id: String,
        packet: Packet,
        callback: Option<Callback>,
    ) -> Result<(), SendPacketError> {
        match self.clients.get(&connection_id) {
            // Forward the ack callback to the socket; the original passed None here,
            // which silently dropped the caller's callback.
            Some(client) => Ok(client.send_packet(packet, callback).await),
            None => Err(SendPacketError::UnknownConnectionId),
        }
    }

    pub async fn send_packet(
        &self,
        connection_id: String,
        packet: Packet,
    ) -> Result<(), SendPacketError> {
        match self.clients.get(&connection_id) {
            Some(client) => Ok(client.send_packet(packet, None).await),
            None => Err(SendPacketError::UnknownConnectionId),
        }
    }

    pub async fn handle_request(
        &self,
        context: RequestContext,
        body: Option<A::Body>,
    ) -> Result<A::Response, ServerError> {
        let context = Arc::new(context);
        let sid_ref = context.query.get("sid");
        let sid = sid_ref.map(|s| s.to_owned());
        self.verify_request(sid_ref, false, context.transport_kind, context.http_method)
            .await?;
        if let Some(sid) = sid {
            let client = self.get_client_or_error(&sid)?;
            let response = client.handle_polling_request(context.clone(), body).await?;
            Ok(response)
        } else {
            let (_sid, response) = self.handshake(context, HandshakeData::Polling).await?;
            Ok(response)
        }
    }
    /// Akin to `onWebSocket` from engine.io js
    // TODO: handle errors, socket closure etc.
    pub async fn handle_upgrade(&self, context: RequestContext, socket: A::WsHandle) {
        let context = Arc::new(context);
        let sid_ref = context.query.get("sid");
        let sid = sid_ref.map(|s| s.to_owned());
        if let Some(sid) = sid {
            // TODO: don't panic
            let client = self.get_client_or_error(&sid).expect("TODO: fix this");
            client.maybe_upgrade(context, todo!());
            // TODO: implement this!
            // let client =
            // TODO: call socket.maybe_upgrade()
        } else {
            self.handshake(context, HandshakeData::WebSocket { socket })
                .await;
            todo!();
        }
    }

    pub async fn verify_request(
        &self,
        sid: Option<&String>,
        upgrade: bool,
        transport_kind: TransportKind,
        http_method: HttpMethod,
    ) -> Result<(), ServerError> {
        if let Some(sid) = sid {
            let client = self.clients.get(sid);
            if let Some(client) = client {
                let client_transport_kind = client.get_transport_kind();
                if !upgrade && Some(transport_kind) != client_transport_kind {
                    return Err(ServerError::BadRequest);
                }
            } else {
                return Err(ServerError::UnknownSid);
            }
        } else {
            if http_method != HttpMethod::Get {
                return Err(ServerError::BadHandshakeMethod);
            }
            // FIXME: fix allow_request calls
            /*if let Some(validator) = &self.options.allow_request {
                // FIXME: pass some request parameters to this validator
                // to make it useful
                let valid = validator();
                if !valid {
                    return Err(ServerError::BadRequest);
                }
            }*/
        }
        Ok(())
    }

    /// Generate a new ID for a client.
    /// Note: This generates IDs in a different format from the original JS
    /// engine.io implementation, which uses a library called
    /// [base64id](https://www.npmjs.com/package/base64id) that doesn't seem
    /// to guarantee uniqueness.
    pub fn generate_id() -> String {
        Uuid::new_v4().to_hyphenated().to_string()
    }

    /// Returns the new client ID
    pub async fn handshake(
        &self,
        context: Arc<RequestContext>,
        data: HandshakeData<A::WsHandle>,
    ) -> Result<(String, A::Response), ServerError> {
        let sid = Self::generate_id();
        let supports_binary = !context.query.contains_key("b64");
        let jsonp = !supports_binary && !context.query.contains_key("j");
        let context = Arc::new(context.with_set_cookie(SetCookie::from_cookie_options(
            &self.options.cookie,
            sid.clone(),
        )));
        let transport_create_data = match data {
            HandshakeData::Polling => TransportCreateData::Polling { jsonp },
            HandshakeData::WebSocket { socket } => TransportCreateData::WebSocket {
                supports_binary,
                socket,
            },
        };
        let socket = Arc::new(Socket::new(
            sid.clone(),
            context.clone(),
            self.event_senders.client.clone(),
            transport_create_data,
        ));
        self.clients.insert(sid.clone(), socket.clone());
        socket.open(&self.options).await;
        // TODO: send this initial packet in the handshake request response?
        // If so, we'd need to return it to the adapter.
        if let Some(initial_message_packet) = self.options.initial_packet.clone() {
            socket.send_packet(initial_message_packet, None).await;
        }
        subscribe_socket_to_transport_events(socket).await;
        let response = {
            let client = self.get_client_or_error(&sid)?;
            match client.get_transport_or_fail()?.as_ref() {
                Transport::Polling(_) => Ok(client.handle_polling_request(context, None).await?),
                _ => Err(ServerError::BadRequest),
            }
        };
        // Emit a "connection" event. This is an internal event that's used by socket_io.
        let _ = self
            .event_senders
            .server
            .clone()
            .send(ServerEvent::Connection {
                connection_id: sid.clone(),
            });
        response.map(|response| Ok((sid, response)))?
    }
    pub fn clients_count(&self) -> usize {
        self.clients.len()
    }

    pub fn get_client_or_error(&self, id: &str) -> Result<Arc<Socket<A>>, ServerError> {
        if let Some(client) = self.clients.get(id) {
            Ok(client.value().clone())
        } else {
            Err(ServerError::UnknownSid)
        }
    }

    fn subscribe_to_socket_events(&self, client_event_receiver: mpsc::Receiver<SocketEvent>) {
        // TODO: listen for responder responses on fallible events
        let server_event_sender = self.event_senders.server.clone();
        let clients = self.clients.clone();
        tokio::spawn(async move {
            let mut receiver = client_event_receiver;
            while let Some(message) = receiver.recv().await {
                match message {
                    SocketEvent::Close { socket_id, reason } => {
                        clients.remove(&socket_id);
                        let _ = server_event_sender.send(ServerEvent::Close {
                            connection_id: socket_id,
                            reason,
                        });
                    }
                    SocketEvent::Flush { socket_id } => {
                        // Forward the Flush event to the external listener
                        let _ = server_event_sender.send(ServerEvent::Flush {
                            connection_id: socket_id,
                        });
                    }
                    SocketEvent::Drain { socket_id } => {
                        // Forward the Drain event to the external listener
                        let _ = server_event_sender.send(ServerEvent::Drain {
                            connection_id: socket_id,
                        });
                    }
                    SocketEvent::Message {
                        socket_id,
                        context,
                        data,
                    } => {
                        // Forward the Message event to the external listener
                        let _ = server_event_sender.send(ServerEvent::Message {
                            connection_id: socket_id,
                            context,
                            data,
                        });
                    }
                    SocketEvent::Error { socket_id } => {
                        let _ = server_event_sender.send(ServerEvent::Error {
                            connection_id: socket_id,
                        });
                    }
                    _ => {}
                }
            }
        });
    }

    fn subscribe_to_commands(&self) {
        // TODO: receive packet send requests using an MPSC listener?
        todo!();
    }
}

#[derive(Debug)]
pub enum HandshakeData<S>
where
    S: 'static,
{
    Polling,
    WebSocket { socket: S },
}

#[derive(Debug)]
pub struct AlreadySubscribedError;

impl std::fmt::Display for AlreadySubscribedError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Already subscribed to this server, cannot subscribe again"
        )
    }
}

impl std::error::Error for AlreadySubscribedError {}
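// Two small, self-contained sketches of patterns used above. Every name inside them is invented
// for illustration and is not part of this crate's API.
//
// The first mirrors try_subscribe: std::mem::replace swaps a Mutex-guarded state enum to
// Subscribed while moving the receiver out, so the receiver can be handed out exactly once.
#[cfg(test)]
mod one_shot_subscribe_example {
    use std::mem;
    use std::sync::Mutex;

    enum State {
        Unsubscribed { receiver: u32 }, // stand-in for the real event receiver
        Subscribed,
    }

    fn take_receiver(state: &Mutex<State>) -> Option<u32> {
        let mut guard = state.lock().unwrap();
        match mem::replace(&mut *guard, State::Subscribed) {
            State::Unsubscribed { receiver } => Some(receiver),
            State::Subscribed => None,
        }
    }

    #[test]
    fn the_receiver_can_only_be_taken_once() {
        let state = Mutex::new(State::Unsubscribed { receiver: 7 });
        assert_eq!(take_receiver(&state), Some(7));
        assert_eq!(take_receiver(&state), None);
    }
}

// The second mirrors subscribe_to_socket_events: a spawned task receives on one mpsc channel and
// republishes on another. It assumes tokio's test macro (the "macros" and "rt" features) is
// available.
#[cfg(test)]
mod event_forwarding_example {
    use tokio::sync::mpsc;

    #[tokio::test]
    async fn forwards_messages_between_channels() {
        let (socket_tx, mut socket_rx) = mpsc::channel::<u32>(8);
        let (server_tx, mut server_rx) = mpsc::channel::<u32>(8);

        // The forwarding task ends when the sending side is dropped.
        tokio::spawn(async move {
            while let Some(event) = socket_rx.recv().await {
                let _ = server_tx.send(event).await;
            }
        });

        socket_tx.send(42).await.unwrap();
        assert_eq!(server_rx.recv().await, Some(42));
    }
}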
{ self.try_subscribe() .expect("Already subscribed to engine_io_server::Server") }
identifier_body
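The `subscribe`/`try_subscribe` pair in this row hands its event receiver out exactly once: the receivers live inside a `ServerState` enum behind a mutex, and `std::mem::replace` swaps `Subscribed` in while moving the previous state (and with it the receiver) out by value. Below is a minimal std-only sketch of that consume-once pattern; the `EventReceiver` type is a placeholder, not the crate's real `bmrng` receiver.

```rust
use std::mem;
use std::sync::Mutex;

struct EventReceiver; // placeholder; the real code stores channel receivers here

enum State {
    Unsubscribed { receiver: EventReceiver },
    Subscribed,
}

struct Server {
    state: Mutex<State>,
}

impl Server {
    /// Hands the receiver out exactly once; every later call gets None.
    fn try_subscribe(&self) -> Option<EventReceiver> {
        let mut state = self.state.lock().unwrap();
        // Swap `Subscribed` in, then inspect what was there before.
        match mem::replace(&mut *state, State::Subscribed) {
            State::Unsubscribed { receiver } => Some(receiver),
            State::Subscribed => None,
        }
    }
}

fn main() {
    let server = Server {
        state: Mutex::new(State::Unsubscribed { receiver: EventReceiver }),
    };
    assert!(server.try_subscribe().is_some());
    assert!(server.try_subscribe().is_none()); // a second subscriber is rejected
}
```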
server.rs
use crate::adapter::Adapter; use crate::socket::{ subscribe_socket_to_transport_events, Callback, Socket, SocketCloseReason, SocketEvent, }; use crate::transport::{Transport, TransportCreateData, TransportKind}; use crate::util::{HttpMethod, RequestContext, SendPacketError, ServerError, SetCookie}; use dashmap::DashMap; use engine_io_parser::packet::{Packet, PacketData}; use std::sync::Arc; use std::sync::Mutex; use tokio::sync::{broadcast, mpsc}; use uuid::Uuid; pub const BUFFER_CONST: usize = 32; #[derive(Debug, Clone, PartialEq)] pub struct ServerOptions { pub ping_timeout: u32, pub ping_interval: u32, pub upgrade_timeout: u32, pub transports: Vec<TransportKind>, pub allow_upgrades: bool, pub initial_packet: Option<Packet>, // TODO: implement this // pub allow_request: Option<Box<dyn (Fn() -> bool) + Send + 'static>>, pub cookie: Option<CookieOptions>, // TODO: node ws-specific options: // - maxHttpBufferSize // - perMessageDeflate // - httpCompression // -- cors pub buffer_factor: usize, } #[derive(Debug, Clone, PartialEq)] pub struct CookieOptions { pub name: String, pub path: String, pub http_only: bool, } #[derive(Debug, Clone)] pub struct EventSenders { // Event sender to external owner server: bmrng::RequestSender<ServerEvent, Packet>, // server: broadcast::Sender<ServerEvent>, /// Event sender to Socket instances. cloned and passed over client: mpsc::Sender<SocketEvent>, } #[derive(Debug)] pub enum ServerState { Unsubscribed { socket_event_receiver: mpsc::Receiver<SocketEvent>, engine_event_receiver: bmrng::RequestReceiver<ServerEvent, Packet>, }, Subscribed, } pub struct Server<A: 'static + Adapter> { state: Arc<Mutex<ServerState>>, // TODO: don't use a mutex here, instead have an internal socket state clients: Arc<DashMap<String, Arc<Socket<A>>>>, event_senders: EventSenders, // TODO: ping timeout handler EngineIoSocketTimeoutHandler pub options: ServerOptions, } impl Default for ServerOptions { fn default() -> Self { ServerOptions { ping_timeout: 5000, ping_interval: 25000, upgrade_timeout: 10000, transports: vec![TransportKind::WebSocket, TransportKind::Polling], allow_upgrades: true, initial_packet: None, cookie: Some(CookieOptions::default()), // allow_request: None, buffer_factor: 2, } } } impl Default for CookieOptions { fn default() -> Self { CookieOptions { name: "io".to_owned(), path: "/".to_owned(), http_only: true, } } } #[derive(Display, Debug, Clone, PartialEq)] pub enum ServerEvent { /// Socket ID Connection { connection_id: String, }, Close { connection_id: String, reason: SocketCloseReason, }, Flush { connection_id: String, }, Drain { connection_id: String, }, Message { connection_id: String, context: Arc<RequestContext>, data: PacketData, }, Error { connection_id: String, }, } impl<A: 'static + Adapter> Server<A> { pub fn new(options: ServerOptions) -> Self { // To listen events from socket instances let (client_event_sender, client_event_receiver) = mpsc::channel(options.buffer_factor * BUFFER_CONST); // To send events to the owner of this Server instance let (server_event_sender, server_event_receiver) = bmrng::channel(options.buffer_factor * BUFFER_CONST); Server { state: Arc::new(Mutex::new(ServerState::Unsubscribed { socket_event_receiver: client_event_receiver, engine_event_receiver: server_event_receiver, })), clients: Arc::new(DashMap::new()), event_senders: EventSenders { server: server_event_sender, client: client_event_sender, }, options, } } pub fn subscribe(&self) -> bmrng::RequestReceiver<ServerEvent, Packet> { self.try_subscribe() .expect("Already 
subscribed to engine_io_server::Server") } pub fn try_subscribe( &self, ) -> Result<bmrng::RequestReceiver<ServerEvent, Packet>, AlreadySubscribedError> { let mut state = self.state.lock().unwrap(); let old_state = std::mem::replace(&mut *state, ServerState::Subscribed); match old_state { ServerState::Subscribed => Err(AlreadySubscribedError), ServerState::Unsubscribed { socket_event_receiver, engine_event_receiver, } => { // First time calling subscribe, also start listening events from `Socket` instances self.subscribe_to_socket_events(socket_event_receiver); Ok(engine_event_receiver) } } // TODO: handle shutdown properly by receiving a shutdown signal // sending it to socket instances. } pub async fn close(&self) {
// TODO: or drop the whole thing. The server, the sockets, everything. todo!(); // for socket in self.clients.iter() { // socket.value().close(true); // } } pub async fn close_socket(&self, connection_id: &str) { if let Some((_key, socket)) = self.clients.remove(connection_id) { // TODO: convert this to drop todo!(); // socket.close(true); } } // TODO: consider converting ack callbacks into optional async Results? // `connection_id` is an owned string just because of a Rust compiler issue. pub async fn send_packet_with_ack( &self, connection_id: String, packet: Packet, callback: Option<Callback>, ) -> Result<(), SendPacketError> { match self.clients.get(&connection_id) { Some(client) => Ok(client.send_packet(packet, callback).await), None => Err(SendPacketError::UnknownConnectionId), } } pub async fn send_packet( &self, connection_id: String, packet: Packet, ) -> Result<(), SendPacketError> { match self.clients.get(&connection_id) { Some(client) => Ok(client.send_packet(packet, None).await), None => Err(SendPacketError::UnknownConnectionId), } } pub async fn handle_request( &self, context: RequestContext, body: Option<A::Body>, ) -> Result<A::Response, ServerError> { let context = Arc::new(context); let sid_ref = context.query.get("sid"); let sid = sid_ref.map(|s| s.to_owned()); self.verify_request(sid_ref, false, context.transport_kind, context.http_method) .await?; if let Some(sid) = sid { let client = self.get_client_or_error(&sid)?; let response = client.handle_polling_request(context.clone(), body).await?; Ok(response) } else { let (sid, response) = self.handshake(context, HandshakeData::Polling).await?; Ok(response) } } /// Akin to `onWebSocket` from engine.io js // TODO: handle errors, socket closure etc. pub async fn handle_upgrade(&self, context: RequestContext, socket: A::WsHandle) { let context = Arc::new(context); let sid_ref = context.query.get("sid"); let sid = sid_ref.map(|s| s.to_owned()); if let Some(sid) = sid { // TODO: don't panic let client = self.get_client_or_error(&sid).expect("TODO: fix this"); client.maybe_upgrade(context, todo!()); // TODO: implement this! // let client = // TODO: call socket.maybe_upgrade() } else { self.handshake(context, HandshakeData::WebSocket { socket }) .await; todo!(); } } pub async fn verify_request( &self, sid: Option<&String>, upgrade: bool, transport_kind: TransportKind, http_method: HttpMethod, ) -> Result<(), ServerError> { if let Some(sid) = sid { let client = self.clients.get(sid); if let Some(client) = client { let client_transport_kind = client.get_transport_kind(); if !upgrade && Some(transport_kind) != client_transport_kind { return Err(ServerError::BadRequest); } } else { return Err(ServerError::UnknownSid); } } else { if http_method != HttpMethod::Get { return Err(ServerError::BadHandshakeMethod); } // FIXME: fix allow_request calls /*if let Some(validator) = &self.options.allow_request { // FIXME: pass some request parameters to this validator // to make it useful let valid = validator(); if !valid { return Err(ServerError::BadRequest); } }*/ } Ok(()) } /// Generate a new ID for a client. /// Note: This generates IDs in a different format from the original JS /// engine.io implementation, which uses a library called /// [base64id](https://www.npmjs.com/package/base64id) that doesn't seem /// to guarantee uniqueness. 
pub fn generate_id() -> String { Uuid::new_v4().to_hyphenated().to_string() } /// Returns the new client ID pub async fn handshake( &self, context: Arc<RequestContext>, data: HandshakeData<A::WsHandle>, ) -> Result<(String, A::Response), ServerError> { let sid = Self::generate_id(); let supports_binary = !context.query.contains_key("b64"); let jsonp = !supports_binary && !context.query.contains_key("j"); let context = Arc::new(context.with_set_cookie(SetCookie::from_cookie_options( &self.options.cookie, sid.clone(), ))); let transport_create_data = match data { HandshakeData::Polling => TransportCreateData::Polling { jsonp }, HandshakeData::WebSocket { socket } => TransportCreateData::WebSocket { supports_binary, socket, }, }; let socket = Arc::new(Socket::new( sid.clone(), context.clone(), self.event_senders.client.clone(), transport_create_data, )); self.clients.insert(sid.clone(), socket.clone()); socket.open(&self.options).await; // TODO: send this initial packet in the handshake request response? // so we'd need to return it to the adapter if let Some(initial_message_packet) = self.options.initial_packet.clone() { socket.send_packet(initial_message_packet, None).await; } subscribe_socket_to_transport_events(socket).await; let response = { let client = self.get_client_or_error(&sid)?; match client.get_transport_or_fail()?.as_ref() { Transport::Polling(_) => Ok(client.handle_polling_request(context, None).await?), _ => Err(ServerError::BadRequest), } }; // Emit a "connection" event. This is an internal event that's used by socket_io let _ = self .event_senders .server .clone() .send(ServerEvent::Connection { connection_id: sid.clone(), }); response.map(|response| Ok((sid, response)))? } pub fn clients_count(&self) -> usize { self.clients.len() } pub fn get_client_or_error(&self, id: &str) -> Result<Arc<Socket<A>>, ServerError> { if let Some(client) = self.clients.get(id) { Ok(client.value().clone()) } else { Err(ServerError::UnknownSid) } } fn subscribe_to_socket_events(&self, client_event_receiver: mpsc::Receiver<SocketEvent>) { // TODO: listen for responder responses on fallible events let server_event_sender = self.event_senders.server.clone(); let clients = self.clients.clone(); tokio::spawn(async move { let mut receiver = client_event_receiver; while let Some(message) = receiver.recv().await { match message { SocketEvent::Close { socket_id, reason } => { clients.remove(&socket_id); let _ = server_event_sender.send(ServerEvent::Close { connection_id: socket_id, reason, }); } SocketEvent::Flush { socket_id } => { // Forward the Flush event to the external listener let _ = server_event_sender.send(ServerEvent::Flush { connection_id: socket_id, }); } SocketEvent::Drain { socket_id } => { // Forward the Drain event to the external listener let _ = server_event_sender.send(ServerEvent::Drain { connection_id: socket_id, }); } SocketEvent::Message { socket_id, context, data, } => { // Forward the Message event to the external listener let _ = server_event_sender.send(ServerEvent::Message { connection_id: socket_id, context, data, }); } SocketEvent::Error { socket_id } => { let _ = server_event_sender.send(ServerEvent::Error { connection_id: socket_id, }); } _ => {} } } }); } fn subscribe_to_commands(&self) { // TODO: receive packet send requests using an MPSC listener? 
todo!(); } } #[derive(Debug)] pub enum HandshakeData<S> where S: 'static, { Polling, WebSocket { socket: S }, } #[derive(Debug)] pub struct AlreadySubscribedError; impl std::fmt::Display for AlreadySubscribedError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "Already subscribed to this server, cannot subscribe again" ) } } impl std::error::Error for AlreadySubscribedError {}
// TODO: consider sending signals or dropping channels instead of closing them like this?
random_line_split
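Every public send/close method in this row resolves the connection id against the `clients` map first and fails with `UnknownConnectionId` (or `UnknownSid`) when the lookup misses. A reduced sketch of that lookup-or-error shape, assuming the same `dashmap` crate as in the imports above; the `Socket` here is a stand-in, not the crate's type:

```rust
use dashmap::DashMap;
use std::sync::Arc;

struct Socket; // stand-in for the per-connection state

#[derive(Debug, PartialEq)]
enum SendPacketError {
    UnknownConnectionId,
}

struct Server {
    clients: DashMap<String, Arc<Socket>>,
}

impl Server {
    fn send_packet(&self, connection_id: &str) -> Result<(), SendPacketError> {
        match self.clients.get(connection_id) {
            // `get` returns a guard that derefs to the entry; clone the Arc out of it.
            Some(entry) => {
                let _socket: Arc<Socket> = entry.value().clone();
                // ...hand the packet to the socket here...
                Ok(())
            }
            None => Err(SendPacketError::UnknownConnectionId),
        }
    }
}

fn main() {
    let server = Server { clients: DashMap::new() };
    server.clients.insert("sid-1".to_owned(), Arc::new(Socket));
    assert_eq!(server.send_packet("sid-1"), Ok(()));
    assert_eq!(server.send_packet("nope"), Err(SendPacketError::UnknownConnectionId));
}
```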
server.rs
use crate::adapter::Adapter; use crate::socket::{ subscribe_socket_to_transport_events, Callback, Socket, SocketCloseReason, SocketEvent, }; use crate::transport::{Transport, TransportCreateData, TransportKind}; use crate::util::{HttpMethod, RequestContext, SendPacketError, ServerError, SetCookie}; use dashmap::DashMap; use engine_io_parser::packet::{Packet, PacketData}; use std::sync::Arc; use std::sync::Mutex; use tokio::sync::{broadcast, mpsc}; use uuid::Uuid; pub const BUFFER_CONST: usize = 32; #[derive(Debug, Clone, PartialEq)] pub struct ServerOptions { pub ping_timeout: u32, pub ping_interval: u32, pub upgrade_timeout: u32, pub transports: Vec<TransportKind>, pub allow_upgrades: bool, pub initial_packet: Option<Packet>, // TODO: implement this // pub allow_request: Option<Box<dyn (Fn() -> bool) + Send + 'static>>, pub cookie: Option<CookieOptions>, // TODO: node ws-specific options: // - maxHttpBufferSize // - perMessageDeflate // - httpCompression // -- cors pub buffer_factor: usize, } #[derive(Debug, Clone, PartialEq)] pub struct CookieOptions { pub name: String, pub path: String, pub http_only: bool, } #[derive(Debug, Clone)] pub struct EventSenders { // Event sender to external owner server: bmrng::RequestSender<ServerEvent, Packet>, // server: broadcast::Sender<ServerEvent>, /// Event sender to Socket instances. cloned and passed over client: mpsc::Sender<SocketEvent>, } #[derive(Debug)] pub enum ServerState { Unsubscribed { socket_event_receiver: mpsc::Receiver<SocketEvent>, engine_event_receiver: bmrng::RequestReceiver<ServerEvent, Packet>, }, Subscribed, } pub struct Server<A: 'static + Adapter> { state: Arc<Mutex<ServerState>>, // TODO: don't use a mutex here, instead have an internal socket state clients: Arc<DashMap<String, Arc<Socket<A>>>>, event_senders: EventSenders, // TODO: ping timeout handler EngineIoSocketTimeoutHandler pub options: ServerOptions, } impl Default for ServerOptions { fn default() -> Self { ServerOptions { ping_timeout: 5000, ping_interval: 25000, upgrade_timeout: 10000, transports: vec![TransportKind::WebSocket, TransportKind::Polling], allow_upgrades: true, initial_packet: None, cookie: Some(CookieOptions::default()), // allow_request: None, buffer_factor: 2, } } } impl Default for CookieOptions { fn default() -> Self { CookieOptions { name: "io".to_owned(), path: "/".to_owned(), http_only: true, } } } #[derive(Display, Debug, Clone, PartialEq)] pub enum ServerEvent { /// Socket ID Connection { connection_id: String, }, Close { connection_id: String, reason: SocketCloseReason, }, Flush { connection_id: String, }, Drain { connection_id: String, }, Message { connection_id: String, context: Arc<RequestContext>, data: PacketData, }, Error { connection_id: String, }, } impl<A: 'static + Adapter> Server<A> { pub fn new(options: ServerOptions) -> Self { // To listen events from socket instances let (client_event_sender, client_event_receiver) = mpsc::channel(options.buffer_factor * BUFFER_CONST); // To send events to the owner of this Server instance let (server_event_sender, server_event_receiver) = bmrng::channel(options.buffer_factor * BUFFER_CONST); Server { state: Arc::new(Mutex::new(ServerState::Unsubscribed { socket_event_receiver: client_event_receiver, engine_event_receiver: server_event_receiver, })), clients: Arc::new(DashMap::new()), event_senders: EventSenders { server: server_event_sender, client: client_event_sender, }, options, } } pub fn subscribe(&self) -> bmrng::RequestReceiver<ServerEvent, Packet> { self.try_subscribe() .expect("Already 
subscribed to engine_io_server::Server") } pub fn try_subscribe( &self, ) -> Result<bmrng::RequestReceiver<ServerEvent, Packet>, AlreadySubscribedError> { let mut state = self.state.lock().unwrap(); let old_state = std::mem::replace(&mut *state, ServerState::Subscribed); match old_state { ServerState::Subscribed => Err(AlreadySubscribedError), ServerState::Unsubscribed { socket_event_receiver, engine_event_receiver, } => { // First time calling subscribe, also start listening events from `Socket` instances self.subscribe_to_socket_events(socket_event_receiver); Ok(engine_event_receiver) } } // TODO: handle shutdown properly by receiving a shutdown signal // sending it to socket instances. } pub async fn close(&self) { // TODO: consider sending signals or dropping channels instead of closing them like this? // TODO: or drop the whole thing. The server, the sockets, everything. todo!(); // for socket in self.clients.iter() { // socket.value().close(true); // } } pub async fn close_socket(&self, connection_id: &str) { if let Some((_key, socket)) = self.clients.remove(connection_id) { // TODO: convert this to drop todo!(); // socket.close(true); } } // TODO: consider converting ack callbacks into optional async Results? // `connection_id` is an owned string just because of a Rust compiler issue. pub async fn send_packet_with_ack( &self, connection_id: String, packet: Packet, callback: Option<Callback>, ) -> Result<(), SendPacketError> { match self.clients.get(&connection_id) { Some(client) => Ok(client.send_packet(packet, callback).await), None => Err(SendPacketError::UnknownConnectionId), } } pub async fn send_packet( &self, connection_id: String, packet: Packet, ) -> Result<(), SendPacketError> { match self.clients.get(&connection_id) { Some(client) => Ok(client.send_packet(packet, None).await), None => Err(SendPacketError::UnknownConnectionId), } } pub async fn handle_request( &self, context: RequestContext, body: Option<A::Body>, ) -> Result<A::Response, ServerError> { let context = Arc::new(context); let sid_ref = context.query.get("sid"); let sid = sid_ref.map(|s| s.to_owned()); self.verify_request(sid_ref, false, context.transport_kind, context.http_method) .await?; if let Some(sid) = sid { let client = self.get_client_or_error(&sid)?; let response = client.handle_polling_request(context.clone(), body).await?; Ok(response) } else { let (sid, response) = self.handshake(context, HandshakeData::Polling).await?; Ok(response) } } /// Akin to `onWebSocket` from engine.io js // TODO: handle errors, socket closure etc. pub async fn handle_upgrade(&self, context: RequestContext, socket: A::WsHandle) { let context = Arc::new(context); let sid_ref = context.query.get("sid"); let sid = sid_ref.map(|s| s.to_owned()); if let Some(sid) = sid { // TODO: don't panic let client = self.get_client_or_error(&sid).expect("TODO: fix this"); client.maybe_upgrade(context, todo!()); // TODO: implement this! // let client = // TODO: call socket.maybe_upgrade() } else { self.handshake(context, HandshakeData::WebSocket { socket }) .await; todo!(); } } pub async fn
( &self, sid: Option<&String>, upgrade: bool, transport_kind: TransportKind, http_method: HttpMethod, ) -> Result<(), ServerError> { if let Some(sid) = sid { let client = self.clients.get(sid); if let Some(client) = client { let client_transport_kind = client.get_transport_kind(); if !upgrade && Some(transport_kind) != client_transport_kind { return Err(ServerError::BadRequest); } } else { return Err(ServerError::UnknownSid); } } else { if http_method != HttpMethod::Get { return Err(ServerError::BadHandshakeMethod); } // FIXME: fix allow_request calls /*if let Some(validator) = &self.options.allow_request { // FIXME: pass some request parameters to this validator // to make it useful let valid = validator(); if !valid { return Err(ServerError::BadRequest); } }*/ } Ok(()) } /// Generate a new ID for a client. /// Note: This generates IDs in a different format from the original JS /// engine.io implementation, which uses a library called /// [base64id](https://www.npmjs.com/package/base64id) that doesn't seem /// to guarantee uniqueness. pub fn generate_id() -> String { Uuid::new_v4().to_hyphenated().to_string() } /// Returns the new client ID pub async fn handshake( &self, context: Arc<RequestContext>, data: HandshakeData<A::WsHandle>, ) -> Result<(String, A::Response), ServerError> { let sid = Self::generate_id(); let supports_binary = !context.query.contains_key("b64"); let jsonp = !supports_binary && !context.query.contains_key("j"); let context = Arc::new(context.with_set_cookie(SetCookie::from_cookie_options( &self.options.cookie, sid.clone(), ))); let transport_create_data = match data { HandshakeData::Polling => TransportCreateData::Polling { jsonp }, HandshakeData::WebSocket { socket } => TransportCreateData::WebSocket { supports_binary, socket, }, }; let socket = Arc::new(Socket::new( sid.clone(), context.clone(), self.event_senders.client.clone(), transport_create_data, )); self.clients.insert(sid.clone(), socket.clone()); socket.open(&self.options).await; // TODO: send this initial packet in the handshake request response? // so we'd need to return it to the adapter if let Some(initial_message_packet) = self.options.initial_packet.clone() { socket.send_packet(initial_message_packet, None).await; } subscribe_socket_to_transport_events(socket).await; let response = { let client = self.get_client_or_error(&sid)?; match client.get_transport_or_fail()?.as_ref() { Transport::Polling(_) => Ok(client.handle_polling_request(context, None).await?), _ => Err(ServerError::BadRequest), } }; // Emit a "connection" event. This is an internal event that's used by socket_io let _ = self .event_senders .server .clone() .send(ServerEvent::Connection { connection_id: sid.clone(), }); response.map(|response| Ok((sid, response)))? 
} pub fn clients_count(&self) -> usize { self.clients.len() } pub fn get_client_or_error(&self, id: &str) -> Result<Arc<Socket<A>>, ServerError> { if let Some(client) = self.clients.get(id) { Ok(client.value().clone()) } else { Err(ServerError::UnknownSid) } } fn subscribe_to_socket_events(&self, client_event_receiver: mpsc::Receiver<SocketEvent>) { // TODO: listen for responder responses on fallible events let server_event_sender = self.event_senders.server.clone(); let clients = self.clients.clone(); tokio::spawn(async move { let mut receiver = client_event_receiver; while let Some(message) = receiver.recv().await { match message { SocketEvent::Close { socket_id, reason } => { clients.remove(&socket_id); let _ = server_event_sender.send(ServerEvent::Close { connection_id: socket_id, reason, }); } SocketEvent::Flush { socket_id } => { // Forward the Flush event to the external listener let _ = server_event_sender.send(ServerEvent::Flush { connection_id: socket_id, }); } SocketEvent::Drain { socket_id } => { // Forward the Drain event to the external listener let _ = server_event_sender.send(ServerEvent::Drain { connection_id: socket_id, }); } SocketEvent::Message { socket_id, context, data, } => { // Forward the Message event to the external listener let _ = server_event_sender.send(ServerEvent::Message { connection_id: socket_id, context, data, }); } SocketEvent::Error { socket_id } => { let _ = server_event_sender.send(ServerEvent::Error { connection_id: socket_id, }); } _ => {} } } }); } fn subscribe_to_commands(&self) { // TODO: receive packet send requests using an MPSC listener? todo!(); } } #[derive(Debug)] pub enum HandshakeData<S> where S: 'static, { Polling, WebSocket { socket: S }, } #[derive(Debug)] pub struct AlreadySubscribedError; impl std::fmt::Display for AlreadySubscribedError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "Already subscribed to this server, cannot subscribe again" ) } } impl std::error::Error for AlreadySubscribedError {}
verify_request
identifier_name
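The `verify_request` highlighted by this row is a pure decision table once the client lookup is factored out: a present sid must resolve to a known client and (unless this is an upgrade) match its current transport, while an absent sid means a handshake, which is only valid over GET. A self-contained sketch of that table, where the flattened `client_transport` parameter stands in for the `clients` lookup:

```rust
#[derive(Debug, PartialEq, Clone, Copy)]
enum TransportKind { Polling, WebSocket }

#[derive(Debug, PartialEq)]
enum ServerError { BadRequest, UnknownSid, BadHandshakeMethod }

fn verify(
    sid: Option<&str>,
    client_transport: Option<TransportKind>, // transport of the registered client, if the sid is known
    upgrade: bool,
    transport_kind: TransportKind,
    is_get: bool,
) -> Result<(), ServerError> {
    match (sid, client_transport) {
        // Known session: the transport must match unless this request is an upgrade.
        (Some(_), Some(current)) if !upgrade && transport_kind != current => {
            Err(ServerError::BadRequest)
        }
        (Some(_), Some(_)) => Ok(()),
        (Some(_), None) => Err(ServerError::UnknownSid),
        // No sid yet: this is a handshake, which must arrive over GET.
        (None, _) if !is_get => Err(ServerError::BadHandshakeMethod),
        (None, _) => Ok(()),
    }
}

fn main() {
    use TransportKind::*;
    assert_eq!(verify(Some("abc"), Some(Polling), false, WebSocket, true), Err(ServerError::BadRequest));
    assert_eq!(verify(Some("abc"), Some(Polling), true, WebSocket, true), Ok(()));
    assert_eq!(verify(Some("abc"), None, false, Polling, true), Err(ServerError::UnknownSid));
    assert_eq!(verify(None, None, false, Polling, false), Err(ServerError::BadHandshakeMethod));
}
```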
id.rs
pub fn bug(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match BUGS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("bug '{}' has no id yet", name), } } pub fn fish(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FISH.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("fish '{}' has no id yet", name), } } pub fn fossil(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FOSSILS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("fossil '{}' has no id yet", name), } } pub fn flower(name: impl AsRef<str>) -> usize
pub fn art(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match ART.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("art '{}' has no id yet", name), } } pub fn villager(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match VILLAGERS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("villager '{}' has no id yet", name), } } const BUGS: &[&str] = &[ "common butterfly", "yellow butterfly", "tiger butterfly", "peacock butterfly", "common bluebottle", "paper kite butterfly", "great purple emperor", "monarch butterfly", "emperor butterfly", "agrias butterfly", "rajah brooke's birdwing", "queen alexandra's birdwing", "moth", "atlas moth", "madagascan sunset moth", "long locust", "migratory locust", "rice grasshopper", "grasshopper", "cricket", "bell cricket", "mantis", "orchid mantis", "honeybee", "wasp", "brown cicada", "robust cicada", "giant cicada", "walker cicada", "evening cicada", "cicada shell", "red dragonfly", "darner dragonfly", "banded dragonfly", "damselfly", "firefly", "mole cricket", "pondskater", "diving beetle", "giant water bug", "stinkbug", "man-faced stink bug", "ladybug", "tiger beetle", "jewel beetle", "violin beetle", "citrus long-horned beetle", "rosalia batesi beetle", "blue weevil beetle", "dung beetle", "earth-boring dung beetle", "scarab beetle", "drone beetle", "goliath beetle", "saw stag", "miyama stag", "giant stag", "rainbow stag", "cyclommatus stag", "golden stag", "giraffe stag", "horned dynastid", "horned atlas", "horned elephant", "horned hercules", "walking stick", "walking leaf", "bagworm", "ant", "hermit crab", "wharf roach", "fly", "mosquito", "flea", "snail", "pill bug", "centipede", "spider", "tarantula", "scorpion", ]; const FISH: &[&str] = &[ "bitterling", "pale chub", "crucian carp", "dace", "carp", "koi", "goldfish", "pop-eyed goldfish", "ranchu goldfish", "killifish", "crawfish", "soft-shelled turtle", "snapping turtle", "tadpole", "frog", "freshwater goby", "loach", "catfish", "giant snakehead", "bluegill", "yellow perch", "black bass", "tilapia", "pike", "pond smelt", "sweetfish", "cherry salmon", "char", "golden trout", "stringfish", "salmon", "king salmon", "mitten crab", "guppy", "nibble fish", "angelfish", "betta", "neon tetra", "rainbowfish", "piranha", "arowana", "dorado", "gar", "arapaima", "saddled bichir", "sturgeon", "sea butterfly", "sea horse", "clown fish", "surgeonfish", "butterfly fish", "napoleonfish", "zebra turkeyfish", "blowfish", "puffer fish", "anchovy", "horse mackerel", "barred knifejaw", "sea bass", "red snapper", "dab", "olive flounder", "squid", "moray eel", "ribbon eel", "tuna", "blue marlin", "giant trevally", "mahi-mahi", "ocean sunfish", "ray", "saw shark", "hammerhead shark", "great white shark", "whale shark", "suckerfish", "football fish", "oarfish", "barreleye", "coelacanth", ]; const FOSSILS: &[&str] = &[ "acanthostega", "amber", "ammonite", "anomalocaris", "archaeopteryx", "australopith", "coprolite", "dinosaur track", "dunkleosteus", "eusthenopteron", "juramaia", "myllokunmingia", "shark-tooth pattern", "trilobite", "ankylo skull", "ankylo torso", "ankylo tail", "archelon skull", "archelon tail", "brachio skull", "brachio chest", "brachio pelvis", "brachio tail", "deinony torso", "deinony tail", "dimetrodon skull", "dimetrodon torso", "diplo skull", "diplo neck", "diplo chest", "diplo pelvis", "diplo tail", "diplo tail tip", "iguanodon skull", "iguanodon torso", "iguanodon tail", "mammoth skull", "mammoth 
torso", "megacero skull", "megacero torso", "megacero tail", "left megalo side", "right megalo side", "ophthalmo skull", "ophthalmo torso", "pachysaurus skull", "pachysaurus tail", "parasaur skull", "parasaur torso", "parasaur tail", "plesio skull", "plesio tail", "plesio body", "right ptera wing", "ptera body", "left ptera wing", "right quetzal wing", "quetzal torso", "left quetzal wing", "sabertooth skull", "sabertooth tail", "spino skull", "spino torso", "spino tail", "stego skull", "stego torso", "stego tail", "tricera skull", "tricera torso", "tricera tail", "t. rex skull", "t. rex torso", "t. rex tail", ]; const FLOWERS: &[&str] = &[ "red cosmos", "white cosmos", "yellow cosmos", "pink cosmos", "orange cosmos", "black cosmos", "white tulips", "red tulips", "yellow tulips", "pink tulips", "orange tulips", "purple tulips", "black tulips", "yellow pansies", "red pansies", "white pansies", "orange pansies", "purple pansies", "blue pansies", "white roses", "red roses", "yellow roses", "pink roses", "orange roses", "purple roses", "black roses", "blue roses", "gold roses", "white lilies", "red lilies", "yellow lilies", "pink lilies", "orange lilies", "black lilies", "white windflowers", "orange windflowers", "red windflowers", "blue windflowers", "pink windflowers", "purple windflowers", "white hyacinths", "yellow hyacinths", "red hyacinths", "pink hyacinths", "orange hyacinths", "blue hyacinths", "purple hyacinths", "white mums", "yellow mums", "red mums", "purple mums", "pink mums", "green mums", ]; const ART: &[&str] = &[ "robust statue", "rock-head statue", "beautiful statue", "valiant statue", "gallant statue", "mystic statue", "informative statue", "warrior statue", "tremendous statue", "ancient statue", "motherly statue", "familiar statue", "great statue", "quaint painting", "graceful painting", "famous painting", "detailed painting", "basic painting", "serene painting", "amazing painting", "solemn painting", "scary painting", "jolly painting", "wistful painting", "moving painting", "wild painting left half", "wild painting right half", "scenic painting", "academic painting", "common painting", "flowery painting", "twinkling painting", "nice painting", "moody painting", "glowing painting", "perfect painting", "mysterious painting", "calm painting", "proper painting", "sinking painting", "worthy painting", "warm painting", "dynamic painting", ]; const VILLAGERS: &[&str] = &[ "amelia", "pierce", "apollo", "frank", "buzz", "sterling", "keaton", "celia", "avery", "deli", "tammi", "monty", "nana", "flip", "simon", "elise", "shari", "anabelle", "annalisa", "snooty", "pango", "olaf", "antonio", "cyrano", "beardo", "chow", "megan", "groucho", "grizzly", "klaus", "ike", "curt", "tutu", "nate", "paula", "pinky", "charlise", "teddy", "ursala", "filbert", "sally", "cally", "marshal", "agent s", "blaire", "nibbles", "sylvana", "mint", "hazel", "tasha", "pecan", "peanut", "caroline", "ricky", "static", "sheldon", "poppy", "axel", "dizzy", "big top", "tia", "ellie", "margie", "eloise", "opal", "paolo", "cyd", "tucker", "bill", "pate", "pompom", "derwin", "drake", "gloria", "scoot", "joey", "mallary", "molly", "deena", "ketchup", "freckles", "quillson", "maelle", "miranda", "weber", "croque", "drift", "diva", "henry", "frobert", "cousteau", "jambette", "jeremiah", "huck", "lily", "puddles", "tad", "prince", "ribbot", "raddle", "camofrog", "gigi", "wart jr.", "cesar", "boyd", "hans", "rocket", "al", "boone", "violet", "louie", "peewee", "flurry", "clay", "graham", "hamlet", "hamphrey", "apple", 
"rodney", "soleil", "genji", "chrissy", "claude", "dotty", "gabi", "gaston", "doc", "bonbon", "carmen", "cole", "coco", "pippy", "francine", "tiffany", "bunnie", "mira", "o'hare", "snake", "hopkins", "ruby", "benedict", "broffina", "ava", "ken", "becky", "goose", "plucky", "knox", "egbert", "maddie", "bea", "cherry", "goldie", "daisy", "walker", "butch", "portia", "biskit", "marcel", "cookie", "bones", "shep", "lucky", "benjamin", "mac", "astrid", "carrie", "kitt", "mathilda", "marcie", "rooney", "walt", "sylvia", "olivia", "lolly", "purrl", "raymond", "rudy", "katt", "bob", "punchy", "kabuki", "kitty", "kiki", "ankha", "mitzi", "felicity", "merry", "monique", "kid cat", "rosie", "stinky", "tangy", "tom", "moe", "tabby", "canberra", "gonzo", "alice", "melba", "ozzie", "lyman", "sydney", "eugene", "yuka", "alli", "drago", "del", "alfonso", "gayle", "sly", "boots", "tipper", "angus", "coach", "stu", "naomi", "vic", "t-bone", "norma", "patty", "rodeo", "bud", "leopold", "rory", "elvis", "mott", "lionel", "rex", "chadder", "dora", "anicotti", "broccolo", "greta", "bree", "rod", "moose", "penelope", "limberg", "candi", "rizzo", "samson", "bettina", "bella", "spike", "tank", "renée", "merengue", "rhonda", "hornsby", "bertha", "bitty", "bubbles", "hippeux", "harry", "biff", "rocco", "marina", "octavian", "zucker", "reneigh", "annalise", "cleo", "colton", "peaches", "elmer", "victoria", "papi", "ed", "julian", "roscoe", "buck", "clyde", "winnie", "savannah", "cube", "friga", "hopper", "gwen", "tex", "boomer", "iggly", "puck", "roald", "aurora", "wade", "flo", "sprinkle", "bam", "deirdre", "lopez", "erik", "fauna", "beau", "bruce", "fuchsia", "diana", "zell", "baabara", "vesta", "dom", "eunice", "cashmere", "curlos", "muffy", "frita", "willow", "pietro", "stella", "timbra", "wendy", "boris", "pancetti", "chops", "rasher", "hugh", "kevin", "lucy", "truffles", "maggie", "agnes", "curly", "gala", "peggy", "cobb", "spork", "blanche", "flora", "cranston", "phil", "queenie", "julia", "sprocket", "phoebe", "gladys", "sandy", "kody", "stitches", "pudge", "bluebear", "cheri", "chester", "barold", "june", "olive", "murphy", "judy", "maple", "pekoe", "tammy", "poncho", "vladimir", "tybalt", "bianca", "rolf", "rowan", "claudia", "bangle", "leonardo", "jitters", "midge", "admiral", "anchovy", "piper", "jay", "jacob", "robin", "lucha", "sparro", "peck", "jacques", "twiggy", "freya", "fang", "audie", "whitney", "lobo", "skye", "chief", "dobie", "vivian", "wolfgang", "kyle", "chevre", "kidd", "gruff", "billy", "sherb", "pashmina", "velma", "nan", ];
{ let name = name.as_ref().to_lowercase(); match FLOWERS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("flower '{}' has no id yet", name), } }
identifier_body
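All of the lookup functions in `id.rs` share one shape: lowercase the query, linearly scan the table with `iter().position`, and panic when the name is missing. A non-panicking variant of the same shape, generic over the table (a sketch, not part of the original file):

```rust
/// Returns the id (table position) of `name`, or None if it is unknown.
fn lookup(table: &[&str], name: impl AsRef<str>) -> Option<usize> {
    let name = name.as_ref().to_lowercase();
    table.iter().position(|&other| other == name)
}

fn main() {
    const FLOWERS: &[&str] = &["red cosmos", "white cosmos", "yellow cosmos"];
    assert_eq!(lookup(FLOWERS, "White Cosmos"), Some(1)); // case-insensitive hit
    assert_eq!(lookup(FLOWERS, "blue cosmos"), None);     // miss instead of panic
}
```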
id.rs
pub fn bug(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match BUGS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("bug '{}' has no id yet", name), } } pub fn fish(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FISH.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("fish '{}' has no id yet", name), } } pub fn fossil(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FOSSILS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("fossil '{}' has no id yet", name), } } pub fn flower(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FLOWERS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("flower '{}' has no id yet", name), } } pub fn
(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match ART.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("art '{}' has no id yet", name), } } pub fn villager(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match VILLAGERS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("villager '{}' has no id yet", name), } } const BUGS: &[&str] = &[ "common butterfly", "yellow butterfly", "tiger butterfly", "peacock butterfly", "common bluebottle", "paper kite butterfly", "great purple emperor", "monarch butterfly", "emperor butterfly", "agrias butterfly", "rajah brooke's birdwing", "queen alexandra's birdwing", "moth", "atlas moth", "madagascan sunset moth", "long locust", "migratory locust", "rice grasshopper", "grasshopper", "cricket", "bell cricket", "mantis", "orchid mantis", "honeybee", "wasp", "brown cicada", "robust cicada", "giant cicada", "walker cicada", "evening cicada", "cicada shell", "red dragonfly", "darner dragonfly", "banded dragonfly", "damselfly", "firefly", "mole cricket", "pondskater", "diving beetle", "giant water bug", "stinkbug", "man-faced stink bug", "ladybug", "tiger beetle", "jewel beetle", "violin beetle", "citrus long-horned beetle", "rosalia batesi beetle", "blue weevil beetle", "dung beetle", "earth-boring dung beetle", "scarab beetle", "drone beetle", "goliath beetle", "saw stag", "miyama stag", "giant stag", "rainbow stag", "cyclommatus stag", "golden stag", "giraffe stag", "horned dynastid", "horned atlas", "horned elephant", "horned hercules", "walking stick", "walking leaf", "bagworm", "ant", "hermit crab", "wharf roach", "fly", "mosquito", "flea", "snail", "pill bug", "centipede", "spider", "tarantula", "scorpion", ]; const FISH: &[&str] = &[ "bitterling", "pale chub", "crucian carp", "dace", "carp", "koi", "goldfish", "pop-eyed goldfish", "ranchu goldfish", "killifish", "crawfish", "soft-shelled turtle", "snapping turtle", "tadpole", "frog", "freshwater goby", "loach", "catfish", "giant snakehead", "bluegill", "yellow perch", "black bass", "tilapia", "pike", "pond smelt", "sweetfish", "cherry salmon", "char", "golden trout", "stringfish", "salmon", "king salmon", "mitten crab", "guppy", "nibble fish", "angelfish", "betta", "neon tetra", "rainbowfish", "piranha", "arowana", "dorado", "gar", "arapaima", "saddled bichir", "sturgeon", "sea butterfly", "sea horse", "clown fish", "surgeonfish", "butterfly fish", "napoleonfish", "zebra turkeyfish", "blowfish", "puffer fish", "anchovy", "horse mackerel", "barred knifejaw", "sea bass", "red snapper", "dab", "olive flounder", "squid", "moray eel", "ribbon eel", "tuna", "blue marlin", "giant trevally", "mahi-mahi", "ocean sunfish", "ray", "saw shark", "hammerhead shark", "great white shark", "whale shark", "suckerfish", "football fish", "oarfish", "barreleye", "coelacanth", ]; const FOSSILS: &[&str] = &[ "acanthostega", "amber", "ammonite", "anomalocaris", "archaeopteryx", "australopith", "coprolite", "dinosaur track", "dunkleosteus", "eusthenopteron", "juramaia", "myllokunmingia", "shark-tooth pattern", "trilobite", "ankylo skull", "ankylo torso", "ankylo tail", "archelon skull", "archelon tail", "brachio skull", "brachio chest", "brachio pelvis", "brachio tail", "deinony torso", "deinony tail", "dimetrodon skull", "dimetrodon torso", "diplo skull", "diplo neck", "diplo chest", "diplo pelvis", "diplo tail", "diplo tail tip", "iguanodon skull", "iguanodon torso", "iguanodon tail", "mammoth skull", "mammoth torso", 
"megacero skull", "megacero torso", "megacero tail", "left megalo side", "right megalo side", "ophthalmo skull", "ophthalmo torso", "pachysaurus skull", "pachysaurus tail", "parasaur skull", "parasaur torso", "parasaur tail", "plesio skull", "plesio tail", "plesio body", "right ptera wing", "ptera body", "left ptera wing", "right quetzal wing", "quetzal torso", "left quetzal wing", "sabertooth skull", "sabertooth tail", "spino skull", "spino torso", "spino tail", "stego skull", "stego torso", "stego tail", "tricera skull", "tricera torso", "tricera tail", "t. rex skull", "t. rex torso", "t. rex tail", ]; const FLOWERS: &[&str] = &[ "red cosmos", "white cosmos", "yellow cosmos", "pink cosmos", "orange cosmos", "black cosmos", "white tulips", "red tulips", "yellow tulips", "pink tulips", "orange tulips", "purple tulips", "black tulips", "yellow pansies", "red pansies", "white pansies", "orange pansies", "purple pansies", "blue pansies", "white roses", "red roses", "yellow roses", "pink roses", "orange roses", "purple roses", "black roses", "blue roses", "gold roses", "white lilies", "red lilies", "yellow lilies", "pink lilies", "orange lilies", "black lilies", "white windflowers", "orange windflowers", "red windflowers", "blue windflowers", "pink windflowers", "purple windflowers", "white hyacinths", "yellow hyacinths", "red hyacinths", "pink hyacinths", "orange hyacinths", "blue hyacinths", "purple hyacinths", "white mums", "yellow mums", "red mums", "purple mums", "pink mums", "green mums", ]; const ART: &[&str] = &[ "robust statue", "rock-head statue", "beautiful statue", "valiant statue", "gallant statue", "mystic statue", "informative statue", "warrior statue", "tremendous statue", "ancient statue", "motherly statue", "familiar statue", "great statue", "quaint painting", "graceful painting", "famous painting", "detailed painting", "basic painting", "serene painting", "amazing painting", "solemn painting", "scary painting", "jolly painting", "wistful painting", "moving painting", "wild painting left half", "wild painting right half", "scenic painting", "academic painting", "common painting", "flowery painting", "twinkling painting", "nice painting", "moody painting", "glowing painting", "perfect painting", "mysterious painting", "calm painting", "proper painting", "sinking painting", "worthy painting", "warm painting", "dynamic painting", ]; const VILLAGERS: &[&str] = &[ "amelia", "pierce", "apollo", "frank", "buzz", "sterling", "keaton", "celia", "avery", "deli", "tammi", "monty", "nana", "flip", "simon", "elise", "shari", "anabelle", "annalisa", "snooty", "pango", "olaf", "antonio", "cyrano", "beardo", "chow", "megan", "groucho", "grizzly", "klaus", "ike", "curt", "tutu", "nate", "paula", "pinky", "charlise", "teddy", "ursala", "filbert", "sally", "cally", "marshal", "agent s", "blaire", "nibbles", "sylvana", "mint", "hazel", "tasha", "pecan", "peanut", "caroline", "ricky", "static", "sheldon", "poppy", "axel", "dizzy", "big top", "tia", "ellie", "margie", "eloise", "opal", "paolo", "cyd", "tucker", "bill", "pate", "pompom", "derwin", "drake", "gloria", "scoot", "joey", "mallary", "molly", "deena", "ketchup", "freckles", "quillson", "maelle", "miranda", "weber", "croque", "drift", "diva", "henry", "frobert", "cousteau", "jambette", "jeremiah", "huck", "lily", "puddles", "tad", "prince", "ribbot", "raddle", "camofrog", "gigi", "wart jr.", "cesar", "boyd", "hans", "rocket", "al", "boone", "violet", "louie", "peewee", "flurry", "clay", "graham", "hamlet", "hamphrey", "apple", "rodney", 
"soleil", "genji", "chrissy", "claude", "dotty", "gabi", "gaston", "doc", "bonbon", "carmen", "cole", "coco", "pippy", "francine", "tiffany", "bunnie", "mira", "o'hare", "snake", "hopkins", "ruby", "benedict", "broffina", "ava", "ken", "becky", "goose", "plucky", "knox", "egbert", "maddie", "bea", "cherry", "goldie", "daisy", "walker", "butch", "portia", "biskit", "marcel", "cookie", "bones", "shep", "lucky", "benjamin", "mac", "astrid", "carrie", "kitt", "mathilda", "marcie", "rooney", "walt", "sylvia", "olivia", "lolly", "purrl", "raymond", "rudy", "katt", "bob", "punchy", "kabuki", "kitty", "kiki", "ankha", "mitzi", "felicity", "merry", "monique", "kid cat", "rosie", "stinky", "tangy", "tom", "moe", "tabby", "canberra", "gonzo", "alice", "melba", "ozzie", "lyman", "sydney", "eugene", "yuka", "alli", "drago", "del", "alfonso", "gayle", "sly", "boots", "tipper", "angus", "coach", "stu", "naomi", "vic", "t-bone", "norma", "patty", "rodeo", "bud", "leopold", "rory", "elvis", "mott", "lionel", "rex", "chadder", "dora", "anicotti", "broccolo", "greta", "bree", "rod", "moose", "penelope", "limberg", "candi", "rizzo", "samson", "bettina", "bella", "spike", "tank", "renée", "merengue", "rhonda", "hornsby", "bertha", "bitty", "bubbles", "hippeux", "harry", "biff", "rocco", "marina", "octavian", "zucker", "reneigh", "annalise", "cleo", "colton", "peaches", "elmer", "victoria", "papi", "ed", "julian", "roscoe", "buck", "clyde", "winnie", "savannah", "cube", "friga", "hopper", "gwen", "tex", "boomer", "iggly", "puck", "roald", "aurora", "wade", "flo", "sprinkle", "bam", "deirdre", "lopez", "erik", "fauna", "beau", "bruce", "fuchsia", "diana", "zell", "baabara", "vesta", "dom", "eunice", "cashmere", "curlos", "muffy", "frita", "willow", "pietro", "stella", "timbra", "wendy", "boris", "pancetti", "chops", "rasher", "hugh", "kevin", "lucy", "truffles", "maggie", "agnes", "curly", "gala", "peggy", "cobb", "spork", "blanche", "flora", "cranston", "phil", "queenie", "julia", "sprocket", "phoebe", "gladys", "sandy", "kody", "stitches", "pudge", "bluebear", "cheri", "chester", "barold", "june", "olive", "murphy", "judy", "maple", "pekoe", "tammy", "poncho", "vladimir", "tybalt", "bianca", "rolf", "rowan", "claudia", "bangle", "leonardo", "jitters", "midge", "admiral", "anchovy", "piper", "jay", "jacob", "robin", "lucha", "sparro", "peck", "jacques", "twiggy", "freya", "fang", "audie", "whitney", "lobo", "skye", "chief", "dobie", "vivian", "wolfgang", "kyle", "chevre", "kidd", "gruff", "billy", "sherb", "pashmina", "velma", "nan", ];
art
identifier_name
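Because every call rescans its table, each lookup is O(n) in the table length. Where call volume matters, the table can be indexed once into a `HashMap`; a sketch of `art` on top of `std::sync::OnceLock` (assumes Rust 1.70+; `ART` is truncated here for brevity):

```rust
use std::collections::HashMap;
use std::sync::OnceLock;

const ART: &[&str] = &["robust statue", "rock-head statue"]; // truncated

/// O(1) variant of `art`: the name -> id index is built on first use.
fn art(name: impl AsRef<str>) -> usize {
    static INDEX: OnceLock<HashMap<&'static str, usize>> = OnceLock::new();
    let index = INDEX.get_or_init(|| ART.iter().enumerate().map(|(i, &n)| (n, i)).collect());
    let name = name.as_ref().to_lowercase();
    *index
        .get(name.as_str())
        .unwrap_or_else(|| panic!("art '{}' has no id yet", name))
}

fn main() {
    assert_eq!(art("Rock-Head Statue"), 1);
}
```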
id.rs
pub fn bug(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match BUGS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("bug '{}' has no id yet", name), } } pub fn fish(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FISH.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("fish '{}' has no id yet", name), } } pub fn fossil(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FOSSILS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("fossil '{}' has no id yet", name), } } pub fn flower(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FLOWERS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("flower '{}' has no id yet", name), } } pub fn art(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match ART.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("art '{}' has no id yet", name), } } pub fn villager(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match VILLAGERS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("villager '{}' has no id yet", name), } } const BUGS: &[&str] = &[ "common butterfly", "yellow butterfly", "tiger butterfly", "peacock butterfly", "common bluebottle", "paper kite butterfly", "great purple emperor", "monarch butterfly", "emperor butterfly", "agrias butterfly", "rajah brooke's birdwing", "queen alexandra's birdwing", "moth", "atlas moth", "madagascan sunset moth", "long locust", "migratory locust", "rice grasshopper", "grasshopper", "cricket", "bell cricket", "mantis", "orchid mantis", "honeybee", "wasp", "brown cicada", "robust cicada", "giant cicada", "walker cicada", "evening cicada", "cicada shell", "red dragonfly", "darner dragonfly", "banded dragonfly", "damselfly", "firefly", "mole cricket", "pondskater", "diving beetle", "giant water bug", "stinkbug", "man-faced stink bug", "ladybug", "tiger beetle", "jewel beetle", "violin beetle", "citrus long-horned beetle", "rosalia batesi beetle", "blue weevil beetle", "dung beetle", "earth-boring dung beetle", "scarab beetle", "drone beetle", "goliath beetle", "saw stag", "miyama stag", "giant stag", "rainbow stag", "cyclommatus stag", "golden stag", "giraffe stag", "horned dynastid", "horned atlas", "horned elephant", "horned hercules", "walking stick", "walking leaf", "bagworm", "ant", "hermit crab", "wharf roach", "fly", "mosquito", "flea", "snail", "pill bug", "centipede", "spider", "tarantula", "scorpion", ]; const FISH: &[&str] = &[ "bitterling", "pale chub", "crucian carp", "dace", "carp", "koi", "goldfish", "pop-eyed goldfish", "ranchu goldfish", "killifish", "crawfish", "soft-shelled turtle", "snapping turtle", "tadpole", "frog", "freshwater goby", "loach", "catfish", "giant snakehead", "bluegill", "yellow perch", "black bass", "tilapia", "pike", "pond smelt", "sweetfish", "cherry salmon", "char", "golden trout", "stringfish", "salmon", "king salmon", "mitten crab", "guppy", "nibble fish", "angelfish", "betta", "neon tetra", "rainbowfish", "piranha", "arowana", "dorado", "gar", "arapaima", "saddled bichir", "sturgeon", "sea butterfly", "sea horse", "clown fish", "surgeonfish", "butterfly fish", "napoleonfish", "zebra turkeyfish", "blowfish", "puffer fish", "anchovy", "horse mackerel", "barred knifejaw", "sea bass", "red snapper", "dab", "olive flounder", "squid", "moray eel", "ribbon eel", 
"tuna", "blue marlin", "giant trevally", "mahi-mahi", "ocean sunfish", "ray", "saw shark", "hammerhead shark", "great white shark", "whale shark", "suckerfish", "football fish", "oarfish", "barreleye", "coelacanth", ]; const FOSSILS: &[&str] = &[ "acanthostega", "amber", "ammonite", "anomalocaris", "archaeopteryx", "australopith", "coprolite", "dinosaur track", "dunkleosteus", "eusthenopteron", "juramaia", "myllokunmingia", "shark-tooth pattern", "trilobite", "ankylo skull", "ankylo torso", "ankylo tail", "archelon skull", "archelon tail", "brachio skull", "brachio chest", "brachio pelvis", "brachio tail", "deinony torso", "deinony tail", "dimetrodon skull", "dimetrodon torso", "diplo skull", "diplo neck", "diplo chest", "diplo pelvis", "diplo tail", "diplo tail tip", "iguanodon skull", "iguanodon torso", "iguanodon tail", "mammoth skull", "mammoth torso", "megacero skull", "megacero torso", "megacero tail", "left megalo side", "right megalo side", "ophthalmo skull", "ophthalmo torso", "pachysaurus skull", "pachysaurus tail", "parasaur skull", "parasaur torso", "parasaur tail", "plesio skull", "plesio tail", "plesio body", "right ptera wing", "ptera body", "left ptera wing", "right quetzal wing", "quetzal torso", "left quetzal wing", "sabertooth skull", "sabertooth tail", "spino skull", "spino torso", "spino tail", "stego skull", "stego torso", "stego tail", "tricera skull", "tricera torso", "tricera tail", "t. rex skull", "t. rex torso", "t. rex tail", ]; const FLOWERS: &[&str] = &[ "red cosmos",
"black cosmos", "white tulips", "red tulips", "yellow tulips", "pink tulips", "orange tulips", "purple tulips", "black tulips", "yellow pansies", "red pansies", "white pansies", "orange pansies", "purple pansies", "blue pansies", "white roses", "red roses", "yellow roses", "pink roses", "orange roses", "purple roses", "black roses", "blue roses", "gold roses", "white lilies", "red lilies", "yellow lilies", "pink lilies", "orange lilies", "black lilies", "white windflowers", "orange windflowers", "red windflowers", "blue windflowers", "pink windflowers", "purple windflowers", "white hyacinths", "yellow hyacinths", "red hyacinths", "pink hyacinths", "orange hyacinths", "blue hyacinths", "purple hyacinths", "white mums", "yellow mums", "red mums", "purple mums", "pink mums", "green mums", ]; const ART: &[&str] = &[ "robust statue", "rock-head statue", "beautiful statue", "valiant statue", "gallant statue", "mystic statue", "informative statue", "warrior statue", "tremendous statue", "ancient statue", "motherly statue", "familiar statue", "great statue", "quaint painting", "graceful painting", "famous painting", "detailed painting", "basic painting", "serene painting", "amazing painting", "solemn painting", "scary painting", "jolly painting", "wistful painting", "moving painting", "wild painting left half", "wild painting right half", "scenic painting", "academic painting", "common painting", "flowery painting", "twinkling painting", "nice painting", "moody painting", "glowing painting", "perfect painting", "mysterious painting", "calm painting", "proper painting", "sinking painting", "worthy painting", "warm painting", "dynamic painting", ]; const VILLAGERS: &[&str] = &[ "amelia", "pierce", "apollo", "frank", "buzz", "sterling", "keaton", "celia", "avery", "deli", "tammi", "monty", "nana", "flip", "simon", "elise", "shari", "anabelle", "annalisa", "snooty", "pango", "olaf", "antonio", "cyrano", "beardo", "chow", "megan", "groucho", "grizzly", "klaus", "ike", "curt", "tutu", "nate", "paula", "pinky", "charlise", "teddy", "ursala", "filbert", "sally", "cally", "marshal", "agent s", "blaire", "nibbles", "sylvana", "mint", "hazel", "tasha", "pecan", "peanut", "caroline", "ricky", "static", "sheldon", "poppy", "axel", "dizzy", "big top", "tia", "ellie", "margie", "eloise", "opal", "paolo", "cyd", "tucker", "bill", "pate", "pompom", "derwin", "drake", "gloria", "scoot", "joey", "mallary", "molly", "deena", "ketchup", "freckles", "quillson", "maelle", "miranda", "weber", "croque", "drift", "diva", "henry", "frobert", "cousteau", "jambette", "jeremiah", "huck", "lily", "puddles", "tad", "prince", "ribbot", "raddle", "camofrog", "gigi", "wart jr.", "cesar", "boyd", "hans", "rocket", "al", "boone", "violet", "louie", "peewee", "flurry", "clay", "graham", "hamlet", "hamphrey", "apple", "rodney", "soleil", "genji", "chrissy", "claude", "dotty", "gabi", "gaston", "doc", "bonbon", "carmen", "cole", "coco", "pippy", "francine", "tiffany", "bunnie", "mira", "o'hare", "snake", "hopkins", "ruby", "benedict", "broffina", "ava", "ken", "becky", "goose", "plucky", "knox", "egbert", "maddie", "bea", "cherry", "goldie", "daisy", "walker", "butch", "portia", "biskit", "marcel", "cookie", "bones", "shep", "lucky", "benjamin", "mac", "astrid", "carrie", "kitt", "mathilda", "marcie", "rooney", "walt", "sylvia", "olivia", "lolly", "purrl", "raymond", "rudy", "katt", "bob", "punchy", "kabuki", "kitty", "kiki", "ankha", "mitzi", "felicity", "merry", "monique", "kid cat", "rosie", "stinky", "tangy", "tom", "moe", "tabby", 
"canberra", "gonzo", "alice", "melba", "ozzie", "lyman", "sydney", "eugene", "yuka", "alli", "drago", "del", "alfonso", "gayle", "sly", "boots", "tipper", "angus", "coach", "stu", "naomi", "vic", "t-bone", "norma", "patty", "rodeo", "bud", "leopold", "rory", "elvis", "mott", "lionel", "rex", "chadder", "dora", "anicotti", "broccolo", "greta", "bree", "rod", "moose", "penelope", "limberg", "candi", "rizzo", "samson", "bettina", "bella", "spike", "tank", "renée", "merengue", "rhonda", "hornsby", "bertha", "bitty", "bubbles", "hippeux", "harry", "biff", "rocco", "marina", "octavian", "zucker", "reneigh", "annalise", "cleo", "colton", "peaches", "elmer", "victoria", "papi", "ed", "julian", "roscoe", "buck", "clyde", "winnie", "savannah", "cube", "friga", "hopper", "gwen", "tex", "boomer", "iggly", "puck", "roald", "aurora", "wade", "flo", "sprinkle", "bam", "deirdre", "lopez", "erik", "fauna", "beau", "bruce", "fuchsia", "diana", "zell", "baabara", "vesta", "dom", "eunice", "cashmere", "curlos", "muffy", "frita", "willow", "pietro", "stella", "timbra", "wendy", "boris", "pancetti", "chops", "rasher", "hugh", "kevin", "lucy", "truffles", "maggie", "agnes", "curly", "gala", "peggy", "cobb", "spork", "blanche", "flora", "cranston", "phil", "queenie", "julia", "sprocket", "phoebe", "gladys", "sandy", "kody", "stitches", "pudge", "bluebear", "cheri", "chester", "barold", "june", "olive", "murphy", "judy", "maple", "pekoe", "tammy", "poncho", "vladimir", "tybalt", "bianca", "rolf", "rowan", "claudia", "bangle", "leonardo", "jitters", "midge", "admiral", "anchovy", "piper", "jay", "jacob", "robin", "lucha", "sparro", "peck", "jacques", "twiggy", "freya", "fang", "audie", "whitney", "lobo", "skye", "chief", "dobie", "vivian", "wolfgang", "kyle", "chevre", "kidd", "gruff", "billy", "sherb", "pashmina", "velma", "nan", ];
"white cosmos", "yellow cosmos", "pink cosmos", "orange cosmos",
random_line_split
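Since an item's id is nothing more than its position in its table, an accidental duplicate entry would silently alias two items to one id (only the first occurrence is ever found by the scan). A sketch of a guard test one could add for tables like `FLOWERS` (hypothetical, not in the original file):

```rust
use std::collections::HashSet;

/// Panics if `table` contains the same name twice.
fn assert_unique(table: &[&str]) {
    let mut seen = HashSet::new();
    for &name in table {
        assert!(seen.insert(name), "duplicate entry: {}", name);
    }
}

fn main() {
    const FLOWERS: &[&str] = &["red cosmos", "white cosmos", "yellow cosmos"];
    assert_unique(FLOWERS);
}
```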
main.rs
use std::f64::consts::PI; use clap::*; use gre::*; use noise::*; use rand::Rng; use svg::node::element::path::Data; use svg::node::element::*; #[derive(Parser)] #[clap()] pub struct Opts { #[clap(short, long, default_value = "image.svg")] file: String, #[clap(short, long, default_value = "100.0")] pub width: f64, #[clap(short, long, default_value = "150.0")] pub height: f64, #[clap(short, long, default_value = "5.0")] pub pad: f64, #[clap(short, long, default_value = "0.0")] pub seed: f64, #[clap(short, long, default_value = "0.0")] pub seed1: f64, #[clap(short, long, default_value = "0.0")] pub seed2: f64, #[clap(short, long, default_value = "0.0")] pub seed3: f64, } fn shake<R: Rng>( path: Vec<(f64, f64)>, scale: f64, rng: &mut R, ) -> Vec<(f64, f64)> { path .iter() .map(|&(x, y)| { let dx = rng.gen_range(-scale, scale); let dy = rng.gen_range(-scale, scale); (x + dx, y + dy) }) .collect() } fn eagle<R: Rng>( origin: (f64, f64), scale: f64, rotation: f64, xreverse: bool, rng: &mut R, ) -> Vec<Vec<(f64, f64)>> { let xmul = if xreverse { -1.0 } else { 1.0 }; let count = 2 + (scale * 3.0) as usize; let mut routes: Vec<Vec<(f64, f64)>> = Vec::new(); let shaking = scale * 0.1; // body let bodyw = 5.0; let bodyh = 1.5; let headcompression = rng.gen_range(0.1, 0.5); let headoff = rng.gen_range(0.1, 0.5); for i in 0..count { let yp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64); let ybase = bodyh * yp; let route = shake( path_subdivide_to_curve( vec![ (-rng.gen_range(0.4, 0.6) * bodyw, 1.5 * ybase), (-0.3 * bodyw, ybase), (0.2 * bodyw, ybase), (0.45 * bodyw, headcompression * ybase + headoff * bodyh), ], 1, 0.8, ), shaking, rng, ); routes.push(route); } let count = 2 + (scale * rng.gen_range(4.0, 6.0)) as usize; // wings let wingw = 1.4; let wingh = 8.0; let dx1 = rng.gen_range(-4.0, 4.0) * rng.gen_range(0.0, 1.0); let dx2 = if rng.gen_bool(0.8) { -dx1 } else { rng.gen_range(-3.0, 3.0) }; let spread1 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); let spread2 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); let offset1 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0); let offset2 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0); let interp = 0.5; let wing1m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0); let wing2m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0); let wing2up = rng.gen_bool(0.5); for i in 0..count { let xp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64); let xbase = wingw * xp; let wing1 = rng.gen_range(0.8, 1.1) * wing1m; let wing2 = rng.gen_range(0.8, 1.1) * wing2m * (if wing2up { -1.0 } else { 1.0 }); let route = shake( path_subdivide_to_curve( vec![ ( xbase * spread1 + dx1 + wingw * offset1, -wingh * 0.5 * wing1, ), (xbase + dx1 * interp, -wingh * 0.5 * interp * wing1), (xbase, 0.0), (xbase + dx2 * interp, wingh * 0.5 * interp * wing2), (xbase * spread2 + dx2 + wingw * offset2, wingh * 0.5 * wing2), ], 2, 0.8, ), shaking, rng, ); routes.push(route); } // scale, rotate & translate routes .iter() .map(|route| { route .iter() .map(|&p| { let p = p_r(p, rotation); (xmul * scale * p.0 + origin.0, scale * p.1 + origin.1) }) .collect() }) .collect() } fn art(opts: &Opts) -> Vec<Group> { let height = opts.height; let width = opts.width; let pad = opts.pad; let mut rng = rng_from_seed(opts.seed); let perlin = Perlin::new(); let mut passage = Passage::new(0.5, width, height); let passage_threshold = 5; let min_route = 2; let peakfactor = rng.gen_range(-0.001, 0.001) * rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); 
  let ynoisefactor = rng.gen_range(0.02, 0.1);
  let yincr = rng.gen_range(1.5, 3.0);
  let amp2 = rng.gen_range(1.0, 12.0);
  let precision = rng.gen_range(0.1, 0.3);
  let offsetstrategy = rng.gen_range(0, 5);

  let mut routes = Vec::new();
  let mut cave_spawned = false;
  let cave_threshold = rng.gen_range(0.5, 0.9) * height;
  let mut cave_initial_pos = Vec::new();
  let w = rng.gen_range(0.05, 0.2);
  let mut base_y = height * 5.0;
  let mut miny = height;
  let stopy = rng.gen_range(0.2, 0.5) * height;
  let mut height_map: Vec<f64> = Vec::new();
  loop {
    if miny < stopy {
      break;
    }
    if miny < cave_threshold && !cave_spawned {
      cave_spawned = true;
      let xfrom = (0.5 - w / 2.0) * width;
      let xto = (0.5 + w / 2.0) * width;
      let yamp = rng.gen_range(8.0, 24.0);
      let mut x = xfrom;
      let mut route = Vec::new();
      loop {
        if x > xto {
          break;
        }
        let xi = (x / precision) as usize;
        let ybottom = height_map[xi].min(height - pad);
        let ytop = ybottom
          - yamp
            * (0.4 * perlin.get([opts.seed * 3.1 + 5.4, x * 0.01]).abs()
              + 0.6
                * (1.0
                  - (((x - xfrom) / (xto - xfrom) - 0.5) * 2.0).powf(2.0)));
        height_map[xi] = ytop;
        cave_initial_pos.push((x, ytop));
        if (route.len() / 2) % 2 == 0 {
          route.push((x, ybottom));
          route.push((x, ytop));
        } else {
          route.push((x, ytop));
          route.push((x, ybottom));
        }
        x += precision;
      }
      routes.push(route);
    }
    let mut route = Vec::new();
    let mut x = pad;
    let mut was_outside = true;
    loop {
      if x > width - pad {
        break;
      }
      let xv = (4.0 - base_y / height) * (x - width / 2.);
      let amp = height * 0.05;
      let mut y = base_y;
      if offsetstrategy == 0 {
        y += amp * peakfactor * xv * xv;
      }
      y += -amp
        * perlin
          .get([
            //
            xv * 0.005,
            y * 0.02,
            77. + opts.seed / 7.3
              + perlin.get([
                //
                -opts.seed * 7.3,
                8.3 + xv * 0.02,
                y * 0.1,
              ]),
          ])
          .abs();
      if offsetstrategy == 1 {
        y += amp * peakfactor * xv * xv;
      }
      y += amp2
        * amp
        * perlin.get([
          //
          8.3 + xv * 0.01,
          88.1 + y * ynoisefactor,
          opts.seed * 97.3,
        ]);
      if offsetstrategy == 2 {
        y += amp * peakfactor * xv * xv;
      }
      y += amp
        * perlin.get([
          //
          opts.seed * 9.3 - 77.,
          xv * 0.1,
          y * 0.5,
        ])
        * perlin
          .get([
            //
            xv * 0.02,
            88.1 + y * 0.2,
            -opts.seed / 7.7,
          ])
          .min(0.0);
      if offsetstrategy == 3 {
        y += amp * peakfactor * xv * xv;
      }
      y += 0.1
        * amp
        * (1.0 - miny / height)
        * perlin.get([
          //
          66666. + opts.seed * 1.3,
          88.3 + xv * 0.5,
          88.1 + y * 0.5,
        ]);
      if offsetstrategy == 4 {
        y += amp * peakfactor * xv * xv;
      }
      if y < miny {
        miny = y;
      }
      let mut collides = false;
      let xi = (x / precision) as usize;
      if xi >= height_map.len() {
        height_map.push(y);
      } else {
        if y > height_map[xi] {
          collides = true;
        } else {
          height_map[xi] = y;
        }
      }
      let inside =
        !collides && pad < x && x < width - pad && pad < y && y < height - pad;
      if inside && passage.get((x, y)) < passage_threshold {
        if was_outside {
          if route.len() > min_route {
            routes.push(route);
          }
          route = Vec::new();
        }
        was_outside = false;
        route.push((x, y));
        passage.count((x, y));
      } else {
        was_outside = true;
      }
      x += precision;
    }
    if route.len() > min_route {
      routes.push(route);
    }
    base_y -= yincr;
  }

  let radius = 6.0;
  passage.grow_passage(radius);

  rng.shuffle(&mut cave_initial_pos);

  let mut positions = Vec::new();
  for i in 0..rng.gen_range(4, 12) {
    if i >= cave_initial_pos.len() - 1 {
      break;
    }
    let initial = cave_initial_pos[i];
    let mut a = -PI / 2. + rng.gen_range(-1.0, 1.0) * rng.gen_range(0.5, 1.0);
    let mut p = initial;
    let amp = 3.0;
    let pad = pad * 2.;
    loop {
      if p.0 < pad || p.0 > width - pad || p.1 < pad || p.1 > height - pad {
        break;
      }
      p = (p.0 + amp * a.cos(), p.1 + amp * a.sin());
      positions.push(p);
      a += rng.gen_range(-0.5, 0.5) * rng.gen_range(0.0, 1.0);
    }
  }

  rng.shuffle(&mut positions);

  let disp = rng.gen_range(0.5, 3.0);
  let mut eagles = Vec::new();
  for p in positions {
    if rng.gen_bool(0.2) {
      continue;
    }
    let scale = rng.gen_range(0.3, 0.5);
    let p = (
      p.0 + disp * rng.gen_range(-1.0, 1.0),
      p.1 + disp * rng.gen_range(-1.0, 1.0),
    );
    if p.0 < pad || p.0 > width - pad || p.1 < pad || p.1 > height - pad {
      continue;
    }
    eagles.push(eagle(
      p,
      scale,
      rng.gen_range(-1.0, 1.0),
      rng.gen_bool(0.5),
      &mut rng,
    ));
  }
  routes = vec![routes, eagles.concat()].concat();

  for i in 0..10 {
    let d = i as f64 * 0.25;
    routes.push(vec![
      (pad + d, pad + d),
      (pad + d, height - pad - d),
      (width - pad - d, height - pad - d),
      (width - pad - d, pad + d),
      (pad + d, pad + d),
    ]);
  }

  let color = "black";
  let mut data = Data::new();
  for route in routes.clone() {
    data = render_route(data, route);
  }
  let mut l = layer(color);
  l = l.add(base_path(color, 0.35, data));
  vec![l]
}

fn main() {
  let opts: Opts = Opts::parse();
  let groups = art(&opts);
  let mut document = base_document("white", opts.width, opts.height);
  for g in groups {
    document = document.add(g);
  }
  svg::save(opts.file, &document).unwrap();
}

#[derive(Clone)]
struct Passage {
  precision: f64,
  width: f64,
  height: f64,
  counters: Vec<usize>,
}

impl Passage {
  pub fn new(precision: f64, width: f64, height: f64) -> Self {
    let wi = (width / precision).ceil() as usize;
    let hi = (height / precision).ceil() as usize;
    let counters = vec![0; wi * hi];
    Passage {
      precision,
      width,
      height,
      counters,
    }
  }

  fn index(self: &Self, (x, y): (f64, f64)) -> usize {
    let wi = (self.width / self.precision).ceil() as usize;
    let hi = (self.height / self.precision).ceil() as usize;
    let xi = ((x / self.precision).round() as usize).max(0).min(wi - 1);
    let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1);
    yi * wi + xi
  }

  pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
    let i = self.index(p);
    let v = self.counters[i] + 1;
    self.counters[i] = v;
    v
  }

  pub fn count_once(self: &mut Self, p: (f64, f64)) {
    let i = self.index(p);
    let v = self.counters[i];
    if v == 0 {
      self.counters[i] = 1;
    }
  }

  pub fn get(self: &Self, p: (f64, f64)) -> usize {
    let i = self.index(p);
    self.counters[i]
  }

  pub fn grow_passage(self: &mut Self, radius: f64) {
    let precision = self.precision;
    let width = self.width;
    let height = self.height;
    let counters: Vec<usize> = self.counters.iter().cloned().collect();
    let mut mask = Vec::new();
    // TODO: for even better perf in the future, rewrite this to work
    // directly with integer indices instead of going through
    // index() / count_once()
    let mut x = -radius;
    loop {
      if x >= radius {
        break;
      }
      let mut y = -radius;
      loop {
        if y >= radius {
          break;
        }
        if x * x + y * y < radius * radius {
          mask.push((x, y));
        }
        y += precision;
      }
      x += precision;
    }

    let mut x = 0.0;
    loop {
      if x >= width {
        break;
      }
      let mut y = 0.0;
      loop {
        if y >= height {
          break;
        }
        let index = self.index((x, y));
        if counters[index] > 0 {
          for &(dx, dy) in mask.iter() {
            self.count_once((x + dx, y + dy));
          }
        }
        y += precision;
      }
      x += precision;
    }
  }
}

fn lerp_point(a: (f64, f64), b: (f64, f64), m: f64) -> (f64, f64) {
  (a.0 * (1. - m) + b.0 * m, a.1 * (1. - m) + b.1 * m)
}

fn path_subdivide_to_curve_it(
  path: Vec<(f64, f64)>,
  interpolation: f64,
) -> Vec<(f64, f64)> {
  let l = path.len();
  if l < 3 {
    return path;
  }
  let mut route = Vec::new();
  let mut first = path[0];
  let mut last = path[l - 1];
  let looped = euclidian_dist(first, last) < 0.1;
  if looped {
    first = lerp_point(path[1], first, interpolation);
  }
  route.push(first);
  for i in 1..(l - 1) {
    let p = path[i];
    let p1 = lerp_point(path[i - 1], p, interpolation);
    let p2 = lerp_point(path[i + 1], p, interpolation);
    route.push(p1);
    route.push(p2);
  }
  if looped {
    last = lerp_point(path[l - 2], last, interpolation);
  }
  route.push(last);
  if looped {
    route.push(first);
  }
  route
}

fn path_subdivide_to_curve(
  path: Vec<(f64, f64)>,
  n: usize,
  interpolation: f64,
) -> Vec<(f64, f64)> {
  let mut route = path;
  for _i in 0..n {
    route = path_subdivide_to_curve_it(route, interpolation);
  }
  route
}
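A quick way to sanity-check the two geometry helpers above is a small test module. The sketch below is not part of the original file (the module and test names are made up), but it only calls `lerp_point` and `path_subdivide_to_curve` as defined here: one subdivision pass over an open l-point path keeps both endpoints and replaces each interior point with two interpolated ones, so the output has 2l - 2 points.

#[cfg(test)]
mod sketch_tests {
  use super::*;

  #[test]
  fn lerp_point_midpoint() {
    // m = 0.5 lands exactly halfway between the two points
    assert_eq!(lerp_point((0.0, 0.0), (10.0, 10.0), 0.5), (5.0, 5.0));
  }

  #[test]
  fn subdivision_grows_open_paths() {
    // Open path: the first and last points are far apart, so the
    // `looped` branch is skipped. 4 points -> 2 * 4 - 2 = 6 points
    // after a single pass.
    let path = vec![(0.0, 0.0), (10.0, 0.0), (10.0, 10.0), (0.0, 10.0)];
    let out = path_subdivide_to_curve(path, 1, 0.66);
    assert_eq!(out.len(), 6);
  }
}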
leetcode.rs
//! The common data structure definitions for leetcode problems.

/** The definition of `ListNode`, used by many problems. */
#[derive(PartialEq, Eq, Debug)]
pub(crate) struct ListNode {
    val: i32,
    next: Option<Box<Self>>,
}

impl ListNode {
    #[inline]
    fn new(val: i32) -> Self {
        Self { next: None, val }
    }
}

trait ToListNode {
    fn to_list_node(self, reverse: bool) -> Option<Box<ListNode>>;
}

impl ToListNode for i32 {
    /// Convert a number to the list of every digit of the number.
    fn to_list_node(mut self, reverse: bool) -> Option<Box<ListNode>> {
        let mut vec = vec![];
        while self / 10 > 0 {
            vec.push(self % 10);
            self /= 10;
        }
        vec.push(self % 10);
        // the sequence of the vec is opposite of the number
        vec.to_list_node(!reverse)
    }
}

impl ToListNode for Vec<i32> {
    /// Build a list node from the vector of the numbers.
    fn to_list_node(mut self, reverse: bool) -> Option<Box<ListNode>> {
        let mut next = None;
        if !reverse {
            self.reverse();
        }
        for val in self {
            next = Some(Box::new(ListNode { val, next }));
        }
        next
    }
}

trait ToVec {
    fn to_num_vec(self) -> Vec<i32>;
    fn to_node_vec(self) -> Vec<Option<Box<ListNode>>>;
}

impl ToVec for Option<Box<ListNode>> {
    /// Build the vector of the numbers from a list node.
    fn to_num_vec(self) -> Vec<i32> {
        let (mut vec, mut temp) = (vec![], &self);
        while let Some(n) = temp {
            vec.push(n.val);
            temp = &n.next;
        }
        vec
    }

    /// Build the vector of the nodes from a list node.
    fn to_node_vec(self) -> Vec<Option<Box<ListNode>>> {
        let (mut vec, mut current) = (vec![], self);
        while let Some(v) = current.as_mut() {
            // use Option::take() to take the value out of the Option,
            // leaving a None in its place
            // let node = std::mem::replace(&mut v.next, None);
            let node = v.next.take();
            vec.push(current);
            current = node;
        }
        vec
    }
}

use std::{cell::RefCell, rc::Rc};

/// The definition of a binary tree node (`TreeNode`), used by many problems.
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct TreeNode {
    val: i32,
    left: Option<Rc<RefCell<Self>>>,
    right: Option<Rc<RefCell<Self>>>,
}

impl TreeNode {
    #[inline]
    fn new(val: i32) -> Self {
        TreeNode {
            val,
            left: None,
            right: None,
        }
    }

    #[inline]
    fn new_option(val: Option<i32>) -> Option<Rc<RefCell<Self>>> {
        val.map(|v| Rc::new(RefCell::new(Self::new(v))))
    }

    /** Build a binary tree from `Vec<Option<i32>>`: `Some` means a valued node, `None` means an empty node.

    For example, `[Some(1), Some(2), Some(3), Some(4), Some(5), Some(6)]` will be transformed to:

    ```html
         1
       /   \
      2     3
     / \   /
    4   5 6
    ```

    `[Some(1), Some(2), Some(3), Some(4), None, Some(5), None, Some(6)]` will be transformed to:

    ```html
         1
       /   \
      2     3
     / \   / \
    4   N 5   N
    /
    6
    ```

    `[Some(7), Some(5), Some(11), Some(4), None, Some(8), Some(13), Some(2), None, None, None, Some(12)]` will be transformed to:

    ```html
          7
        /   \
       5     11
      / \   /  \
     4   N 8    13
    / \   / \   /
    2  N  N  N 12
    ```
    */
    fn from(vec: Vec<Option<i32>>) -> Option<Rc<RefCell<Self>>> {
        use std::collections::VecDeque;
        let mut root = None; // save the root node
        // save the pointers to the child nodes
        let mut nodes: VecDeque<*mut Option<Rc<RefCell<Self>>>> = Default::default();
        for v in vec {
            // use the macro to deal with the child nodes
            macro_rules! update {
                ($node: expr) => {
                    if let Some(n) = &*$node {
                        // add the pointers of the child nodes, using raw pointers
                        // to avoid the ownership check;
                        // saving the raw pointers of a new tree node's children
                        // doesn't need UNSAFE
                        nodes.push_back(&mut n.borrow_mut().left);
                        nodes.push_back(&mut n.borrow_mut().right);
                    }
                };
            }
            let node = Self::new_option(v); // new tree node
            if root.is_none() {
                root = node;
                update!(&root);
            } else if let Some(current) = nodes.pop_front() {
                unsafe {
                    // only dereferencing the raw pointer needs to be under UNSAFE
                    *current = node;
                    update!(current);
                }
            }
        }
        root
    }
}

/// For `q15` and `q18`, check if the target is included in the **vec_list**.
fn check_vecs_contain_target(vec_list: &Vec<Vec<i32>>, target: &Vec<i32>) -> bool {
    for old_vec in vec_list {
        let mut new_vec = target.clone();
        for old_val in old_vec {
            for i in 0..new_vec.len() {
                // check if the target vec has an equal element in old_vec
                if old_val == &new_vec[i] {
                    new_vec.remove(i);
                    break;
                }
            }
        }
        // if all elements have been removed, the vec is a duplicate
        if new_vec.is_empty() {
            return true;
        }
    }
    false
}

/// For `q126` and `q127`, check if two words differ by only one character.
fn check_diff_one_char(old_word: &String, new_word: &String) -> bool {
    let mut count = 0;
    let (old_u8s, new_u8s): (&[u8], &[u8]) = (old_word.as_ref(), new_word.as_ref());
    for i in 0..old_u8s.len() {
        if old_u8s[i] != new_u8s[i] {
            count += 1;
            if count > 1 {
                return false;
            }
        }
    }
    count == 1
}

/// Check element content equivalence without element order.
fn check_element_eq<T>(v1: T, v2: T) -> bool
where
    T: IntoIterator,
    T::Item: Eq + std::hash::Hash + std::fmt::Debug,
{
    use std::collections::HashMap;
    let (mut length1, mut length2) = (0, 0);
    let (mut content1, mut content2) = (HashMap::new(), HashMap::new());
    for v in v1 {
        length1 += 1;
        *content1.entry(v).or_insert(0) += 1;
    }
    for v in v2 {
        length2 += 1;
        *content2.entry(v).or_insert(0) += 1;
    }
    let eq = content1 == content2 && length1 == length2;
    if !eq {
        println!("Elements are different!\nLength 1: {length1}, Length 2: {length2}");
        println!("Content 1: {content1:?}\nContent 2: {content2:?}");
    }
    eq
}

/**
Unlike everything else in the language, macros remain visible in sub-modules.

Also unlike everything else in the language, macros are only accessible after their definition.

Alternatively, use `#[macro_export]` to export the macro, then invoke it as `crate::xxx_macro_name!`.
*/
macro_rules! string_vec {
    ($($content:expr),*) => {{
        let mut temp = vec![];
        $(temp.push($content.to_string());)*
        temp
    }}
}

/// Provide a macro to build a TreeNode, directly reusing the test case syntax from LeetCode.
macro_rules! build_tree_node {
    () => { None };
    // The macro matcher type 'tt' means "a single token tree",
    // which allows an independent sub token tree to be passed to another macro.
    // As of the current rust version (1.58),
    // only a positive number or zero is treated as a single token;
    // a negative number is not.
    ($($t:tt),*) => {{
        let mut temp = vec![];
        $(temp.push(covert_tree_node!($t));)*
        TreeNode::from(temp)
    }};
}

// Use a macro to transform the input content.
macro_rules! covert_tree_node {
    (null) => { None };
    ($l:literal) => { Some($l) };
}

// normal problems
mod q1008_construct_binary_search_tree_from_preorder_traversal;
mod q102_binary_tree_level_order_traversal;
mod q103_binary_tree_zipzag_level_order_traversal;
mod q107_binary_tree_level_order_traversal_ii;
mod q10_regular_expression_matching;
mod q11_container_with_most_water;
mod q126_word_ladder_ii;
mod q127_word_ladder;
mod q12_integer_to_roman;
mod q16_three_sum_closest;
mod q17_letter_combinations_of_a_phone_number;
mod q18_four_sum;
mod q19_remove_nth_node_from_end_of_list;
mod q200_number_of_islands;
mod q208_implement_trie;
mod q212_word_search_ii;
mod q22_generate_parentheses;
mod q23_merge_k_sorted_lists;
mod q24_swap_nodes_in_pairs;
mod q25_reverse_nodes_in_k_group;
mod q29_divide_two_integers;
mod q2_add_two_numbers;
mod q30_substring_with_concatenation_of_all_words;
mod q31_next_permutation;
mod q32_longest_valid_parentheses;
mod q33_search_in_rotated_sorted_array;
mod q34_find_first_and_last_position_of_element_in_sorted_array;
mod q35_valid_sudoku;
mod q37_sudoku_solver;
mod q39_combination_sum;
mod q3_length_of_longest_substring;
mod q407_trapping_rain_water_ii;
mod q40_combination_sum_ii;
mod q41_first_missing_positive;
mod q42_trapping_rain_water;
mod q43_multiply_strings;
mod q44_wildcard_matching;
mod q454_four_sum_ii;
mod q45_jump_game_ii;
mod q46_permutations;
mod q47_permutations_ii;
mod q48_rotate_image;
mod q49_group_anagrams;
mod q4_find_median_sorted_arrays;
mod q50_pow_x_n;
mod q51_n_queens;
mod q525_contiguous_array;
mod q52_n_queens_ii;
mod q53_maximum_subarray;
mod q543_diameter_of_binary_tree;
mod q54_spiral_matrix;
mod q55_jump_game;
mod q56_merge_intervals;
mod q57_insert_interval;
mod q59_spiral_matrix_ii;
mod q5_longest_palindrome;
mod q60_permutation_sequence;
mod q61_rotate_list;
mod q62_unique_paths;
mod q63_unique_paths_ii;
mod q64_minimum_path_sum;
mod q65_valid_number;
mod q68_text_justification;
mod q6_zipzag_conversion;
mod q71_simplify_path;
mod q72_edit_distance;
mod q73_set_matrix_zeroes;
mod q74_search_a_2d_matrix;
mod q75_sort_colors;
mod q76_minimum_window_substring;
mod q77_combinations;
mod q78_subsets;
mod q79_word_search;
mod q7_reverse_integer;
mod q80_remove_duplicates_from_sorted_array_ii;
mod q81_search_in_rotated_sorted_array_ii;
mod q82_remove_duplicates_from_sorted_list_ii;
mod q844_backspace_string_compare;
mod q84_largest_rectangle_in_histogram;
mod q85_maximal_rectangle;
mod q86_partition_list;
mod q87_scramble_string;
mod q89_gray_code;
mod q8_my_atoi;
mod q90_subsets_ii;
mod q91_decode_ways;
mod q92_reverse_linked_list_ii;
mod q93_restore_ip_addresses;
mod q94_binary_tree_inorder_traversal;
mod q95_unique_binary_search_trees_ii;
mod q96_unique_binary_search_trees;
mod q97_interleaving_string;
mod q98_validate_binary_search_tree;
mod q99_recover_binary_search_tree;

// some extra problems can only be found in "30-Day LeetCoding Challenge"
mod day_30_leetcoding_challenge;

// mod q834_sum_of_distances_in_tree; // DNF
// mod q105_construct_binary_tree_from_preorder_and_inorder_traversal; // DNF
// mod q814_binary_tree_pruning;
// mod q173_binary_search_tree_iterator;
// mod q958_check_completeness_of_a_binary_tree;
// mod q639_decode_ways_ii; // need explain
// mod q124_binary_tree_maximum_path_sum;
// mod q221_maximal_square;
// mod q1143_longest_common_subsequence;
// mod q146_lru_cache;
// mod q201_bitwise_and_of_numbers_range;
// mod q560_subarray_sum_equals_k;
// mod q678_valid_parenthesis_string;
// mod q238_product_of_array_except_self;
// mod q1046_last_stone_weight;
// mod q155_min_stack;
// mod q876_middle_of_the_linked_list;
// mod q122_best_time_to_buy_and_sell_stock_ii;
// mod q283_move_zeroes;
// mod q136_single_number;
// mod q202_happy_number;
// mod q328_odd_even_linked_list;
// mod q725_split_linked_list;
// mod q885_spiral_matrix_iii;
// mod q143_recoder_list;
// mod q216_combination_sum_iii;
// mod q377_combination_sum_iv;
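As a usage sketch of the helpers above (a hypothetical test module, not part of the original file): `to_list_node(false)` turns 123 into the list 1 -> 2 -> 3 and `to_num_vec` reads it back, while `build_tree_node!` accepts LeetCode's test-case syntax with `null` for missing nodes. Both macros are in scope here because the module sits after their definitions.

#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn list_round_trip() {
        // 123 -> 1 -> 2 -> 3 (most significant digit first when reverse = false)
        let list = 123.to_list_node(false);
        assert_eq!(list.to_num_vec(), vec![1, 2, 3]);
    }

    #[test]
    fn build_small_tree() {
        // [1, null, 2] builds root 1 with no left child and right child 2
        let root = build_tree_node!(1, null, 2).unwrap();
        assert_eq!(root.borrow().val, 1);
        assert!(root.borrow().left.is_none());
        assert_eq!(root.borrow().right.as_ref().unwrap().borrow().val, 2);
    }
}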
TreeNode
identifier_name
leetcode.rs
//! The common data structure definition for leetcode problems. /** The definition of `ListNode`, used by many problems. */ #[derive(PartialEq, Eq, Debug)] pub(crate) struct ListNode { val: i32, next: Option<Box<Self>>, } impl ListNode { #[inline] fn new(val: i32) -> Self { Self { next: None, val } } } trait ToListNode { fn to_list_node(self, reverse: bool) -> Option<Box<ListNode>>; } impl ToListNode for i32 { /// Convert a number to the list of every bit of the number. fn to_list_node(mut self, reverse: bool) -> Option<Box<ListNode>> { let mut vec = vec![]; while self / 10 > 0 { vec.push(self % 10); self /= 10; } vec.push(self % 10); // the sequence of the vec is opposite of the number vec.to_list_node(!reverse) } } impl ToListNode for Vec<i32> { /// Build list node from the vector of the numbers. fn to_list_node(mut self, reverse: bool) -> Option<Box<ListNode>> { let mut next = None; if!reverse { self.reverse(); } for val in self { next = Some(Box::new(ListNode { val, next })); } next } } trait ToVec { fn to_num_vec(self) -> Vec<i32>; fn to_node_vec(self) -> Vec<Option<Box<ListNode>>>; } impl ToVec for Option<Box<ListNode>> { /// Build the vector of the numbers from the a list node. fn to_num_vec(self) -> Vec<i32> { let (mut vec, mut temp) = (vec![], &self); while let Some(n) = temp { vec.push(n.val); temp = &n.next; } vec } /// Build the vector of the node from the a list node. fn to_node_vec(self) -> Vec<Option<Box<ListNode>>> { let (mut vec, mut current) = (vec![], self); while let Some(v) = current.as_mut() { // use Option::take() to take the value out of the Option, and then leaving a None in its place. // let node = std::mem::replace(&mut v.next, None); let node = v.next.take(); vec.push(current); current = node; } vec } } use std::{cell::RefCell, rc::Rc}; /// The definition of a binary tree node (`ListNode`), used by many problems. #[derive(Debug, PartialEq, Eq)] pub(crate) struct TreeNode { val: i32, left: Option<Rc<RefCell<Self>>>, right: Option<Rc<RefCell<Self>>>, } impl TreeNode { #[inline] fn new(val: i32) -> Self { TreeNode { val, left: None, right: None, } } #[inline] fn new_option(val: Option<i32>) -> Option<Rc<RefCell<Self>>> { val.map(|v| Rc::new(RefCell::new(Self::new(v)))) } /** Building binary tree from `Vec<Option<i32>>`, Some means valued node, None means empty node. For example: `[Some(1), Some(2), Some(3), Some(4), Some(5), Some(6)]` will be transformed to: ```html 1 / \ 2 3 / \ / 4 5 6 ``` `[Some(1), Some(2), Some(3), Some(4), None, Some(5), None, Some(6)]` will be transformed to: ```html 1 / \ 2 3 / \ / \ 4 N 5 N / 6 ``` `[Some(7), Some(5), Some(11), Some(4), None, Some(8), Some(13), Some(2), None, None, None, Some(12)]` will be transformed to: ```html 7 / \ 5 11 / \ / \ 4 N 8 13 / \ / \ / 2 N N N 12 ``` */ fn from(vec: Vec<Option<i32>>) -> Option<Rc<RefCell<Self>>> { use std::collections::VecDeque; let mut root = None; // save the root node let mut nodes: VecDeque<*mut Option<Rc<RefCell<Self>>>> = Default::default(); // save the pointer to child nodes for v in vec { // use the macro to deal with child node macro_rules! 
update { ($node: expr) => { if let Some(n) = &*$node { // add the pointer of child node, use raw pointer to avoid the ownership check // save the raw pointer of child node of new tree node dosn't need UNSAFE nodes.push_back(&mut n.borrow_mut().left); nodes.push_back(&mut n.borrow_mut().right); } }; } let node = Self::new_option(v); // new tree node if root.is_none() { root = node; update!(&root); } else if let Some(current) = nodes.pop_front() { unsafe { // only dereference raw pointer should under UNSAFE *current = node; update!(current); } } } root } } /// For `q15` and `q18`, check if the target is included in the **vec_list**. fn check_vecs_contain_target(vec_list: &Vec<Vec<i32>>, target: &Vec<i32>) -> bool { for old_vec in vec_list { let mut new_vec = target.clone(); for old_val in old_vec { for i in 0..new_vec.len() { // check target vec if have equal element in old_vec if old_val == &new_vec[i] { new_vec.remove(i); break; } } } // if all elemnets have been removed, mean the vec is duplicate if new_vec.is_empty() { return true; } } false } /// For `q126` and `q127`, check if two words differ by only one character. fn check_diff_one_char(old_word: &String, new_word: &String) -> bool { let mut count = 0; let (old_u8s, new_u8s): (&[u8], &[u8]) = (old_word.as_ref(), new_word.as_ref()); for i in 0..old_u8s.len() { if old_u8s[i]!= new_u8s[i] { count += 1; if count > 1
} } count == 1 } /// Check element content equivalence without element order. fn check_element_eq<T>(v1: T, v2: T) -> bool where T: IntoIterator, T::Item: Eq + std::hash::Hash + std::fmt::Debug, { use std::collections::HashMap; let (mut length1, mut length2) = (0, 0); let (mut content1, mut content2) = (HashMap::new(), HashMap::new()); for v in v1 { length1 += 1; *content1.entry(v).or_insert(0) += 1; } for v in v2 { length2 += 1; *content2.entry(v).or_insert(0) += 1; } let eq = content1 == content2 && length1 == length2; if!eq { println!("Elements are different!\nLength 1: {length1}, Length 2: {length2}"); println!("Content 1: {content1:?}\nContent 2: {content2:?}"); } eq } /** Unlike everything else in the languages, macros will remain visible in sub-modules. Also, unlike everything else in the language, macros are only accessible after their definition. Or use `#[macro_export]` to export the macro, then use macro with code "crate::xxx_macro_name!". */ macro_rules! string_vec { ($($content:expr),*) => {{ let mut temp = vec![]; $(temp.push($content.to_string());)* temp }} } /// Provide a macro to build TreeNode which can directly use the test case syntax in LeetCode. macro_rules! build_tree_node { () => { None }; // macro matcher type 'tt' means "a single token tree", // which allow a independent sub token tree for other macro usage, // until the current rust version (1.58), // only positive number or zero will be treated as a single token, // a negative number won't be treated as it ($($t:tt),*) => {{ let mut temp = vec![]; $(temp.push(covert_tree_node!($t));)* TreeNode::from(temp) }}; } // Use macro to transform the input content. macro_rules! covert_tree_node { (null) => { None }; ($l:literal) => { Some($l) }; } // normal problems mod q1008_construct_binary_search_tree_from_preorder_traversal; mod q102_binary_tree_level_order_traversal; mod q103_binary_tree_zipzag_level_order_traversal; mod q107_binary_tree_level_order_traversal_ii; mod q10_regular_expression_matching; mod q11_container_with_most_water; mod q126_word_ladder_ii; mod q127_word_ladder; mod q12_integer_to_roman; mod q16_three_sum_closest; mod q17_letter_combinations_of_a_phone_number; mod q18_four_sum; mod q19_remove_nth_node_from_end_of_list; mod q200_number_of_islands; mod q208_implement_trie; mod q212_word_search_ii; mod q22_generate_parentheses; mod q23_merge_k_sorted_lists; mod q24_swap_nodes_in_pairs; mod q25_reverse_nodes_in_k_group; mod q29_divide_two_integers; mod q2_add_two_numbers; mod q30_substring_with_concatenation_of_all_words; mod q31_next_permutation; mod q32_longest_valid_parentheses; mod q33_search_in_rotated_sorted_array; mod q34_find_first_and_last_position_of_element_in_sorted_array; mod q35_valid_sudoku; mod q37_sudoku_solver; mod q39_combination_sum; mod q3_length_of_longest_substring; mod q407_trapping_rain_water_ii; mod q40_combination_sum_ii; mod q41_first_missing_positive; mod q42_trapping_rain_water; mod q43_multiply_strings; mod q44_wildcard_matching; mod q454_four_sum_ii; mod q45_jump_game_ii; mod q46_permutations; mod q47_permutations_ii; mod q48_rotate_image; mod q49_group_anagrams; mod q4_find_median_sorted_arrays; mod q50_pow_x_n; mod q51_n_queens; mod q525_contiguous_array; mod q52_n_queens_ii; mod q53_maximum_subarray; mod q543_diameter_of_binary_tree; mod q54_spiral_matrix; mod q55_jump_game; mod q56_merge_intervals; mod q57_insert_interval; mod q59_spiral_matrix_ii; mod q5_longest_palindrome; mod q60_permutation_sequence; mod q61_rotate_list; mod q62_unique_paths; mod 
q63_unique_paths_ii; mod q64_minimum_path_sum; mod q65_valid_number; mod q68_text_justification; mod q6_zipzag_conversion; mod q71_simplify_path; mod q72_edit_distance; mod q73_set_matrix_zeroes; mod q74_search_a_2d_matrix; mod q75_sort_colors; mod q76_minimum_window_substring; mod q77_combinations; mod q78_subsets; mod q79_word_search; mod q7_reverse_integer; mod q80_remove_duplicates_from_sorted_array_ii; mod q81_search_in_rotated_sorted_array_ii; mod q82_remove_duplicates_from_sorted_list_ii; mod q844_backspace_string_compare; mod q84_largest_rectangle_in_histogram; mod q85_maximal_rectangle; mod q86_partition_list; mod q87_scramble_string; mod q89_gray_code; mod q8_my_atoi; mod q90_subsets_ii; mod q91_decode_ways; mod q92_reverse_linked_list_ii; mod q93_restore_ip_addresses; mod q94_binary_tree_inorder_traversal; mod q95_unique_binary_search_trees_ii; mod q96_unique_binary_search_trees; mod q97_interleaving_string; mod q98_validate_binary_search_tree; mod q99_recover_binary_search_tree; // some extra problems can only be found in "30-Day LeetCoding Challenge" mod day_30_leetcoding_challenge; // mod q834_sum_of_distances_in_tree; // DNF // mod q105_construct_binary_tree_from_preorder_and_inorder_traversal; // DNF // mod q814_binary_tree_pruning; // mod q173_binary_search_tree_iterator; // mod q958_check_completeness_of_a_binary_tree; // mod q639_decode_ways_ii; // need explain // mod q124_binary_tree_maximum_path_sum; // mod q221_maximal_square; // mod q1143_longest_common_subsequence; // mod q146_lru_cache; // mod q201_bitwise_and_of_numbers_range; // mod q560_subarray_sum_equals_k; // mod q678_valid_parenthesis_string; // mod q238_product_of_array_except_self; // mod q1046_last_stone_weight; // mod q155_min_stack; // mod q876_middle_of_the_linked_list; // mod q122_best_time_to_buy_and_sell_stock_ii; // mod q283_move_zeroes; // mod q136_single_number; // mod q202_happy_number; // mod q328_odd_even_linked_list; // mod q725_split_linked_list; // mod q885_spiral_matrix_iii; // mod q143_recoder_list; // mod q216_combination_sum_iii; // mod q377_combination_sum_iv;
{ return false; }
conditional_block
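// An illustrative sketch of how the two helpers in the row above behave;
// `demo_helpers` is a hypothetical call site added for illustration, not
// part of the original leetcode.rs file.
fn demo_helpers() {
    // Words of equal length that differ in exactly one position match.
    assert!(check_diff_one_char(&"hot".to_string(), &"dot".to_string()));
    assert!(!check_diff_one_char(&"hot".to_string(), &"dog".to_string()));

    // Order-insensitive containment: [1, 2, -3] is a permutation of
    // [2, -3, 1], so the target counts as a duplicate of an existing vec.
    let found = vec![vec![2, -3, 1]];
    assert!(check_vecs_contain_target(&found, &vec![1, 2, -3]));
}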
leetcode.rs
//! The common data structure definition for leetcode problems. /** The definition of `ListNode`, used by many problems. */ #[derive(PartialEq, Eq, Debug)] pub(crate) struct ListNode { val: i32, next: Option<Box<Self>>, } impl ListNode { #[inline] fn new(val: i32) -> Self { Self { next: None, val } } } trait ToListNode { fn to_list_node(self, reverse: bool) -> Option<Box<ListNode>>; } impl ToListNode for i32 { /// Convert a number to a list of its decimal digits. fn to_list_node(mut self, reverse: bool) -> Option<Box<ListNode>> { let mut vec = vec![]; while self / 10 > 0 { vec.push(self % 10); self /= 10; } vec.push(self % 10); // the digit order in the vec is the reverse of the number vec.to_list_node(!reverse) } } impl ToListNode for Vec<i32> { /// Build a list node from the vector of numbers. fn to_list_node(mut self, reverse: bool) -> Option<Box<ListNode>> { let mut next = None; if !reverse { self.reverse(); } for val in self { next = Some(Box::new(ListNode { val, next })); } next } } trait ToVec { fn to_num_vec(self) -> Vec<i32>; fn to_node_vec(self) -> Vec<Option<Box<ListNode>>>; } impl ToVec for Option<Box<ListNode>> { /// Build the vector of numbers from a list node. fn to_num_vec(self) -> Vec<i32> { let (mut vec, mut temp) = (vec![], &self); while let Some(n) = temp { vec.push(n.val); temp = &n.next; } vec } /// Build the vector of nodes from a list node. fn to_node_vec(self) -> Vec<Option<Box<ListNode>>> { let (mut vec, mut current) = (vec![], self); while let Some(v) = current.as_mut() { // use Option::take() to take the value out of the Option, leaving a None in its place. // let node = std::mem::replace(&mut v.next, None); let node = v.next.take(); vec.push(current); current = node; } vec } } use std::{cell::RefCell, rc::Rc}; /// The definition of a binary tree node (`TreeNode`), used by many problems. #[derive(Debug, PartialEq, Eq)] pub(crate) struct TreeNode { val: i32, left: Option<Rc<RefCell<Self>>>, right: Option<Rc<RefCell<Self>>>, } impl TreeNode { #[inline] fn new(val: i32) -> Self { TreeNode { val, left: None, right: None, } } #[inline] fn new_option(val: Option<i32>) -> Option<Rc<RefCell<Self>>> { val.map(|v| Rc::new(RefCell::new(Self::new(v)))) } /** Builds a binary tree from `Vec<Option<i32>>`: `Some` means a valued node, `None` means an empty node. For example: `[Some(1), Some(2), Some(3), Some(4), Some(5), Some(6)]` will be transformed to: ```html 1 / \ 2 3 / \ / 4 5 6 ``` `[Some(1), Some(2), Some(3), Some(4), None, Some(5), None, Some(6)]` will be transformed to: ```html 1 / \ 2 3 / \ / \ 4 N 5 N / 6 ``` `[Some(7), Some(5), Some(11), Some(4), None, Some(8), Some(13), Some(2), None, None, None, Some(12)]` will be transformed to: ```html 7 / \ 5 11 / \ / \ 4 N 8 13 / \ / \ / 2 N N N 12 ``` */ fn from(vec: Vec<Option<i32>>) -> Option<Rc<RefCell<Self>>> { use std::collections::VecDeque; let mut root = None; // save the root node let mut nodes: VecDeque<*mut Option<Rc<RefCell<Self>>>> = Default::default(); // save the pointers to child nodes for v in vec { // use the macro to deal with child nodes macro_rules!
update { ($node: expr) => { if let Some(n) = &*$node { // add the pointers to the child nodes; use raw pointers to avoid the ownership check // saving the raw pointers to the new tree node's children doesn't need UNSAFE nodes.push_back(&mut n.borrow_mut().left); nodes.push_back(&mut n.borrow_mut().right); } }; } let node = Self::new_option(v); // new tree node if root.is_none() { root = node; update!(&root); } else if let Some(current) = nodes.pop_front() { unsafe { // only dereferencing the raw pointer needs to be under UNSAFE *current = node; update!(current); } } } root } } /// For `q15` and `q18`, check if the target is included in the **vec_list**. fn check_vecs_contain_target(vec_list: &Vec<Vec<i32>>, target: &Vec<i32>) -> bool { for old_vec in vec_list { let mut new_vec = target.clone(); for old_val in old_vec { for i in 0..new_vec.len() { // check whether the target vec has an element equal to one in old_vec if old_val == &new_vec[i] { new_vec.remove(i); break; } } } // if all elements have been removed, the vec is a duplicate if new_vec.is_empty() { return true; } } false } /// For `q126` and `q127`, check if two words differ by only one character. fn check_diff_one_char(old_word: &String, new_word: &String) -> bool { let mut count = 0; let (old_u8s, new_u8s): (&[u8], &[u8]) = (old_word.as_ref(), new_word.as_ref()); for i in 0..old_u8s.len() { if old_u8s[i] != new_u8s[i] { count += 1; if count > 1 { return false; } } } count == 1 } /// Check element content equivalence without element order. fn check_element_eq<T>(v1: T, v2: T) -> bool where T: IntoIterator, T::Item: Eq + std::hash::Hash + std::fmt::Debug, { use std::collections::HashMap; let (mut length1, mut length2) = (0, 0); let (mut content1, mut content2) = (HashMap::new(), HashMap::new()); for v in v1 { length1 += 1; *content1.entry(v).or_insert(0) += 1; } for v in v2 { length2 += 1; *content2.entry(v).or_insert(0) += 1; } let eq = content1 == content2 && length1 == length2; if !eq { println!("Elements are different!\nLength 1: {length1}, Length 2: {length2}"); println!("Content 1: {content1:?}\nContent 2: {content2:?}"); } eq } /** Unlike everything else in the language, macros remain visible in sub-modules. Also, unlike everything else in the language, macros are only accessible after their definition. Alternatively, use `#[macro_export]` to export the macro, then invoke it as `crate::xxx_macro_name!`. */ macro_rules! string_vec { ($($content:expr),*) => {{ let mut temp = vec![]; $(temp.push($content.to_string());)* temp }} } /// Provide a macro to build a TreeNode, which can directly use the test case syntax from LeetCode. macro_rules! build_tree_node { () => { None }; // macro matcher type 'tt' means "a single token tree", // which allows an independent sub token tree to be passed to another macro; // as of the current Rust version (1.58), // only a positive number or zero will be treated as a single token, // a negative number won't be treated as one ($($t:tt),*) => {{ let mut temp = vec![]; $(temp.push(covert_tree_node!($t));)* TreeNode::from(temp) }}; } // Use a macro to transform the input content. macro_rules!
covert_tree_node { (null) => { None }; ($l:literal) => { Some($l) }; } // normal problems mod q1008_construct_binary_search_tree_from_preorder_traversal; mod q102_binary_tree_level_order_traversal; mod q103_binary_tree_zipzag_level_order_traversal; mod q107_binary_tree_level_order_traversal_ii; mod q10_regular_expression_matching; mod q11_container_with_most_water; mod q126_word_ladder_ii; mod q127_word_ladder; mod q12_integer_to_roman; mod q16_three_sum_closest; mod q17_letter_combinations_of_a_phone_number; mod q18_four_sum; mod q19_remove_nth_node_from_end_of_list; mod q200_number_of_islands; mod q208_implement_trie; mod q212_word_search_ii; mod q22_generate_parentheses; mod q23_merge_k_sorted_lists; mod q24_swap_nodes_in_pairs; mod q25_reverse_nodes_in_k_group; mod q29_divide_two_integers; mod q2_add_two_numbers; mod q30_substring_with_concatenation_of_all_words; mod q31_next_permutation; mod q32_longest_valid_parentheses; mod q33_search_in_rotated_sorted_array; mod q34_find_first_and_last_position_of_element_in_sorted_array; mod q35_valid_sudoku; mod q37_sudoku_solver; mod q39_combination_sum; mod q3_length_of_longest_substring; mod q407_trapping_rain_water_ii; mod q40_combination_sum_ii; mod q41_first_missing_positive; mod q42_trapping_rain_water; mod q43_multiply_strings; mod q44_wildcard_matching; mod q454_four_sum_ii; mod q45_jump_game_ii; mod q46_permutations; mod q47_permutations_ii; mod q48_rotate_image; mod q49_group_anagrams; mod q4_find_median_sorted_arrays; mod q50_pow_x_n; mod q51_n_queens; mod q525_contiguous_array; mod q52_n_queens_ii; mod q53_maximum_subarray; mod q543_diameter_of_binary_tree; mod q54_spiral_matrix; mod q55_jump_game; mod q56_merge_intervals; mod q57_insert_interval; mod q59_spiral_matrix_ii; mod q5_longest_palindrome; mod q60_permutation_sequence; mod q61_rotate_list; mod q62_unique_paths; mod q63_unique_paths_ii; mod q64_minimum_path_sum; mod q65_valid_number; mod q68_text_justification; mod q6_zipzag_conversion; mod q71_simplify_path; mod q72_edit_distance; mod q73_set_matrix_zeroes; mod q74_search_a_2d_matrix; mod q75_sort_colors; mod q76_minimum_window_substring; mod q77_combinations; mod q78_subsets; mod q79_word_search; mod q7_reverse_integer; mod q80_remove_duplicates_from_sorted_array_ii; mod q81_search_in_rotated_sorted_array_ii; mod q82_remove_duplicates_from_sorted_list_ii; mod q844_backspace_string_compare;
mod q87_scramble_string; mod q89_gray_code; mod q8_my_atoi; mod q90_subsets_ii; mod q91_decode_ways; mod q92_reverse_linked_list_ii; mod q93_restore_ip_addresses; mod q94_binary_tree_inorder_traversal; mod q95_unique_binary_search_trees_ii; mod q96_unique_binary_search_trees; mod q97_interleaving_string; mod q98_validate_binary_search_tree; mod q99_recover_binary_search_tree; // some extra problems can only be found in "30-Day LeetCoding Challenge" mod day_30_leetcoding_challenge; // mod q834_sum_of_distances_in_tree; // DNF // mod q105_construct_binary_tree_from_preorder_and_inorder_traversal; // DNF // mod q814_binary_tree_pruning; // mod q173_binary_search_tree_iterator; // mod q958_check_completeness_of_a_binary_tree; // mod q639_decode_ways_ii; // need explain // mod q124_binary_tree_maximum_path_sum; // mod q221_maximal_square; // mod q1143_longest_common_subsequence; // mod q146_lru_cache; // mod q201_bitwise_and_of_numbers_range; // mod q560_subarray_sum_equals_k; // mod q678_valid_parenthesis_string; // mod q238_product_of_array_except_self; // mod q1046_last_stone_weight; // mod q155_min_stack; // mod q876_middle_of_the_linked_list; // mod q122_best_time_to_buy_and_sell_stock_ii; // mod q283_move_zeroes; // mod q136_single_number; // mod q202_happy_number; // mod q328_odd_even_linked_list; // mod q725_split_linked_list; // mod q885_spiral_matrix_iii; // mod q143_recoder_list; // mod q216_combination_sum_iii; // mod q377_combination_sum_iv;
mod q84_largest_rectangle_in_histogram; mod q85_maximal_rectangle; mod q86_partition_list;
random_line_split
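// An illustrative sketch of the helpers and macros in the row above:
// level-order input with `null` placeholders maps straight onto
// `TreeNode::from`, and the digit/list conversions round-trip through
// `to_num_vec`. `demo_macros` is a hypothetical call site, not part of the
// original file.
fn demo_macros() {
    // [1, 2, 3, null, 4] builds:
    //     1
    //    / \
    //   2   3
    //    \
    //     4
    let root = build_tree_node!(1, 2, 3, null, 4);
    assert!(root.is_some());

    // string_vec! turns literals into a Vec<String>.
    let words = string_vec!["hit", "hot"];
    assert_eq!(words, vec!["hit".to_string(), "hot".to_string()]);

    // Number-to-list conversion: reverse = true yields least-significant
    // digit first (the layout q2 add_two_numbers expects).
    assert_eq!(123.to_list_node(true).to_num_vec(), vec![3, 2, 1]);
    assert_eq!(123.to_list_node(false).to_num_vec(), vec![1, 2, 3]);
}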
lib.rs
#![no_std] //! //! You can populate [`Petnames`] with your own word lists, but the word lists //! from upstream [petname](https://github.com/dustinkirkland/petname) are //! included with the `default_dictionary` feature (enabled by default). See //! [`Petnames::small`], [`Petnames::medium`], and [`Petnames::large`] to select //! a particular built-in word list, or use the [`Default`] implementation. //! //! The other thing you need is a random number generator from [rand][]: //! //! ```rust //! # #[cfg(feature = "std_rng")] //! let mut rng = rand::thread_rng(); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::Petnames::default().generate(&mut rng, 7, ":"); //! ``` //! //! It may be more convenient to use the default random number generator: //! //! ```rust //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::Petnames::default().generate_one(7, ":"); //! ``` //! //! There's a [convenience function][petname] that'll do all of this: //! //! ```rust //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::petname(7, ":"); //! ``` //! //! But the most flexible approach is to create an [`Iterator`] with //! [`iter`][`Petnames::iter`]: //! //! ```rust //! # #[cfg(feature = "std_rng")] //! let mut rng = rand::thread_rng(); //! # #[cfg(feature = "default_dictionary")] //! let petnames = petname::Petnames::default(); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let ten_thousand_names: Vec<String> = //! petnames.iter(&mut rng, 3, "_").take(10000).collect(); //! ``` //! //! You can modify the word lists to, for example, only use words beginning with //! the letter "b": //! //! ```rust //! # #[cfg(feature = "default_dictionary")] //! let mut petnames = petname::Petnames::default(); //! # #[cfg(feature = "default_dictionary")] //! petnames.retain(|s| s.starts_with("b")); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! petnames.generate_one(3, "."); //! ``` //! extern crate alloc; use alloc::{ borrow::Cow, string::{String, ToString}, vec::Vec, }; use itertools::Itertools; use rand::seq::SliceRandom; /// Convenience function to generate a new petname from default word lists. #[allow(dead_code)] #[cfg(feature = "std_rng")] #[cfg(feature = "default_dictionary")] pub fn petname(words: u8, separator: &str) -> String { Petnames::new().generate_one(words, separator) } /// A word list. pub type Words<'a> = Cow<'a, [&'a str]>; /// Word lists and the logic to combine them into _petnames_. /// /// A _petname_ with `n` words will contain, in order: /// /// * `n - 2` adverbs when `n >= 2`, otherwise 0 adverbs. /// * 1 adjective when `n >= 2`, otherwise 0 adjectives. /// * 1 name / noun when `n >= 1`, otherwise 0 names. /// #[derive(Clone, Debug, Eq, PartialEq)] pub struct Petnames<'a> { pub adjectives: Words<'a>, pub adverbs: Words<'a>, pub names: Words<'a>, } #[cfg(feature = "default_dictionary")] mod words { include!(concat!(env!("OUT_DIR"), "/words.rs")); } impl<'a> Petnames<'a> { /// Constructs a new `Petnames` from the default (small) word lists. #[cfg(feature = "default_dictionary")] pub fn new() -> Self { Self::default() } /// Constructs a new `Petnames` from the small word lists. 
#[cfg(feature = "default_dictionary")] pub fn small() -> Self { Self { adjectives: Cow::from(&words::small::ADJECTIVES[..]), adverbs: Cow::from(&words::small::ADVERBS[..]), names: Cow::from(&words::small::NAMES[..]), } } /// Constructs a new `Petnames` from the medium word lists. #[cfg(feature = "default_dictionary")] pub fn medium() -> Self { Self { adjectives: Cow::from(&words::medium::ADJECTIVES[..]), adverbs: Cow::from(&words::medium::ADVERBS[..]), names: Cow::from(&words::medium::NAMES[..]), } } /// Constructs a new `Petnames` from the large word lists. #[cfg(feature = "default_dictionary")] pub fn large() -> Self { Self { adjectives: Cow::from(&words::large::ADJECTIVES[..]), adverbs: Cow::from(&words::large::ADVERBS[..]), names: Cow::from(&words::large::NAMES[..]), } } /// Constructs a new `Petnames` from the given word lists. /// /// The words are extracted from the given strings by splitting on whitespace. pub fn init(adjectives: &'a str, adverbs: &'a str, names: &'a str) -> Self { Self { adjectives: Cow::Owned(adjectives.split_whitespace().collect::<Vec<_>>()), adverbs: Cow::Owned(adverbs.split_whitespace().collect::<Vec<_>>()), names: Cow::Owned(names.split_whitespace().collect::<Vec<_>>()), } } /// Keep words matching a predicate. /// /// # Examples /// /// ```rust /// # #[cfg(feature = "default_dictionary")] /// let mut petnames = petname::Petnames::default(); /// # #[cfg(feature = "default_dictionary")] /// petnames.retain(|s| s.starts_with("b")); /// # #[cfg(feature = "default_dictionary")] /// # #[cfg(feature = "std_rng")] /// petnames.generate_one(2, "."); /// ``` /// /// This is merely a convenience wrapper that applies the same predicate to /// the adjectives, adverbs, and names lists. /// pub fn retain<F>(&mut self, mut predicate: F) where F: FnMut(&str) -> bool, { self.adjectives.to_mut().retain(|word| predicate(word)); self.adverbs.to_mut().retain(|word| predicate(word)); self.names.to_mut().retain(|word| predicate(word)); } /// Calculate the cardinality of this `Petnames`. /// /// If this is low, names may be repeated by the generator with a higher /// frequency than your use-case may allow. If it is 0 (zero) the generator /// will panic (unless `words` is also zero). /// /// This can saturate. If the total possible combinations of words exceeds /// `u128::MAX` then this will return `u128::MAX`. pub fn cardinality(&self, words: u8) -> u128 { Lists::new(words) .map(|list| match list { List::Adverb => self.adverbs.len() as u128, List::Adjective => self.adjectives.len() as u128, List::Name => self.names.len() as u128, }) .reduce(u128::saturating_mul) .unwrap_or(0u128) } /// Generate a new petname. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// petname::Petnames::default().generate(&mut rng, 7, ":"); /// ``` /// /// # Notes /// /// This may return fewer words than you request if one or more of the word /// lists are empty. For example, if there are no adverbs, requesting 3 or /// more words may still yield only "doubtful-salmon". 
/// pub fn generate<RNG>(&self, rng: &mut RNG, words: u8, separator: &str) -> String where RNG: rand::Rng, { Itertools::intersperse( Lists::new(words).filter_map(|list| match list { List::Adverb => self.adverbs.choose(rng).copied(), List::Adjective => self.adjectives.choose(rng).copied(), List::Name => self.names.choose(rng).copied(), }), separator, ) .collect::<String>() } /// Generate a single new petname. /// /// This is like `generate` but uses `rand::thread_rng` as the random /// source. For efficiency use `generate` when creating multiple names, or /// when you want to use a custom source of randomness. #[cfg(feature = "std_rng")] pub fn generate_one(&self, words: u8, separator: &str) -> String { self.generate(&mut rand::thread_rng(), words, separator) } /// Iterator yielding petnames. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let petnames = petname::Petnames::default(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut iter = petnames.iter(&mut rng, 4, "_"); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// println!("name: {}", iter.next().unwrap()); /// ``` /// pub fn
<RNG>(&'a self, rng: &'a mut RNG, words: u8, separator: &str) -> Names<'a, RNG> where RNG: rand::Rng, { Names { petnames: self, rng, words, separator: separator.to_string() } } /// Iterator yielding unique – i.e. non-repeating – petnames. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let petnames = petname::Petnames::default(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut iter = petnames.iter_non_repeating(&mut rng, 4, "_"); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// println!("name: {}", iter.next().unwrap()); /// ``` /// pub fn iter_non_repeating<RNG>( &'a self, rng: &'a mut RNG, words: u8, separator: &str, ) -> impl Iterator<Item = String> + 'a where RNG: rand::Rng, { let lists: Vec<&'a Words<'a>> = Lists::new(words) .map(|list| match list { List::Adverb => &self.adverbs, List::Adjective => &self.adjectives, List::Name => &self.names, }) .collect(); NamesProduct::shuffled(&lists, rng, separator) } } #[cfg(feature = "default_dictionary")] impl<'a> Default for Petnames<'a> { fn default() -> Self { Self::small() } } /// Enum representing which word list to use. #[derive(Debug, PartialEq)] enum List { Adverb, Adjective, Name, } /// Iterator, yielding which word list to use next. /// /// This yields the appropriate list – [adverbs][List::Adverb], /// [adjectives][List::Adjective]s, [names][List::Name] – from which to select /// a word when constructing a petname of `n` words. For example, if you want 4 /// words in your petname, this will first yield [List::Adverb], then /// [List::Adverb] again, then [List::Adjective], and lastly [List::Name]. #[derive(Debug, PartialEq)] enum Lists { Adverb(u8), Adjective, Name, Done, } impl Lists { fn new(words: u8) -> Self { match words { 0 => Self::Done, 1 => Self::Name, 2 => Self::Adjective, n => Self::Adverb(n - 3), } } fn advance(&mut self) { *self = match self { Self::Adverb(0) => Self::Adjective, Self::Adverb(remaining) => Self::Adverb(*remaining - 1), Self::Adjective => Self::Name, Self::Name | Self::Done => Self::Done, } } } impl Iterator for Lists { type Item = List; fn next(&mut self) -> Option<Self::Item> { let list = match self { Self::Adjective => Some(List::Adjective), Self::Adverb(_) => Some(List::Adverb), Self::Name => Some(List::Name), Self::Done => None, }; self.advance(); list } fn size_hint(&self) -> (usize, Option<usize>) { let remains = match self { Self::Adverb(n) => (n + 3) as usize, Self::Adjective => 2, Self::Name => 1, Self::Done => 0, }; (remains, Some(remains)) } } /// Iterator yielding petnames. pub struct Names<'a, RNG> where RNG: rand::Rng, { petnames: &'a Petnames<'a>, rng: &'a mut RNG, words: u8, separator: String, } impl<'a, RNG> Names<'a, RNG> where RNG: rand::Rng, { /// Calculate the cardinality of this iterator; see `Petnames::cardinality`. #[allow(dead_code)] pub fn cardinality(&self) -> u128 { self.petnames.cardinality(self.words) } } impl<'a, RNG> Iterator for Names<'a, RNG> where RNG: rand::Rng, { type Item = String; fn next(&mut self) -> Option<Self::Item> { Some(self.petnames.generate(self.rng, self.words, &self.separator)) } } /// Iterator yielding petnames from the product of given word lists. /// /// This can be used to ensure that only unique names are produced. 
struct NamesProduct<'a, ITERATOR> where ITERATOR: Iterator<Item = Option<&'a str>>, { iters: Vec<(ITERATOR, Option<&'a str>)>, separator: String, capacity: usize, } impl<'a> NamesProduct<'a, core::iter::Cycle<alloc::vec::IntoIter<Option<&'a str>>>> { /// Shuffles each of the given `lists` with `rng`, then cycles through the /// product of the lists, joining with `separator`. The leftmost list will /// cycle most rapidly. fn shuffled<RNG>(lists: &[&'a Words<'a>], rng: &'a mut RNG, separator: &str) -> Self where RNG: rand::Rng, { NamesProduct { iters: lists .iter() .map(|words| { let mut list: Vec<Option<&'a str>> = Vec::with_capacity(words.len().saturating_add(1)); list.extend(words.iter().map(|word| Some(*word))); list.shuffle(rng); // Could be expensive. list.push(None); // Cycle marker. (list.into_iter().cycle(), None) }) .collect(), separator: separator.to_string(), capacity: Self::capacity(lists, separator), } } fn capacity(lists: &[&'a Words<'a>], separator: &str) -> usize { ( // Sum of the length of the longest possible word in each word list. lists .iter() .filter_map(|words| words.iter().map(|word| word.len()).max()) .fold(0usize, |sum, len| sum.saturating_add(len)) // The total length of all separators. Careful not to wrap usize. + (separator.len().saturating_mul(lists.len().saturating_sub(1))) ) // Things run _much_ quicker when the capacity is a power of 2. Memory // alignment? If so it may be enough to align at, say, 8 bytes, but this // works for now. .checked_next_power_of_two() // In case there are no lists, or they're all empty... or we have // calculated that we need more than usize::MAX capacity. .unwrap_or(0) } } impl<'a, ITERATOR> Iterator for NamesProduct<'a, ITERATOR> where ITERATOR: Iterator<Item = Option<&'a str>>, { type Item = String; fn next(&mut self) -> Option<Self::Item> { let mut bump = true; // Request advance of next iterator. for (iter, word) in self.iters.iter_mut() { if bump || word.is_none() { match iter.next() { None => { // This shouldn't happen because we expect the iterators // to cycle. However, if it does, we're definitely done. return None; } Some(None) => { // This is the cycle end marker. We want to get another // new word from this iterator, and advance the *next* // iterator too. match iter.next() { None => return None, Some(None) => return None, Some(s) => *word = s, } bump = true } Some(s) => { // We have a new word from this iterator, so we do not // yet need to advance the next iterator. *word = s; bump = false } } } } if bump { // We reached the end of the last iterator, hence we're done. None } else { // We may be able to construct a word! 
self.iters.iter().try_fold(String::with_capacity(self.capacity), |acc, (_, w)| match (acc, *w) { (s, Some(w)) if s.is_empty() => Some(s + w), (s, Some(w)) => Some(s + &self.separator + w), _ => None, }) } } } #[cfg(test)] mod tests { #[test] fn lists_sequences_adverbs_adjectives_then_names() { let mut lists = super::Lists::new(4); assert_eq!(super::Lists::Adverb(1), lists); assert_eq!(Some(super::List::Adverb), lists.next()); assert_eq!(super::Lists::Adverb(0), lists); assert_eq!(Some(super::List::Adverb), lists.next()); assert_eq!(super::Lists::Adjective, lists); assert_eq!(Some(super::List::Adjective), lists.next()); assert_eq!(super::Lists::Name, lists); assert_eq!(Some(super::List::Name), lists.next()); assert_eq!(super::Lists::Done, lists); assert_eq!(None, lists.next()); } #[test] fn lists_size_hint() { let mut lists = super::Lists::new(3); assert_eq!((3, Some(3)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((2, Some(2)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((1, Some(1)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((0, Some(0)), lists.size_hint()); assert_eq!(None, lists.next()); assert_eq!((0, Some(0)), lists.size_hint()); } }
iter
identifier_name
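// An illustrative sketch of `Petnames::cardinality` from the row above: it
// multiplies the lengths of the word lists selected for each slot. The word
// lists here are hypothetical, not the crate's built-in dictionaries.
fn demo_cardinality() {
    use std::borrow::Cow;
    let petnames = petname::Petnames {
        adjectives: Cow::from(&["big", "small", "red"][..]),
        adverbs: Cow::from(&["really", "very"][..]),
        names: Cow::from(&["fox", "owl", "bee", "elk"][..]),
    };
    // A 3-word name draws adverb, adjective, name: 2 * 3 * 4 combinations.
    assert_eq!(petnames.cardinality(3), 24);
    // A 1-word name draws only a name.
    assert_eq!(petnames.cardinality(1), 4);
}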
lib.rs
#![no_std] //! //! You can populate [`Petnames`] with your own word lists, but the word lists //! from upstream [petname](https://github.com/dustinkirkland/petname) are //! included with the `default_dictionary` feature (enabled by default). See //! [`Petnames::small`], [`Petnames::medium`], and [`Petnames::large`] to select //! a particular built-in word list, or use the [`Default`] implementation. //! //! The other thing you need is a random number generator from [rand][]: //! //! ```rust //! # #[cfg(feature = "std_rng")] //! let mut rng = rand::thread_rng(); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::Petnames::default().generate(&mut rng, 7, ":"); //! ``` //! //! It may be more convenient to use the default random number generator: //! //! ```rust //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::Petnames::default().generate_one(7, ":"); //! ``` //! //! There's a [convenience function][petname] that'll do all of this: //! //! ```rust //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::petname(7, ":"); //! ``` //! //! But the most flexible approach is to create an [`Iterator`] with //! [`iter`][`Petnames::iter`]: //! //! ```rust //! # #[cfg(feature = "std_rng")] //! let mut rng = rand::thread_rng(); //! # #[cfg(feature = "default_dictionary")] //! let petnames = petname::Petnames::default(); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let ten_thousand_names: Vec<String> = //! petnames.iter(&mut rng, 3, "_").take(10000).collect(); //! ``` //! //! You can modify the word lists to, for example, only use words beginning with //! the letter "b": //! //! ```rust //! # #[cfg(feature = "default_dictionary")] //! let mut petnames = petname::Petnames::default(); //! # #[cfg(feature = "default_dictionary")] //! petnames.retain(|s| s.starts_with("b")); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! petnames.generate_one(3, "."); //! ``` //! extern crate alloc; use alloc::{ borrow::Cow, string::{String, ToString}, vec::Vec, }; use itertools::Itertools; use rand::seq::SliceRandom; /// Convenience function to generate a new petname from default word lists. #[allow(dead_code)] #[cfg(feature = "std_rng")] #[cfg(feature = "default_dictionary")] pub fn petname(words: u8, separator: &str) -> String { Petnames::new().generate_one(words, separator) } /// A word list. pub type Words<'a> = Cow<'a, [&'a str]>; /// Word lists and the logic to combine them into _petnames_. /// /// A _petname_ with `n` words will contain, in order: /// /// * `n - 2` adverbs when `n >= 2`, otherwise 0 adverbs. /// * 1 adjective when `n >= 2`, otherwise 0 adjectives. /// * 1 name / noun when `n >= 1`, otherwise 0 names. /// #[derive(Clone, Debug, Eq, PartialEq)] pub struct Petnames<'a> { pub adjectives: Words<'a>, pub adverbs: Words<'a>, pub names: Words<'a>, } #[cfg(feature = "default_dictionary")] mod words { include!(concat!(env!("OUT_DIR"), "/words.rs")); } impl<'a> Petnames<'a> { /// Constructs a new `Petnames` from the default (small) word lists. #[cfg(feature = "default_dictionary")] pub fn new() -> Self { Self::default() } /// Constructs a new `Petnames` from the small word lists. 
#[cfg(feature = "default_dictionary")] pub fn small() -> Self { Self { adjectives: Cow::from(&words::small::ADJECTIVES[..]), adverbs: Cow::from(&words::small::ADVERBS[..]), names: Cow::from(&words::small::NAMES[..]), } } /// Constructs a new `Petnames` from the medium word lists. #[cfg(feature = "default_dictionary")] pub fn medium() -> Self { Self { adjectives: Cow::from(&words::medium::ADJECTIVES[..]), adverbs: Cow::from(&words::medium::ADVERBS[..]), names: Cow::from(&words::medium::NAMES[..]), } } /// Constructs a new `Petnames` from the large word lists. #[cfg(feature = "default_dictionary")] pub fn large() -> Self { Self { adjectives: Cow::from(&words::large::ADJECTIVES[..]), adverbs: Cow::from(&words::large::ADVERBS[..]), names: Cow::from(&words::large::NAMES[..]), } } /// Constructs a new `Petnames` from the given word lists. /// /// The words are extracted from the given strings by splitting on whitespace. pub fn init(adjectives: &'a str, adverbs: &'a str, names: &'a str) -> Self { Self { adjectives: Cow::Owned(adjectives.split_whitespace().collect::<Vec<_>>()), adverbs: Cow::Owned(adverbs.split_whitespace().collect::<Vec<_>>()), names: Cow::Owned(names.split_whitespace().collect::<Vec<_>>()), } } /// Keep words matching a predicate. /// /// # Examples /// /// ```rust /// # #[cfg(feature = "default_dictionary")] /// let mut petnames = petname::Petnames::default(); /// # #[cfg(feature = "default_dictionary")] /// petnames.retain(|s| s.starts_with("b")); /// # #[cfg(feature = "default_dictionary")] /// # #[cfg(feature = "std_rng")] /// petnames.generate_one(2, "."); /// ``` /// /// This is merely a convenience wrapper that applies the same predicate to
{ self.adjectives.to_mut().retain(|word| predicate(word)); self.adverbs.to_mut().retain(|word| predicate(word)); self.names.to_mut().retain(|word| predicate(word)); } /// Calculate the cardinality of this `Petnames`. /// /// If this is low, names may be repeated by the generator with a higher /// frequency than your use-case may allow. If it is 0 (zero) the generator /// will panic (unless `words` is also zero). /// /// This can saturate. If the total possible combinations of words exceeds /// `u128::MAX` then this will return `u128::MAX`. pub fn cardinality(&self, words: u8) -> u128 { Lists::new(words) .map(|list| match list { List::Adverb => self.adverbs.len() as u128, List::Adjective => self.adjectives.len() as u128, List::Name => self.names.len() as u128, }) .reduce(u128::saturating_mul) .unwrap_or(0u128) } /// Generate a new petname. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// petname::Petnames::default().generate(&mut rng, 7, ":"); /// ``` /// /// # Notes /// /// This may return fewer words than you request if one or more of the word /// lists are empty. For example, if there are no adverbs, requesting 3 or /// more words may still yield only "doubtful-salmon". /// pub fn generate<RNG>(&self, rng: &mut RNG, words: u8, separator: &str) -> String where RNG: rand::Rng, { Itertools::intersperse( Lists::new(words).filter_map(|list| match list { List::Adverb => self.adverbs.choose(rng).copied(), List::Adjective => self.adjectives.choose(rng).copied(), List::Name => self.names.choose(rng).copied(), }), separator, ) .collect::<String>() } /// Generate a single new petname. /// /// This is like `generate` but uses `rand::thread_rng` as the random /// source. For efficiency use `generate` when creating multiple names, or /// when you want to use a custom source of randomness. #[cfg(feature = "std_rng")] pub fn generate_one(&self, words: u8, separator: &str) -> String { self.generate(&mut rand::thread_rng(), words, separator) } /// Iterator yielding petnames. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let petnames = petname::Petnames::default(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut iter = petnames.iter(&mut rng, 4, "_"); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// println!("name: {}", iter.next().unwrap()); /// ``` /// pub fn iter<RNG>(&'a self, rng: &'a mut RNG, words: u8, separator: &str) -> Names<'a, RNG> where RNG: rand::Rng, { Names { petnames: self, rng, words, separator: separator.to_string() } } /// Iterator yielding unique – i.e. non-repeating – petnames. 
/// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let petnames = petname::Petnames::default(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut iter = petnames.iter_non_repeating(&mut rng, 4, "_"); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// println!("name: {}", iter.next().unwrap()); /// ``` /// pub fn iter_non_repeating<RNG>( &'a self, rng: &'a mut RNG, words: u8, separator: &str, ) -> impl Iterator<Item = String> + 'a where RNG: rand::Rng, { let lists: Vec<&'a Words<'a>> = Lists::new(words) .map(|list| match list { List::Adverb => &self.adverbs, List::Adjective => &self.adjectives, List::Name => &self.names, }) .collect(); NamesProduct::shuffled(&lists, rng, separator) } } #[cfg(feature = "default_dictionary")] impl<'a> Default for Petnames<'a> { fn default() -> Self { Self::small() } } /// Enum representing which word list to use. #[derive(Debug, PartialEq)] enum List { Adverb, Adjective, Name, } /// Iterator, yielding which word list to use next. /// /// This yields the appropriate list – [adverbs][List::Adverb], /// [adjectives][List::Adjective]s, [names][List::Name] – from which to select /// a word when constructing a petname of `n` words. For example, if you want 4 /// words in your petname, this will first yield [List::Adverb], then /// [List::Adverb] again, then [List::Adjective], and lastly [List::Name]. #[derive(Debug, PartialEq)] enum Lists { Adverb(u8), Adjective, Name, Done, } impl Lists { fn new(words: u8) -> Self { match words { 0 => Self::Done, 1 => Self::Name, 2 => Self::Adjective, n => Self::Adverb(n - 3), } } fn advance(&mut self) { *self = match self { Self::Adverb(0) => Self::Adjective, Self::Adverb(remaining) => Self::Adverb(*remaining - 1), Self::Adjective => Self::Name, Self::Name | Self::Done => Self::Done, } } } impl Iterator for Lists { type Item = List; fn next(&mut self) -> Option<Self::Item> { let list = match self { Self::Adjective => Some(List::Adjective), Self::Adverb(_) => Some(List::Adverb), Self::Name => Some(List::Name), Self::Done => None, }; self.advance(); list } fn size_hint(&self) -> (usize, Option<usize>) { let remains = match self { Self::Adverb(n) => (n + 3) as usize, Self::Adjective => 2, Self::Name => 1, Self::Done => 0, }; (remains, Some(remains)) } } /// Iterator yielding petnames. pub struct Names<'a, RNG> where RNG: rand::Rng, { petnames: &'a Petnames<'a>, rng: &'a mut RNG, words: u8, separator: String, } impl<'a, RNG> Names<'a, RNG> where RNG: rand::Rng, { /// Calculate the cardinality of this iterator; see `Petnames::cardinality`. #[allow(dead_code)] pub fn cardinality(&self) -> u128 { self.petnames.cardinality(self.words) } } impl<'a, RNG> Iterator for Names<'a, RNG> where RNG: rand::Rng, { type Item = String; fn next(&mut self) -> Option<Self::Item> { Some(self.petnames.generate(self.rng, self.words, &self.separator)) } } /// Iterator yielding petnames from the product of given word lists. /// /// This can be used to ensure that only unique names are produced. 
struct NamesProduct<'a, ITERATOR> where ITERATOR: Iterator<Item = Option<&'a str>>, { iters: Vec<(ITERATOR, Option<&'a str>)>, separator: String, capacity: usize, } impl<'a> NamesProduct<'a, core::iter::Cycle<alloc::vec::IntoIter<Option<&'a str>>>> { /// Shuffles each of the given `lists` with `rng`, then cycles through the /// product of the lists, joining with `separator`. The leftmost list will /// cycle most rapidly. fn shuffled<RNG>(lists: &[&'a Words<'a>], rng: &'a mut RNG, separator: &str) -> Self where RNG: rand::Rng, { NamesProduct { iters: lists .iter() .map(|words| { let mut list: Vec<Option<&'a str>> = Vec::with_capacity(words.len().saturating_add(1)); list.extend(words.iter().map(|word| Some(*word))); list.shuffle(rng); // Could be expensive. list.push(None); // Cycle marker. (list.into_iter().cycle(), None) }) .collect(), separator: separator.to_string(), capacity: Self::capacity(lists, separator), } } fn capacity(lists: &[&'a Words<'a>], separator: &str) -> usize { ( // Sum of the length of the longest possible word in each word list. lists .iter() .filter_map(|words| words.iter().map(|word| word.len()).max()) .fold(0usize, |sum, len| sum.saturating_add(len)) // The total length of all separators. Careful not to wrap usize. + (separator.len().saturating_mul(lists.len().saturating_sub(1))) ) // Things run _much_ quicker when the capacity is a power of 2. Memory // alignment? If so it may be enough to align at, say, 8 bytes, but this // works for now. .checked_next_power_of_two() // In case there are no lists, or they're all empty... or we have // calculated that we need more than usize::MAX capacity. .unwrap_or(0) } } impl<'a, ITERATOR> Iterator for NamesProduct<'a, ITERATOR> where ITERATOR: Iterator<Item = Option<&'a str>>, { type Item = String; fn next(&mut self) -> Option<Self::Item> { let mut bump = true; // Request advance of next iterator. for (iter, word) in self.iters.iter_mut() { if bump || word.is_none() { match iter.next() { None => { // This shouldn't happen because we expect the iterators // to cycle. However, if it does, we're definitely done. return None; } Some(None) => { // This is the cycle end marker. We want to get another // new word from this iterator, and advance the *next* // iterator too. match iter.next() { None => return None, Some(None) => return None, Some(s) => *word = s, } bump = true } Some(s) => { // We have a new word from this iterator, so we do not // yet need to advance the next iterator. *word = s; bump = false } } } } if bump { // We reached the end of the last iterator, hence we're done. None } else { // We may be able to construct a word! 
self.iters.iter().try_fold(String::with_capacity(self.capacity), |acc, (_, w)| match (acc, *w) { (s, Some(w)) if s.is_empty() => Some(s + w), (s, Some(w)) => Some(s + &self.separator + w), _ => None, }) } } } #[cfg(test)] mod tests { #[test] fn lists_sequences_adverbs_adjectives_then_names() { let mut lists = super::Lists::new(4); assert_eq!(super::Lists::Adverb(1), lists); assert_eq!(Some(super::List::Adverb), lists.next()); assert_eq!(super::Lists::Adverb(0), lists); assert_eq!(Some(super::List::Adverb), lists.next()); assert_eq!(super::Lists::Adjective, lists); assert_eq!(Some(super::List::Adjective), lists.next()); assert_eq!(super::Lists::Name, lists); assert_eq!(Some(super::List::Name), lists.next()); assert_eq!(super::Lists::Done, lists); assert_eq!(None, lists.next()); } #[test] fn lists_size_hint() { let mut lists = super::Lists::new(3); assert_eq!((3, Some(3)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((2, Some(2)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((1, Some(1)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((0, Some(0)), lists.size_hint()); assert_eq!(None, lists.next()); assert_eq!((0, Some(0)), lists.size_hint()); } }
/// the adjectives, adverbs, and names lists. /// pub fn retain<F>(&mut self, mut predicate: F) where F: FnMut(&str) -> bool,
random_line_split
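// An illustrative sketch of the difference between `iter` and
// `iter_non_repeating` from the row above: the latter walks a shuffled
// product of the word lists, so it yields no duplicates until the product is
// exhausted. Hypothetical test; assumes the `std_rng` and
// `default_dictionary` features are enabled.
fn demo_non_repeating() {
    use std::collections::HashSet;
    let petnames = petname::Petnames::small();
    let mut rng = rand::thread_rng();
    // Stay below the total number of combinations so uniqueness must hold.
    let take = petnames.cardinality(2).min(100) as usize;
    let names: HashSet<String> =
        petnames.iter_non_repeating(&mut rng, 2, "-").take(take).collect();
    assert_eq!(names.len(), take);
}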
lib.rs
#![no_std] //! //! You can populate [`Petnames`] with your own word lists, but the word lists //! from upstream [petname](https://github.com/dustinkirkland/petname) are //! included with the `default_dictionary` feature (enabled by default). See //! [`Petnames::small`], [`Petnames::medium`], and [`Petnames::large`] to select //! a particular built-in word list, or use the [`Default`] implementation. //! //! The other thing you need is a random number generator from [rand][]: //! //! ```rust //! # #[cfg(feature = "std_rng")] //! let mut rng = rand::thread_rng(); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::Petnames::default().generate(&mut rng, 7, ":"); //! ``` //! //! It may be more convenient to use the default random number generator: //! //! ```rust //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::Petnames::default().generate_one(7, ":"); //! ``` //! //! There's a [convenience function][petname] that'll do all of this: //! //! ```rust //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::petname(7, ":"); //! ``` //! //! But the most flexible approach is to create an [`Iterator`] with //! [`iter`][`Petnames::iter`]: //! //! ```rust //! # #[cfg(feature = "std_rng")] //! let mut rng = rand::thread_rng(); //! # #[cfg(feature = "default_dictionary")] //! let petnames = petname::Petnames::default(); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let ten_thousand_names: Vec<String> = //! petnames.iter(&mut rng, 3, "_").take(10000).collect(); //! ``` //! //! You can modify the word lists to, for example, only use words beginning with //! the letter "b": //! //! ```rust //! # #[cfg(feature = "default_dictionary")] //! let mut petnames = petname::Petnames::default(); //! # #[cfg(feature = "default_dictionary")] //! petnames.retain(|s| s.starts_with("b")); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! petnames.generate_one(3, "."); //! ``` //! extern crate alloc; use alloc::{ borrow::Cow, string::{String, ToString}, vec::Vec, }; use itertools::Itertools; use rand::seq::SliceRandom; /// Convenience function to generate a new petname from default word lists. #[allow(dead_code)] #[cfg(feature = "std_rng")] #[cfg(feature = "default_dictionary")] pub fn petname(words: u8, separator: &str) -> String { Petnames::new().generate_one(words, separator) } /// A word list. pub type Words<'a> = Cow<'a, [&'a str]>; /// Word lists and the logic to combine them into _petnames_. /// /// A _petname_ with `n` words will contain, in order: /// /// * `n - 2` adverbs when `n >= 2`, otherwise 0 adverbs. /// * 1 adjective when `n >= 2`, otherwise 0 adjectives. /// * 1 name / noun when `n >= 1`, otherwise 0 names. /// #[derive(Clone, Debug, Eq, PartialEq)] pub struct Petnames<'a> { pub adjectives: Words<'a>, pub adverbs: Words<'a>, pub names: Words<'a>, } #[cfg(feature = "default_dictionary")] mod words { include!(concat!(env!("OUT_DIR"), "/words.rs")); } impl<'a> Petnames<'a> { /// Constructs a new `Petnames` from the default (small) word lists. #[cfg(feature = "default_dictionary")] pub fn new() -> Self { Self::default() } /// Constructs a new `Petnames` from the small word lists. 
#[cfg(feature = "default_dictionary")] pub fn small() -> Self { Self { adjectives: Cow::from(&words::small::ADJECTIVES[..]), adverbs: Cow::from(&words::small::ADVERBS[..]), names: Cow::from(&words::small::NAMES[..]), } } /// Constructs a new `Petnames` from the medium word lists. #[cfg(feature = "default_dictionary")] pub fn medium() -> Self { Self { adjectives: Cow::from(&words::medium::ADJECTIVES[..]), adverbs: Cow::from(&words::medium::ADVERBS[..]), names: Cow::from(&words::medium::NAMES[..]), } } /// Constructs a new `Petnames` from the large word lists. #[cfg(feature = "default_dictionary")] pub fn large() -> Self { Self { adjectives: Cow::from(&words::large::ADJECTIVES[..]), adverbs: Cow::from(&words::large::ADVERBS[..]), names: Cow::from(&words::large::NAMES[..]), } } /// Constructs a new `Petnames` from the given word lists. /// /// The words are extracted from the given strings by splitting on whitespace. pub fn init(adjectives: &'a str, adverbs: &'a str, names: &'a str) -> Self { Self { adjectives: Cow::Owned(adjectives.split_whitespace().collect::<Vec<_>>()), adverbs: Cow::Owned(adverbs.split_whitespace().collect::<Vec<_>>()), names: Cow::Owned(names.split_whitespace().collect::<Vec<_>>()), } } /// Keep words matching a predicate. /// /// # Examples /// /// ```rust /// # #[cfg(feature = "default_dictionary")] /// let mut petnames = petname::Petnames::default(); /// # #[cfg(feature = "default_dictionary")] /// petnames.retain(|s| s.starts_with("b")); /// # #[cfg(feature = "default_dictionary")] /// # #[cfg(feature = "std_rng")] /// petnames.generate_one(2, "."); /// ``` /// /// This is merely a convenience wrapper that applies the same predicate to /// the adjectives, adverbs, and names lists. /// pub fn retain<F>(&mut self, mut predicate: F) where F: FnMut(&str) -> bool, { self.adjectives.to_mut().retain(|word| predicate(word)); self.adverbs.to_mut().retain(|word| predicate(word)); self.names.to_mut().retain(|word| predicate(word)); } /// Calculate the cardinality of this `Petnames`. /// /// If this is low, names may be repeated by the generator with a higher /// frequency than your use-case may allow. If it is 0 (zero) the generator /// will panic (unless `words` is also zero). /// /// This can saturate. If the total possible combinations of words exceeds /// `u128::MAX` then this will return `u128::MAX`. pub fn cardinality(&self, words: u8) -> u128 { Lists::new(words) .map(|list| match list { List::Adverb => self.adverbs.len() as u128, List::Adjective => self.adjectives.len() as u128, List::Name => self.names.len() as u128, }) .reduce(u128::saturating_mul) .unwrap_or(0u128) } /// Generate a new petname. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// petname::Petnames::default().generate(&mut rng, 7, ":"); /// ``` /// /// # Notes /// /// This may return fewer words than you request if one or more of the word /// lists are empty. For example, if there are no adverbs, requesting 3 or /// more words may still yield only "doubtful-salmon". 
/// pub fn generate<RNG>(&self, rng: &mut RNG, words: u8, separator: &str) -> String where RNG: rand::Rng, { Itertools::intersperse( Lists::new(words).filter_map(|list| match list { List::Adverb => self.adverbs.choose(rng).copied(), List::Adjective => self.adjectives.choose(rng).copied(), List::Name => self.names.choose(rng).copied(), }), separator, ) .collect::<String>() } /// Generate a single new petname. /// /// This is like `generate` but uses `rand::thread_rng` as the random /// source. For efficiency use `generate` when creating multiple names, or /// when you want to use a custom source of randomness. #[cfg(feature = "std_rng")] pub fn generate_one(&self, words: u8, separator: &str) -> String { self.generate(&mut rand::thread_rng(), words, separator) } /// Iterator yielding petnames. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let petnames = petname::Petnames::default(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut iter = petnames.iter(&mut rng, 4, "_"); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// println!("name: {}", iter.next().unwrap()); /// ``` /// pub fn iter<RNG>(&'a self, rng: &'a mut RNG, words: u8, separator: &str) -> Names<'a, RNG> where RNG: rand::Rng, { Names { petnames: self, rng, words, separator: separator.to_string() } } /// Iterator yielding unique – i.e. non-repeating – petnames. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let petnames = petname::Petnames::default(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut iter = petnames.iter_non_repeating(&mut rng, 4, "_"); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// println!("name: {}", iter.next().unwrap()); /// ``` /// pub fn iter_non_repeating<RNG>( &'a self, rng: &'a mut RNG, words: u8, separator: &str, ) -> impl Iterator<Item = String> + 'a where RNG: rand::Rng, { let lists: Vec<&'a Words<'a>> = Lists::new(words) .map(|list| match list { List::Adverb => &self.adverbs, List::Adjective => &self.adjectives, List::Name => &self.names, }) .collect(); NamesProduct::shuffled(&lists, rng, separator) } } #[cfg(feature = "default_dictionary")] impl<'a> Default for Petnames<'a> { fn default() -> Self { Self::small() } } /// Enum representing which word list to use. #[derive(Debug, PartialEq)] enum List { Adverb, Adjective, Name, } /// Iterator, yielding which word list to use next. /// /// This yields the appropriate list – [adverbs][List::Adverb], /// [adjectives][List::Adjective]s, [names][List::Name] – from which to select /// a word when constructing a petname of `n` words. For example, if you want 4 /// words in your petname, this will first yield [List::Adverb], then /// [List::Adverb] again, then [List::Adjective], and lastly [List::Name]. 
#[derive(Debug, PartialEq)] enum Lists { Adverb(u8), Adjective, Name, Done, } impl Lists { fn new(words: u8) -> Self { match words { 0 => Self::Done, 1 => Self::Name, 2 => Self::Adjective, n => Self::Adverb(n - 3), } } fn advance(&mut self) { *self = match self { Self::Adverb(0) => Self::Adjective, Self::Adverb(remaining) => Self::Adverb(*remaining - 1), Self::Adjective => Self::Name, Self::Name | Self::Done => Self::Done, } } } impl Iterator for Lists { type Item = List; fn next(&mut self) -> Option<Self::Item> {
size_hint(&self) -> (usize, Option<usize>) { let remains = match self { Self::Adverb(n) => (n + 3) as usize, Self::Adjective => 2, Self::Name => 1, Self::Done => 0, }; (remains, Some(remains)) } } /// Iterator yielding petnames. pub struct Names<'a, RNG> where RNG: rand::Rng, { petnames: &'a Petnames<'a>, rng: &'a mut RNG, words: u8, separator: String, } impl<'a, RNG> Names<'a, RNG> where RNG: rand::Rng, { /// Calculate the cardinality of this iterator; see `Petnames::cardinality`. #[allow(dead_code)] pub fn cardinality(&self) -> u128 { self.petnames.cardinality(self.words) } } impl<'a, RNG> Iterator for Names<'a, RNG> where RNG: rand::Rng, { type Item = String; fn next(&mut self) -> Option<Self::Item> { Some(self.petnames.generate(self.rng, self.words, &self.separator)) } } /// Iterator yielding petnames from the product of given word lists. /// /// This can be used to ensure that only unique names are produced. struct NamesProduct<'a, ITERATOR> where ITERATOR: Iterator<Item = Option<&'a str>>, { iters: Vec<(ITERATOR, Option<&'a str>)>, separator: String, capacity: usize, } impl<'a> NamesProduct<'a, core::iter::Cycle<alloc::vec::IntoIter<Option<&'a str>>>> { /// Shuffles each of the given `lists` with `rng`, then cycles through the /// product of the lists, joining with `separator`. The leftmost list will /// cycle most rapidly. fn shuffled<RNG>(lists: &[&'a Words<'a>], rng: &'a mut RNG, separator: &str) -> Self where RNG: rand::Rng, { NamesProduct { iters: lists .iter() .map(|words| { let mut list: Vec<Option<&'a str>> = Vec::with_capacity(words.len().saturating_add(1)); list.extend(words.iter().map(|word| Some(*word))); list.shuffle(rng); // Could be expensive. list.push(None); // Cycle marker. (list.into_iter().cycle(), None) }) .collect(), separator: separator.to_string(), capacity: Self::capacity(lists, separator), } } fn capacity(lists: &[&'a Words<'a>], separator: &str) -> usize { ( // Sum of the length of the longest possible word in each word list. lists .iter() .filter_map(|words| words.iter().map(|word| word.len()).max()) .fold(0usize, |sum, len| sum.saturating_add(len)) // The total length of all separators. Careful not to wrap usize. + (separator.len().saturating_mul(lists.len().saturating_sub(1))) ) // Things run _much_ quicker when the capacity is a power of 2. Memory // alignment? If so it may be enough to align at, say, 8 bytes, but this // works for now. .checked_next_power_of_two() // In case there are no lists, or they're all empty... or we have // calculated that we need more than usize::MAX capacity. .unwrap_or(0) } } impl<'a, ITERATOR> Iterator for NamesProduct<'a, ITERATOR> where ITERATOR: Iterator<Item = Option<&'a str>>, { type Item = String; fn next(&mut self) -> Option<Self::Item> { let mut bump = true; // Request advance of next iterator. for (iter, word) in self.iters.iter_mut() { if bump || word.is_none() { match iter.next() { None => { // This shouldn't happen because we expect the iterators // to cycle. However, if it does, we're definitely done. return None; } Some(None) => { // This is the cycle end marker. We want to get another // new word from this iterator, and advance the *next* // iterator too. match iter.next() { None => return None, Some(None) => return None, Some(s) => *word = s, } bump = true } Some(s) => { // We have a new word from this iterator, so we do not // yet need to advance the next iterator. *word = s; bump = false } } } } if bump { // We reached the end of the last iterator, hence we're done. 
None } else { // We may be able to construct a word! self.iters.iter().try_fold(String::with_capacity(self.capacity), |acc, (_, w)| match (acc, *w) { (s, Some(w)) if s.is_empty() => Some(s + w), (s, Some(w)) => Some(s + &self.separator + w), _ => None, }) } } } #[cfg(test)] mod tests { #[test] fn lists_sequences_adverbs_adjectives_then_names() { let mut lists = super::Lists::new(4); assert_eq!(super::Lists::Adverb(1), lists); assert_eq!(Some(super::List::Adverb), lists.next()); assert_eq!(super::Lists::Adverb(0), lists); assert_eq!(Some(super::List::Adverb), lists.next()); assert_eq!(super::Lists::Adjective, lists); assert_eq!(Some(super::List::Adjective), lists.next()); assert_eq!(super::Lists::Name, lists); assert_eq!(Some(super::List::Name), lists.next()); assert_eq!(super::Lists::Done, lists); assert_eq!(None, lists.next()); } #[test] fn lists_size_hint() { let mut lists = super::Lists::new(3); assert_eq!((3, Some(3)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((2, Some(2)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((1, Some(1)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((0, Some(0)), lists.size_hint()); assert_eq!(None, lists.next()); assert_eq!((0, Some(0)), lists.size_hint()); } }
let list = match self { Self::Adjective => Some(List::Adjective), Self::Adverb(_) => Some(List::Adverb), Self::Name => Some(List::Name), Self::Done => None, }; self.advance(); list } fn
identifier_body
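A minimal standalone sketch of the cycle-with-marker technique `NamesProduct` uses above (names here are illustrative, not the crate's API): each word list cycles with a trailing `None`, and hitting that marker pulls a fresh word and carries the advance to the next list, so the leftmost list cycles fastest.

```rust
// Odometer-style iteration over the product of word lists, assuming the
// cycle-with-None-marker scheme described in NamesProduct (hypothetical
// standalone version; `product_names` is not the crate's API).
fn product_names(lists: &[Vec<&str>], separator: &str) -> Vec<String> {
    let mut iters: Vec<_> = lists
        .iter()
        .map(|l| {
            let mut v: Vec<Option<&str>> = l.iter().copied().map(Some).collect();
            v.push(None); // cycle-end marker
            (v.into_iter().cycle(), None::<&str>)
        })
        .collect();
    let total: usize = lists.iter().map(|l| l.len()).product();
    let mut out = Vec::with_capacity(total);
    for _ in 0..total {
        let mut bump = true; // always advance the leftmost iterator
        for (iter, word) in iters.iter_mut() {
            if bump || word.is_none() {
                match iter.next().unwrap() {
                    None => {
                        // Marker: wrap around to a fresh word and carry the
                        // bump to the next list, like an odometer digit.
                        *word = iter.next().unwrap();
                        bump = true;
                    }
                    some => {
                        *word = some;
                        bump = false;
                    }
                }
            }
        }
        let name: Vec<&str> = iters.iter().filter_map(|(_, w)| *w).collect();
        out.push(name.join(separator));
    }
    out
}
```

The `None` marker is what lets a plain `cycle()` signal "wrapped around" without tracking indices, which is why each shuffled list above gets one pushed onto its end.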
main.rs
// main struggle problems in this section were 11 and 18, and to some extent, 12 and 14. 17 was annoying to debug, but not hard. extern crate timings_proc_macro; use timings_proc_macro::timings; #[timings] fn
() { let s: Vec<usize> = std::fs::read_to_string("src/e11.txt") .unwrap() .split_whitespace() .map(|n| n.parse::<usize>().unwrap()) .collect(); //println!("{:?}", s); // could just run with s, but let's build our 2d array. let mut v = [[0; 20]; 20]; (0..400).for_each(|i| v[i / 20][i % 20] = s[i]); //println!("{:?}", v); let mut big = 0; use itertools::Itertools; (0..20).cartesian_product(0..20).for_each(|(i, j)| { if i < 17 { // h_ let temp = v[i][j] * v[i + 1][j] * v[i + 2][j] * v[i + 3][j]; if temp > big { // println!( // "h_ new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j] // ); big = temp } } if j < 17 { // v| let temp = v[i][j] * v[i][j + 1] * v[i][j + 2] * v[i][j + 3]; if temp > big { // println!( // "v| new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j] // ); big = temp } } if i < 17 && j < 17 { // d\ let temp = v[i][j] * v[i + 1][j + 1] * v[i + 2][j + 2] * v[i + 3][j + 3]; if temp > big { // println!( // "d\\ new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j], // ); big = temp } } if i < 17 && j > 2 { // d/ let temp = v[i][j] * v[i + 1][j - 1] * v[i + 2][j - 2] * v[i + 3][j - 3]; if temp > big { // println!( // "d/ new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j] // ); big = temp } } }); println!("biggest: {}", big); } // v elegant: https://github.com/zacharydenton/euler/blob/master/011/grid.rs // 1. include_str!("grid.txt") I could be using this macro instead. // 2. .filter_map(|n| n.parse().ok()), well isn't that sweet. // 3. his solution collects the maximum value in each direction in an interesting way. Each element is k farther ahead than the current elem. h:1,v:20,d\:21,d/:19. This fails if the line crosses a boundary though. // What is the value of the first triangle number to have over five hundred divisors? #[timings] fn e12() { // entire problem is "count divisors". Naive soln sucks. Derive a soln. // Proposition. given X = p_1^a * p_2^b *..., // N_factors(X) = (a+1)(b+1).... // now we only need to find the algebraic multiplicity of each prime divisor. 
let multiplicities = |input: usize| -> std::collections::HashMap<usize, usize> { let mut h = std::collections::HashMap::new(); let mut n = input; while n % 2 == 0 { let counter = h.entry(2).or_insert(0); *counter += 1; n /= 2; } let mut i = 3; while n > 1 { while n % i == 0 { let counter = h.entry(i).or_insert(0); *counter += 1; n /= i; } i += 2; } h }; let mut i = 1; let mut sum = 0; loop { sum += i; i += 1; let divisors = multiplicities(sum).values().fold(1, |acc, d| acc * (1 + d)); //dbg!(sum, divisors); if divisors > 500 { println!("value: {}, the {}th triangle number", sum, i - 1); break; } } } #[timings] fn e13() { let s: Vec<String> = std::fs::read_to_string("src/e13.txt") .unwrap() .split_whitespace() .map(|s| s.parse::<String>().unwrap()) .collect(); let s13: Vec<usize> = s .iter() .map(|l| l[..13].parse::<usize>().unwrap()) .collect(); let n = s13.iter().sum::<usize>().to_string(); println!("e13: {}", &n[..10]); } #[allow(dead_code)] fn collatz(n: usize) -> usize { match n % 2 { 0 => n / 2, 1 => 3 * n + 1, _ => unreachable!(), } } #[timings] fn e14() { use std::collections::HashMap; let mut h = HashMap::new(); h.insert(1, 0); let mut it_counter = 0; let mut biggest = (0, 0); for it in 2..1_000_000 { if h.contains_key(&it) { continue; } // Build a cache of values until we find a value we have seen let mut next = collatz(it); it_counter += 1; let mut cache: Vec<(usize, usize)> = vec![(it, it_counter)]; // 2: 1 while h.get(&next).is_none() { it_counter += 1; cache.push((next, it_counter)); next = collatz(next); } // the next value is now in the hashmap let count_last = *h.get(&next).unwrap(); let count_for_it = count_last + it_counter; //println!("it:{},count: {}", it, count_for_it); for (n, c) in cache { let count = count_for_it + 1 - c; //println!("n:{},c: {}, count: {}", n, c, count); h.insert(n, count); } it_counter = 0; if count_for_it > biggest.0 { biggest = (count_for_it, it); } } println!("biggest seq len: {:?}, for n={:?}", biggest.0, biggest.1); } #[timings] //https://github.com/zacharydenton/euler/blob/master/014/collatz.rs fn e14_zach_denton() { let mut collatz: Vec<usize> = vec![0; 1_000_000]; collatz[1] = 1; let max = (2..collatz.len()) .max_by_key(|&i| { let f = |n: usize| match n % 2 { 0 => n / 2, _ => n * 3 + 1, }; // og: let (mut j, mut len) = (i, 0); loop { // exit if: if j < collatz.len() && collatz[j] != 0 { break; } len += 1; j = f(j); } len += collatz[j]; collatz[i] = len; len }) .unwrap(); println!("{}", max); } // How many such (only move right or down) routes are there through a 20×20 grid? #[timings] fn e15() { // basic combinatorics. of 40 positions, choose 20, i.e. the central binomial coefficient C(40, 20). let a: u128 = (21..=40).product(); let b: u128 = (2..=20).product(); println!("{}", a / b); } #[timings] fn e16() { // mostly, futzing with bigint. use num_bigint::BigUint; // note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint. let a = BigUint::new(vec![2]); let b = a.pow(1000); //println!("{:?}", b); // TFAE: //let res = b.to_string().chars().fold(0, |a, d| a + d.to_digit(10).unwrap()); let res: u32 = b.to_string().chars().map(|c| c.to_digit(10).unwrap()).sum(); println!("{:?}", res); //let digits: num::BigInt = 2.pow(1000); } // If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
#[timings] fn e17() { let map = vec![ (0, 0), (1, 3), (2, 3), (3, 5), (4, 4), (5, 4), (6, 3), (7, 5), (8, 5), (9, 4), (10, 3), (11, 6), (12, 6), (13, 8), (14, 8), (15, 7), (16, 7), (17, 9), (18, 8), (19, 8), (20, 6), (30, 6), (40, 5), (50, 5), (60, 5), (70, 7), (80, 6), (90, 6), ]; let h = std::collections::HashMap::from_iter(map.into_iter()); let res: usize = (1..=1000).fold(0, |acc, x| acc + count_letters(x, &h)); println!("{}", res); } fn count_letters(d: usize, h: &std::collections::HashMap<usize, usize>) -> usize { let (a, b, c, e) = (d % 10, d / 10 % 10, d / 100 % 10, d / 1000 % 10); let aa = if b == 1 { 0 } else { *h.get(&a).unwrap() }; let bb = if b == 1 { *h.get(&(b * 10 + a)).unwrap() } else { *h.get(&(b * 10)).unwrap() }; let mut cc = if c > 0 { 3 + 7 + h.get(&c).unwrap() } else { 0 }; // "and" counts apparently if c > 0 && aa == 0 && bb == 0 { cc -= 3 // 100 doesn't have an "and" }; let ee = if e > 0 { 8 + h.get(&e).unwrap() } else { 0 }; //println!("{}:{},{},{},{}", d, ee, cc, bb, aa); aa + bb + cc + ee } // first problem to be a bit of a challenge. I struggled picking a data structure and strategy for this one. // A couple possible approaches occur: // naive: at each step, pick the greatest next value // brute: calculate the value of all 2^14 paths, not hard // pruning: similar to brute, but if some sufficiently low sequence is included, exit early (optimization parameters: how often to prune, and what sufficiently low means) // This problem begs to be solved recursively somehow. #[timings] fn e18() { let triangle: Vec<Vec<usize>> = std::fs::read_to_string("src/e18.txt") .unwrap() .lines() .map(|l| { l.split_whitespace() .into_iter() .map(|n| n.parse::<usize>().unwrap()) .collect::<Vec<usize>>() }) .collect(); let res = e18_less_naive_r(&triangle[1..], 75, 0); println!("{}", res); } /// traverse the triangle picking the greatest value at the next binary choice #[allow(dead_code)] fn e18_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize { if t.is_empty() { running_sum } else { let (rs, li) = if t[0][last_index] > t[0][last_index + 1] { (t[0][last_index], last_index) } else { (t[0][last_index + 1], last_index + 1) }; println!("append:{},{}", rs, li); e18_naive_r(&t[1..], running_sum + rs, li) } } // 18 minutes to try naively. Now let's try a little harder. // let's try something with look ahead. 
const PEEK_DIST: usize = 5; /// traverse the triangle picking the greatest single step-PEEK_DIST-chain at each next binary choice fn e18_less_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize { if t.is_empty() { running_sum } else { // need to peek here let (_, dir, _path) = peek_ahead_r(t, running_sum, last_index, PEEK_DIST, None, vec![]); let (val, ind) = match dir { Dir::Left => (t[0][last_index], last_index), Dir::Right => (t[0][last_index + 1], last_index + 1), }; //println!("append val:{}, ind:{}, path:{:?}", val, ind, _path); e18_less_naive_r(&t[1..], running_sum + val, ind) } } // if looking ahead 1 step, terminate, returning (running_sum, LEFT|RIGHT) #[derive(Clone, Debug)] enum Dir { Left, Right, } fn peek_ahead_r( t: &[Vec<usize>], running_sum: usize, last_index: usize, mut peek_dist: usize, first_step: Option<Dir>, /* debugging */ mut path: Vec<(usize, usize)>, ) -> (usize /* value */, Dir, Vec<(usize, usize)>) { if peek_dist > t.len() { peek_dist = t.len() } assert!(peek_dist > 0); if peek_dist == 1 { // if tie: prefer rightward motion, THIS IS A (temporarily acceptable) BUG if t[0][last_index] > t[0][last_index + 1] { path.push((t[0][last_index], last_index)); ( t[0][last_index] + running_sum, first_step.unwrap_or(Dir::Left), path, ) } else { path.push((t[0][last_index + 1], last_index + 1)); ( t[0][last_index + 1] + running_sum, first_step.unwrap_or(Dir::Right), path, ) } } else { let mut p_left = path.clone(); p_left.push((t[0][last_index], last_index)); let left = peek_ahead_r( &t[1..], running_sum + t[0][last_index], last_index, peek_dist - 1, first_step.clone().unwrap_or(Dir::Left).into(), p_left, ); let mut p_right = path.clone(); p_right.push((t[0][last_index + 1], last_index + 1)); let right = peek_ahead_r( &t[1..], running_sum + t[0][last_index + 1], last_index + 1, peek_dist - 1, first_step.unwrap_or(Dir::Right).into(), p_right, ); if left.0 > right.0 { left } else { right } } } // How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)? #[timings] fn e19() { // Sundays are uniformly distributed, with P(first is Sunday) = 1/7. // How many first of the months were there? 12*100 println!("{}", 12.0 * 100.0 / 7.0); } // Can't win em all. But when ya do~ #[timings] fn e20() { // Find the sum of the digits in the number 100! // would expect the number of digits to be roughly equiv to 50^100, which has about 150 digits, though there will of course be many zeroes, about 24. Still, it seems probably best to just shove it in a bigint. Anything more creative? 97 multiplications (2..99). Some theorem may exist about the sum of digits of the product of two numbers, could search for it. Meh, thought for 5 minutes, do the bigint thing. use num_bigint::BigUint; // note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint. let a = BigUint::new(vec![2]); let a = (3..=99).fold(a, |acc, i| acc * (i as u32)); let res = a .to_string() .chars() .fold(0, |acc, i| acc + i.to_digit(10).unwrap()); println!("{:?}", res); } fn main() { e11(); e12(); e13(); //e14(); e14_zach_denton(); e15(); e16(); e17(); e18(); e19(); e20(); }
e11
identifier_name
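The grid.rs notes above describe a stride trick for e11; here is a rough reconstruction of that idea (my sketch under that description, not the linked code verbatim), with the boundary checks that address the caveat in point 3.

```rust
// Stride-based scan of a flat 20x20 grid (assumed row-major, 400 cells):
// the four directions are strides of 1 (horizontal), 20 (vertical),
// 21 (diagonal \) and 19 (diagonal /). Explicit (i, j) bounds keep a run
// from crossing a row boundary, the failure mode noted above.
fn max_product(grid: &[usize]) -> usize {
    const W: usize = 20;
    debug_assert_eq!(grid.len(), W * W);
    let mut best = 0;
    for start in 0..grid.len() {
        let (i, j) = (start / W, start % W);
        for &(stride, ok) in &[
            (1, j + 3 < W),                  // horizontal: stay on the row
            (W, i + 3 < W),                  // vertical
            (W + 1, i + 3 < W && j + 3 < W), // diagonal \
            (W - 1, i + 3 < W && j >= 3),    // diagonal /
        ] {
            if ok {
                let p: usize = (0..4).map(|k| grid[start + k * stride]).product();
                best = best.max(p);
            }
        }
    }
    best
}
```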
main.rs
// main struggle problems in this section were 11 and 18, and to some extent, 12 and 14. 17 was annoying to debug, but not hard. extern crate timings_proc_macro; use timings_proc_macro::timings; #[timings] fn e11() { let s: Vec<usize> = std::fs::read_to_string("src/e11.txt") .unwrap() .split_whitespace() .map(|n| n.parse::<usize>().unwrap()) .collect(); //println!("{:?}", s); // could just run with s, but let's build our 2d array. let mut v = [[0; 20]; 20]; (0..400).for_each(|i| v[i / 20][i % 20] = s[i]); //println!("{:?}", v); let mut big = 0; use itertools::Itertools; (0..20).cartesian_product(0..20).for_each(|(i, j)| { if i < 17 { // h_ let temp = v[i][j] * v[i + 1][j] * v[i + 2][j] * v[i + 3][j]; if temp > big { // println!( // "h_ new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j] // ); big = temp } } if j < 17 { // v| let temp = v[i][j] * v[i][j + 1] * v[i][j + 2] * v[i][j + 3]; if temp > big { // println!( // "v| new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j] // ); big = temp } } if i < 17 && j < 17 { // d\ let temp = v[i][j] * v[i + 1][j + 1] * v[i + 2][j + 2] * v[i + 3][j + 3]; if temp > big { // println!( // "d\\ new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j], // ); big = temp } } if i < 17 && j > 2 { // d/ let temp = v[i][j] * v[i + 1][j - 1] * v[i + 2][j - 2] * v[i + 3][j - 3]; if temp > big { // println!( // "d/ new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j] // ); big = temp } } }); println!("biggest: {}", big); } // v elegant: https://github.com/zacharydenton/euler/blob/master/011/grid.rs // 1. include_str!("grid.txt") I could be using this macro instead. // 2. .filter_map(|n| n.parse().ok()), well isn't that sweet. // 3. his solution collects the maximum value in each direction in an interesting way. Each element is k farther ahead than the current elem. h:1,v:20,d\:21,d/:19. This fails if the line crosses a boundary though. // What is the value of the first triangle number to have over five hundred divisors? #[timings] fn e12() { // entire problem is "count divisors". Naive soln sucks. Derive a soln. // Proposition. given X = p_1^a * p_2^b *..., // N_factors(X) = (a+1)(b+1).... // now we only need to find the algebraic multiplicity of each prime divisor. 
let multiplicities = |input: usize| -> std::collections::HashMap<usize, usize> { let mut h = std::collections::HashMap::new(); let mut n = input; while n % 2 == 0 { let counter = h.entry(2).or_insert(0); *counter += 1; n /= 2; } let mut i = 3; while n > 1 { while n % i == 0 { let counter = h.entry(i).or_insert(0); *counter += 1; n /= i; } i += 2; } h }; let mut i = 1; let mut sum = 0; loop { sum += i; i += 1; let divisors = multiplicities(sum).values().fold(1, |acc, d| acc * (1 + d)); //dbg!(sum, divisors); if divisors > 500 { println!("value: {}, the {}th triangle number", sum, i - 1); break; } } } #[timings] fn e13() { let s: Vec<String> = std::fs::read_to_string("src/e13.txt") .unwrap() .split_whitespace() .map(|s| s.parse::<String>().unwrap()) .collect(); let s13: Vec<usize> = s .iter() .map(|l| l[..13].parse::<usize>().unwrap()) .collect(); let n = s13.iter().sum::<usize>().to_string(); println!("e13: {}", &n[..10]); } #[allow(dead_code)] fn collatz(n: usize) -> usize { match n % 2 { 0 => n / 2, 1 => 3 * n + 1, _ => unreachable!(), } } #[timings] fn e14() { use std::collections::HashMap; let mut h = HashMap::new(); h.insert(1, 0); let mut it_counter = 0; let mut biggest = (0, 0); for it in 2..1_000_000 { if h.contains_key(&it) { continue; } // Build a cache of values until we find a value we have seen let mut next = collatz(it); it_counter += 1; let mut cache: Vec<(usize, usize)> = vec![(it, it_counter)]; // 2: 1 while h.get(&next).is_none() { it_counter += 1; cache.push((next, it_counter)); next = collatz(next); } // the next value is now in the hashmap let count_last = *h.get(&next).unwrap(); let count_for_it = count_last + it_counter; //println!("it:{},count: {}", it, count_for_it); for (n, c) in cache { let count = count_for_it + 1 - c; //println!("n:{},c: {}, count: {}", n, c, count); h.insert(n, count); } it_counter = 0; if count_for_it > biggest.0 { biggest = (count_for_it, it); } } println!("biggest seq len: {:?}, for n={:?}", biggest.0, biggest.1); } #[timings] //https://github.com/zacharydenton/euler/blob/master/014/collatz.rs fn e14_zach_denton() { let mut collatz: Vec<usize> = vec![0; 1_000_000]; collatz[1] = 1; let max = (2..collatz.len()) .max_by_key(|&i| { let f = |n: usize| match n % 2 { 0 => n / 2, _ => n * 3 + 1, }; // og: let (mut j, mut len) = (i, 0); loop { // exit if: if j < collatz.len() && collatz[j] != 0 { break; } len += 1; j = f(j); } len += collatz[j]; collatz[i] = len; len }) .unwrap(); println!("{}", max); } // How many such (only move right or down) routes are there through a 20×20 grid? #[timings] fn e15() { // basic combinatorics. of 40 positions, choose 20, i.e. the central binomial coefficient C(40, 20). let a: u128 = (21..=40).product(); let b: u128 = (2..=20).product(); println!("{}", a / b); } #[timings] fn e16() { // mostly, futzing with bigint. use num_bigint::BigUint; // note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint. let a = BigUint::new(vec![2]); let b = a.pow(1000); //println!("{:?}", b); // TFAE: //let res = b.to_string().chars().fold(0, |a, d| a + d.to_digit(10).unwrap()); let res: u32 = b.to_string().chars().map(|c| c.to_digit(10).unwrap()).sum(); println!("{:?}", res); //let digits: num::BigInt = 2.pow(1000); } // If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
#[timings] fn e17() { let map = vec![ (0, 0), (1, 3), (2, 3), (3, 5), (4, 4), (5, 4), (6, 3), (7, 5), (8, 5), (9, 4), (10, 3), (11, 6), (12, 6), (13, 8), (14, 8), (15, 7), (16, 7), (17, 9), (18, 8), (19, 8), (20, 6), (30, 6), (40, 5), (50, 5), (60, 5), (70, 7), (80, 6), (90, 6), ]; let h = std::collections::HashMap::from_iter(map.into_iter()); let res: usize = (1..=1000).fold(0, |acc, x| acc + count_letters(x, &h)); println!("{}", res); } fn count_letters(d: usize, h: &std::collections::HashMap<usize, usize>) -> usize { let (a, b, c, e) = (d % 10, d / 10 % 10, d / 100 % 10, d / 1000 % 10); let aa = if b == 1 { 0 } else { *h.get(&a).unwrap() }; let bb = if b == 1 { *h.get(&(b * 10 + a)).unwrap() } else { *h.get(&(b * 10)).unwrap() }; let mut cc = if c > 0 { 3 + 7 + h.get(&c).unwrap() } else { 0 }; // "and" counts apparently if c > 0 && aa == 0 && bb == 0 { cc -= 3 // 100 doesn't have an "and" }; let ee = if e > 0 { 8 + h.get(&e).unwrap() } else { 0 }; //println!("{}:{},{},{},{}", d, ee, cc, bb, aa); aa + bb + cc + ee } // first problem to be a bit of a challenge. I struggled picking a data structure and strategy for this one. // A couple possible approaches occur: // naive: at each step, pick the greatest next value // brute: calculate the value of all 2^14 paths, not hard // pruning: similar to brute, but if some sufficiently low sequence is included, exit early (optimization parameters: how often to prune, and what sufficiently low means) // This problem begs to be solved recursively somehow. #[timings] fn e18() {
/// traverse the triangle picking the greatest value at the next binary choice #[allow(dead_code)] fn e18_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize { if t.is_empty() { running_sum } else { let (rs, li) = if t[0][last_index] > t[0][last_index + 1] { (t[0][last_index], last_index) } else { (t[0][last_index + 1], last_index + 1) }; println!("append:{},{}", rs, li); e18_naive_r(&t[1..], running_sum + rs, li) } } // 18 minutes to try naively. Now let's try a little harder. // let's try something with look ahead. const PEEK_DIST: usize = 5; /// traverse the triangle picking the greatest single step-PEEK_DIST-chain at each next binary choice fn e18_less_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize { if t.is_empty() { running_sum } else { // need to peek here let (_, dir, _path) = peek_ahead_r(t, running_sum, last_index, PEEK_DIST, None, vec![]); let (val, ind) = match dir { Dir::Left => (t[0][last_index], last_index), Dir::Right => (t[0][last_index + 1], last_index + 1), }; //println!("append val:{}, ind:{}, path:{:?}", val, ind, _path); e18_less_naive_r(&t[1..], running_sum + val, ind) } } // if looking ahead 1 step, terminate, returning (running_sum, LEFT|RIGHT) #[derive(Clone, Debug)] enum Dir { Left, Right, } fn peek_ahead_r( t: &[Vec<usize>], running_sum: usize, last_index: usize, mut peek_dist: usize, first_step: Option<Dir>, /* debugging */ mut path: Vec<(usize, usize)>, ) -> (usize /* value */, Dir, Vec<(usize, usize)>) { if peek_dist > t.len() { peek_dist = t.len() } assert!(peek_dist > 0); if peek_dist == 1 { // if tie: prefer rightward motion, THIS IS A (temporarily acceptable) BUG if t[0][last_index] > t[0][last_index + 1] { path.push((t[0][last_index], last_index)); ( t[0][last_index] + running_sum, first_step.unwrap_or(Dir::Left), path, ) } else { path.push((t[0][last_index + 1], last_index + 1)); ( t[0][last_index + 1] + running_sum, first_step.unwrap_or(Dir::Right), path, ) } } else { let mut p_left = path.clone(); p_left.push((t[0][last_index], last_index)); let left = peek_ahead_r( &t[1..], running_sum + t[0][last_index], last_index, peek_dist - 1, first_step.clone().unwrap_or(Dir::Left).into(), p_left, ); let mut p_right = path.clone(); p_right.push((t[0][last_index + 1], last_index + 1)); let right = peek_ahead_r( &t[1..], running_sum + t[0][last_index + 1], last_index + 1, peek_dist - 1, first_step.unwrap_or(Dir::Right).into(), p_right, ); if left.0 > right.0 { left } else { right } } } // How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)? #[timings] fn e19() { // Sundays are uniformly distributed, with P(first is Sunday) = 1/7. // How many first of the months were there? 12*100 println!("{}", 12.0 * 100.0 / 7.0); } // Can't win em all. But when ya do~ #[timings] fn e20() { // Find the sum of the digits in the number 100! // would expect the number of digits to be roughly equiv to 50^100, which has about 150 digits, though there will of course be many zeroes, about 24. Still, it seems probably best to just shove it in a bigint. Anything more creative? 97 multiplications (2..99). Some theorem may exist about the sum of digits of the product of two numbers, could search for it. Meh, thought for 5 minutes, do the bigint thing. use num_bigint::BigUint; // note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint. 
let a = BigUint::new(vec![2]); let a = (3..=99).fold(a, |acc, i| acc * (i as u32)); let res = a .to_string() .chars() .fold(0, |acc, i| acc + i.to_digit(10).unwrap()); println!("{:?}", res); } fn main() { e11(); e12(); e13(); //e14(); e14_zach_denton(); e15(); e16(); e17(); e18(); e19(); e20(); }
let triangle: Vec<Vec<usize>> = std::fs::read_to_string("src/e18.txt") .unwrap() .lines() .map(|l| { l.split_whitespace() .into_iter() .map(|n| n.parse::<usize>().unwrap()) .collect::<Vec<usize>>() }) .collect(); let res = e18_less_naive_r(&triangle[1..], 75, 0); println!("{}", res); }
identifier_body
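The e18 comments above weigh naive, brute-force, and pruning strategies; the textbook alternative (a sketch of the classic DP, not what the code above does) collapses the triangle from the bottom row up, replacing each cell with itself plus the better of its two children. Unlike the `PEEK_DIST` look-ahead, this is exact, in O(rows²).

```rust
// Bottom-up max-path DP over a triangle (assumes a non-empty triangle where
// each row is one element longer than the row above, as in src/e18.txt).
fn max_path(triangle: &[Vec<usize>]) -> usize {
    // Start from the last row, then fold each higher row into it.
    let mut best = triangle.last().cloned().unwrap_or_default();
    for row in triangle.iter().rev().skip(1) {
        best = row
            .iter()
            .enumerate()
            .map(|(j, &v)| v + best[j].max(best[j + 1]))
            .collect();
    }
    best[0]
}
```

For the PE18 triangle this returns 1074, and it scales to the 100-row follow-up problem where enumerating all paths does not.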
main.rs
// main struggle problems in this section were 11 and 18, and to some extent, 12 and 14. 17 was annoying to debug, but not hard. extern crate timings_proc_macro; use timings_proc_macro::timings; #[timings] fn e11() { let s: Vec<usize> = std::fs::read_to_string("src/e11.txt") .unwrap() .split_whitespace() .map(|n| n.parse::<usize>().unwrap()) .collect(); //println!("{:?}", s); // could just run with s, but let's build our 2d array. let mut v = [[0; 20]; 20]; (0..400).for_each(|i| v[i / 20][i % 20] = s[i]); //println!("{:?}", v); let mut big = 0; use itertools::Itertools; (0..20).cartesian_product(0..20).for_each(|(i, j)| { if i < 17 { // h_ let temp = v[i][j] * v[i + 1][j] * v[i + 2][j] * v[i + 3][j]; if temp > big { // println!( // "h_ new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j] // ); big = temp } } if j < 17 { // v| let temp = v[i][j] * v[i][j + 1] * v[i][j + 2] * v[i][j + 3]; if temp > big { // println!( // "v| new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j] // ); big = temp } } if i < 17 && j < 17 { // d\ let temp = v[i][j] * v[i + 1][j + 1] * v[i + 2][j + 2] * v[i + 3][j + 3]; if temp > big { // println!( // "d\\ new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j], // ); big = temp } } if i < 17 && j > 2 { // d/ let temp = v[i][j] * v[i + 1][j - 1] * v[i + 2][j - 2] * v[i + 3][j - 3]; if temp > big { // println!( // "d/ new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j] // ); big = temp } } }); println!("biggest: {}", big); } // v elegant: https://github.com/zacharydenton/euler/blob/master/011/grid.rs // 1. include_str!("grid.txt") I could be using this macro instead. // 2. .filter_map(|n| n.parse().ok()), well isn't that sweet. // 3. his solution collects the maximum value in each direction in an interesting way. Each element is k farther ahead than the current elem. h:1,v:20,d\:21,d/:19. This fails if the line crosses a boundary though. // What is the value of the first triangle number to have over five hundred divisors? #[timings] fn e12() { // entire problem is "count divisors". Naive soln sucks. Derive a soln. // Proposition. given X = p_1^a * p_2^b *..., // N_factors(X) = (a+1)(b+1).... // now we only need to find the algebraic multiplicity of each prime divisor. 
let multiplicities = |input: usize| -> std::collections::HashMap<usize, usize> { let mut h = std::collections::HashMap::new(); let mut n = input; while n % 2 == 0 { let counter = h.entry(2).or_insert(0); *counter += 1; n /= 2; } let mut i = 3; while n > 1 { while n % i == 0 { let counter = h.entry(i).or_insert(0); *counter += 1; n /= i; } i += 2; } h }; let mut i = 1; let mut sum = 0; loop { sum += i; i += 1; let divisors = multiplicities(sum).values().fold(1, |acc, d| acc * (1 + d)); //dbg!(sum, divisors); if divisors > 500 { println!("value: {}, the {}th triangle number", sum, i - 1); break; } } } #[timings] fn e13() { let s: Vec<String> = std::fs::read_to_string("src/e13.txt") .unwrap() .split_whitespace() .map(|s| s.parse::<String>().unwrap()) .collect(); let s13: Vec<usize> = s .iter() .map(|l| l[..13].parse::<usize>().unwrap()) .collect(); let n = s13.iter().sum::<usize>().to_string(); println!("e13: {}", &n[..10]); } #[allow(dead_code)] fn collatz(n: usize) -> usize { match n % 2 { 0 => n / 2, 1 => 3 * n + 1, _ => unreachable!(), } } #[timings] fn e14() { use std::collections::HashMap; let mut h = HashMap::new(); h.insert(1, 0); let mut it_counter = 0; let mut biggest = (0, 0); for it in 2..1_000_000 { if h.contains_key(&it) { continue; } // Build a cache of values until we find a value we have seen let mut next = collatz(it); it_counter += 1; let mut cache: Vec<(usize, usize)> = vec![(it, it_counter)]; // 2: 1 while h.get(&next).is_none() { it_counter += 1; cache.push((next, it_counter)); next = collatz(next); } // the next value is now in the hashmap let count_last = *h.get(&next).unwrap(); let count_for_it = count_last + it_counter; //println!("it:{},count: {}", it, count_for_it); for (n, c) in cache { let count = count_for_it + 1 - c; //println!("n:{},c: {}, count: {}", n, c, count); h.insert(n, count); } it_counter = 0; if count_for_it > biggest.0 { biggest = (count_for_it, it); } } println!("biggest seq len: {:?}, for n={:?}", biggest.0, biggest.1); } #[timings] //https://github.com/zacharydenton/euler/blob/master/014/collatz.rs fn e14_zach_denton() { let mut collatz: Vec<usize> = vec![0; 1_000_000]; collatz[1] = 1; let max = (2..collatz.len()) .max_by_key(|&i| { let f = |n: usize| match n % 2 { 0 => n / 2, _ => n * 3 + 1, }; // og: let (mut j, mut len) = (i, 0); loop { // exit if: if j < collatz.len() && collatz[j] != 0 { break; } len += 1; j = f(j); } len += collatz[j]; collatz[i] = len; len }) .unwrap(); println!("{}", max); } // How many such (only move right or down) routes are there through a 20×20 grid? #[timings] fn e15() { // basic combinatorics. of 40 positions, choose 20, i.e. the central binomial coefficient C(40, 20). let a: u128 = (21..=40).product(); let b: u128 = (2..=20).product(); println!("{}", a / b); } #[timings] fn e16() { // mostly, futzing with bigint. use num_bigint::BigUint; // note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint. let a = BigUint::new(vec![2]); let b = a.pow(1000); //println!("{:?}", b); // TFAE: //let res = b.to_string().chars().fold(0, |a, d| a + d.to_digit(10).unwrap()); let res: u32 = b.to_string().chars().map(|c| c.to_digit(10).unwrap()).sum(); println!("{:?}", res); //let digits: num::BigInt = 2.pow(1000); } // If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
#[timings] fn e17() { let map = vec![ (0, 0), (1, 3), (2, 3), (3, 5), (4, 4), (5, 4), (6, 3), (7, 5), (8, 5), (9, 4), (10, 3), (11, 6), (12, 6), (13, 8), (14, 8), (15, 7), (16, 7), (17, 9), (18, 8), (19, 8), (20, 6), (30, 6), (40, 5), (50, 5), (60, 5), (70, 7), (80, 6), (90, 6), ]; let h = std::collections::HashMap::from_iter(map.into_iter()); let res: usize = (1..=1000).fold(0, |acc, x| acc + count_letters(x, &h)); println!("{}", res); } fn count_letters(d: usize, h: &std::collections::HashMap<usize, usize>) -> usize { let (a, b, c, e) = (d % 10, d / 10 % 10, d / 100 % 10, d / 1000 % 10); let aa = if b == 1 { 0 } else { *h.get(&a).unwrap() }; let bb = if b == 1 { *h.get(&(b * 10 + a)).unwrap() } else { *h.get(&(b * 10)).unwrap() }; let mut cc = if c > 0 { 3 + 7 + h.get(&c).unwrap() } else { 0 }; // "and" counts apparently if c > 0 && aa == 0 && bb == 0 { cc -= 3 // 100 doesn't have an "and" }; let ee = if e > 0 { 8 + h.get(&e).unwrap() } else { 0 }; //println!("{}:{},{},{},{}", d, ee, cc, bb, aa); aa + bb + cc + ee } // first problem to be a bit of a challenge. I struggled picking a data structure and strategy for this one. // A couple possible approaches occur: // naive: at each step, pick the greatest next value // brute: calculate the value of all 2^14 paths, not hard // pruning: similar to brute, but if some sufficiently low sequence is included, exit early (optimization parameters: how often to prune, and what sufficiently low means) // This problem begs to be solved recursively somehow. #[timings] fn e18() { let triangle: Vec<Vec<usize>> = std::fs::read_to_string("src/e18.txt") .unwrap() .lines() .map(|l| { l.split_whitespace() .into_iter() .map(|n| n.parse::<usize>().unwrap()) .collect::<Vec<usize>>() }) .collect(); let res = e18_less_naive_r(&triangle[1..], 75, 0); println!("{}", res); } /// traverse the triangle picking the greatest value at the next binary choice #[allow(dead_code)] fn e18_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize { if t.is_empty() { running_sum } else { let (rs, li) = if t[0][last_index] > t[0][last_index + 1] { (t[0][last_index], last_index) } else { (t[0][last_index + 1], last_index + 1) }; println!("append:{},{}", rs, li); e18_naive_r(&t[1..], running_sum + rs, li) } } // 18 minutes to try naively. Now let's try a little harder. // let's try something with look ahead. const PEEK_DIST: usize = 5; /// traverse the triangle picking the greatest single step-PEEK_DIST-chain at each next binary choice fn e18_less_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize { if t.is_empty() { running_sum } else { // need to peek here let (_, dir, _path) = peek_ahead_r(t, running_sum, last_index, PEEK_DIST, None, vec![]); let (val, ind) = match dir { Dir::Left => (t[0][last_index], last_index), Dir::Right => (t[0][last_index + 1], last_index + 1), }; //println!("append val:{}, ind:{}, path:{:?}", val, ind, _path); e18_less_naive_r(&t[1..], running_sum + val, ind) } } // if looking ahead 1 step, terminate, returning (running_sum, LEFT|RIGHT) #[derive(Clone, Debug)] enum Dir { Left, Right, } fn peek_ahead_r( t: &[Vec<usize>], running_sum: usize, last_index: usize, mut peek_dist: usize, first_step: Option<Dir>, /* debugging */ mut path: Vec<(usize, usize)>, ) -> (usize /* value */, Dir, Vec<(usize, usize)>) { if peek_dist > t.len() { peek_dist = t.len() } assert!(peek_dist > 0); if peek_dist == 1 {
else { let mut p_left = path.clone(); p_left.push((t[0][last_index], last_index)); let left = peek_ahead_r( &t[1..], running_sum + t[0][last_index], last_index, peek_dist - 1, first_step.clone().unwrap_or(Dir::Left).into(), p_left, ); let mut p_right = path.clone(); p_right.push((t[0][last_index + 1], last_index + 1)); let right = peek_ahead_r( &t[1..], running_sum + t[0][last_index + 1], last_index + 1, peek_dist - 1, first_step.unwrap_or(Dir::Right).into(), p_right, ); if left.0 > right.0 { left } else { right } } } // How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)? #[timings] fn e19() { // Sundays are uniformly distributed, with P(first is Sunday) = 1/7. // How many first of the months were there? 12*100 println!("{}", 12.0 * 100.0 / 7.0); } // Can't win em all. But when ya do~ #[timings] fn e20() { // Find the sum of the digits in the number 100! // would expect the number of digits to be roughly equiv to 50^100, which has about 150 digits, though there will of course be many zeroes, about 24. Still, it seems probably best to just shove it in a bigint. Anything more creative? 97 multiplications (2..99). Some theorem may exist about the sum of digits of the product of two numbers, could search for it. Meh, thought for 5 minutes, do the bigint thing. use num_bigint::BigUint; // note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint. let a = BigUint::new(vec![2]); let a = (3..=99).fold(a, |acc, i| acc * (i as u32)); let res = a .to_string() .chars() .fold(0, |acc, i| acc + i.to_digit(10).unwrap()); println!("{:?}", res); } fn main() { e11(); e12(); e13(); //e14(); e14_zach_denton(); e15(); e16(); e17(); e18(); e19(); e20(); }
// if tie: prefer rightward motion, THIS IS A (temporarily acceptable) BUG if t[0][last_index] > t[0][last_index + 1] { path.push((t[0][last_index], last_index)); ( t[0][last_index] + running_sum, first_step.unwrap_or(Dir::Left), path, ) } else { path.push((t[0][last_index + 1], last_index + 1)); ( t[0][last_index + 1] + running_sum, first_step.unwrap_or(Dir::Right), path, ) } }
conditional_block
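The e12 proposition above (for X = p_1^a * p_2^b * ..., the divisor count is (a+1)(b+1)...) can be packaged as a standalone helper. This sketch (illustrative, not the solution's exact closure) also stops trial division at √n, which the closure in e12 doesn't; that matters once the triangle sums get large.

```rust
// Divisor count via prime factorization: if n = p1^a1 * p2^a2 * ...,
// then d(n) = (a1 + 1)(a2 + 1)... Trial division stops at sqrt(n); any
// leftover n > 1 is a single prime factor with exponent 1.
fn num_divisors(mut n: usize) -> usize {
    let mut count = 1;
    let mut p = 2;
    while p * p <= n {
        let mut a = 0;
        while n % p == 0 {
            a += 1;
            n /= p;
        }
        count *= a + 1;
        p += 1;
    }
    if n > 1 {
        count *= 2; // one remaining prime with exponent 1
    }
    count
}

// num_divisors(28) == 6, since 28 = 2^2 * 7 and (2+1)(1+1) = 6:
// the divisors are 1, 2, 4, 7, 14, 28.
```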
main.rs
// main struggle problems in this section were 11 and 18, and to some extent, 12 and 14. 17 was annoying to debug, but not hard. extern crate timings_proc_macro; use timings_proc_macro::timings; #[timings] fn e11() { let s: Vec<usize> = std::fs::read_to_string("src/e11.txt") .unwrap() .split_whitespace() .map(|n| n.parse::<usize>().unwrap()) .collect(); //println!("{:?}", s); // could just run with s, but let's build our 2d array. let mut v = [[0; 20]; 20]; (0..400).for_each(|i| v[i / 20][i % 20] = s[i]); //println!("{:?}", v); let mut big = 0; use itertools::Itertools; (0..20).cartesian_product(0..20).for_each(|(i, j)| { if i < 17 { // h_ let temp = v[i][j] * v[i + 1][j] * v[i + 2][j] * v[i + 3][j]; if temp > big { // println!( // "h_ new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j] // ); big = temp } } if j < 17 { // v| let temp = v[i][j] * v[i][j + 1] * v[i][j + 2] * v[i][j + 3]; if temp > big { // println!( // "v| new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j] // ); big = temp } } if i < 17 && j < 17 { // d\ let temp = v[i][j] * v[i + 1][j + 1] * v[i + 2][j + 2] * v[i + 3][j + 3]; if temp > big { // println!( // "d\\ new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j], // ); big = temp } } if i < 17 && j > 2 { // d/ let temp = v[i][j] * v[i + 1][j - 1] * v[i + 2][j - 2] * v[i + 3][j - 3]; if temp > big { // println!( // "d/ new biggest: {} starting at: ({},{}), with init value {}:", // big, i, j, v[i][j] // ); big = temp } } }); println!("biggest: {}", big); } // v elegant: https://github.com/zacharydenton/euler/blob/master/011/grid.rs // 1. include_str!("grid.txt") I could be using this macro instead. // 2. .filter_map(|n| n.parse().ok()), well isn't that sweet. // 3. his solution collects the maximum value in each direction in an interesting way. Each element is k farther ahead than the current elem. h:1,v:20,d\:21,d/:19. This fails if the line crosses a boundary though. // What is the value of the first triangle number to have over five hundred divisors? #[timings] fn e12() { // entire problem is "count divisors". Naive soln sucks. Derive a soln. // Proposition. given X = p_1^a * p_2^b *..., // N_factors(X) = (a+1)(b+1).... // now we only need to find the algebraic multiplicity of each prime divisor. let multiplicities = |input: usize| -> std::collections::HashMap<usize, usize> { let mut h = std::collections::HashMap::new(); let mut n = input; while n % 2 == 0 { let counter = h.entry(2).or_insert(0); *counter += 1; n /= 2; } let mut i = 3; while n > 1 { while n % i == 0 { let counter = h.entry(i).or_insert(0); *counter += 1; n /= i; } i += 2; } h }; let mut i = 1; let mut sum = 0; loop { sum += i; i += 1; let divisors = multiplicities(sum).values().fold(1, |acc, d| acc * (1 + d)); //dbg!(sum, divisors); if divisors > 500 { println!("value: {}, the {}th triangle number", sum, i - 1); break;
} } } #[timings] fn e13() { let s: Vec<String> = std::fs::read_to_string("src/e13.txt") .unwrap() .split_whitespace() .map(|s| s.parse::<String>().unwrap()) .collect(); let s13: Vec<usize> = s .iter() .map(|l| l[..13].parse::<usize>().unwrap()) .collect(); let n = s13.iter().sum::<usize>().to_string(); println!("e13: {}", &n[..10]); } #[allow(dead_code)] fn collatz(n: usize) -> usize { match n % 2 { 0 => n / 2, 1 => 3 * n + 1, _ => unreachable!(), } } #[timings] fn e14() { use std::collections::HashMap; let mut h = HashMap::new(); h.insert(1, 0); let mut it_counter = 0; let mut biggest = (0, 0); for it in 2..1_000_000 { if h.contains_key(&it) { continue; } // Build a cache of values until we find a value we have seen let mut next = collatz(it); it_counter += 1; let mut cache: Vec<(usize, usize)> = vec![(it, it_counter)]; // 2: 1 while h.get(&next).is_none() { it_counter += 1; cache.push((next, it_counter)); next = collatz(next); } // the next value is now in the hashmap let count_last = *h.get(&next).unwrap(); let count_for_it = count_last + it_counter; //println!("it:{},count: {}", it, count_for_it); for (n, c) in cache { let count = count_for_it + 1 - c; //println!("n:{},c: {}, count: {}", n, c, count); h.insert(n, count); } it_counter = 0; if count_for_it > biggest.0 { biggest = (count_for_it, it); } } println!("biggest seq len: {:?}, for n={:?}", biggest.0, biggest.1); } #[timings] //https://github.com/zacharydenton/euler/blob/master/014/collatz.rs fn e14_zach_denton() { let mut collatz: Vec<usize> = vec![0; 1_000_000]; collatz[1] = 1; let max = (2..collatz.len()) .max_by_key(|&i| { let f = |n: usize| match n % 2 { 0 => n / 2, _ => n * 3 + 1, }; // og: let (mut j, mut len) = (i, 0); loop { // exit if: if j < collatz.len() && collatz[j] != 0 { break; } len += 1; j = f(j); } len += collatz[j]; collatz[i] = len; len }) .unwrap(); println!("{}", max); } // How many such (only move right or down) routes are there through a 20×20 grid? #[timings] fn e15() { // basic combinatorics. of 40 positions, choose 20, i.e. the central binomial coefficient C(40, 20). let a: u128 = (21..=40).product(); let b: u128 = (2..=20).product(); println!("{}", a / b); } #[timings] fn e16() { // mostly, futzing with bigint. use num_bigint::BigUint; // note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint. let a = BigUint::new(vec![2]); let b = a.pow(1000); //println!("{:?}", b); // TFAE: //let res = b.to_string().chars().fold(0, |a, d| a + d.to_digit(10).unwrap()); let res: u32 = b.to_string().chars().map(|c| c.to_digit(10).unwrap()).sum(); println!("{:?}", res); //let digits: num::BigInt = 2.pow(1000); } // If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
#[timings] fn e17() { let map = vec![ (0, 0), (1, 3), (2, 3), (3, 5), (4, 4), (5, 4), (6, 3), (7, 5), (8, 5), (9, 4), (10, 3), (11, 6), (12, 6), (13, 8), (14, 8), (15, 7), (16, 7), (17, 9), (18, 8), (19, 8), (20, 6), (30, 6), (40, 5), (50, 5), (60, 5), (70, 7), (80, 6), (90, 6), ]; let h = std::collections::HashMap::from_iter(map.into_iter()); let res: usize = (1..=1000).fold(0, |acc, x| acc + count_letters(x, &h)); println!("{}", res); } fn count_letters(d: usize, h: &std::collections::HashMap<usize, usize>) -> usize { let (a, b, c, e) = (d % 10, d / 10 % 10, d / 100 % 10, d / 1000 % 10); let aa = if b == 1 { 0 } else { *h.get(&a).unwrap() }; let bb = if b == 1 { *h.get(&(b * 10 + a)).unwrap() } else { *h.get(&(b * 10)).unwrap() }; let mut cc = if c > 0 { 3 + 7 + h.get(&c).unwrap() } else { 0 }; // "and" counts apparently if c > 0 && aa == 0 && bb == 0 { cc -= 3 // 100 doesn't have an "and" }; let ee = if e > 0 { 8 + h.get(&e).unwrap() } else { 0 }; //println!("{}:{},{},{},{}", d, ee, cc, bb, aa); aa + bb + cc + ee } // first problem to be a bit of a challenge. I struggled picking a data structure and strategy for this one. // A couple possible approaches occur: // naive: at each step, pick the greatest next value // brute: calculate the value of all 2^14 paths, not hard // pruning: similar to brute, but if some sufficiently low sequence is included, exit early (optimization parameters: how often to prune, and what sufficiently low means) // This problem begs to be solved recursively somehow. #[timings] fn e18() { let triangle: Vec<Vec<usize>> = std::fs::read_to_string("src/e18.txt") .unwrap() .lines() .map(|l| { l.split_whitespace() .into_iter() .map(|n| n.parse::<usize>().unwrap()) .collect::<Vec<usize>>() }) .collect(); let res = e18_less_naive_r(&triangle[1..], 75, 0); println!("{}", res); } /// traverse the triangle picking the greatest value at the next binary choice #[allow(dead_code)] fn e18_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize { if t.is_empty() { running_sum } else { let (rs, li) = if t[0][last_index] > t[0][last_index + 1] { (t[0][last_index], last_index) } else { (t[0][last_index + 1], last_index + 1) }; println!("append:{},{}", rs, li); e18_naive_r(&t[1..], running_sum + rs, li) } } // 18 minutes to try naively. Now let's try a little harder. // let's try something with look ahead. 
const PEEK_DIST: usize = 5; /// traverse the triangle picking the greatest single step-PEEK_DIST-chain at each next binary choice fn e18_less_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize { if t.is_empty() { running_sum } else { // need to peek here let (_, dir, _path) = peek_ahead_r(t, running_sum, last_index, PEEK_DIST, None, vec![]); let (val, ind) = match dir { Dir::Left => (t[0][last_index], last_index), Dir::Right => (t[0][last_index + 1], last_index + 1), }; //println!("append val:{}, ind:{}, path:{:?}", val, ind, _path); e18_less_naive_r(&t[1..], running_sum + val, ind) } } // if looking ahead 1 step, terminate, returning (running_sum, LEFT|RIGHT) #[derive(Clone, Debug)] enum Dir { Left, Right, } fn peek_ahead_r( t: &[Vec<usize>], running_sum: usize, last_index: usize, mut peek_dist: usize, first_step: Option<Dir>, /* debugging */ mut path: Vec<(usize, usize)>, ) -> (usize /* value */, Dir, Vec<(usize, usize)>) { if peek_dist > t.len() { peek_dist = t.len() } assert!(peek_dist > 0); if peek_dist == 1 { // if tie: prefer rightward motion, THIS IS A (temporarily acceptable) BUG if t[0][last_index] > t[0][last_index + 1] { path.push((t[0][last_index], last_index)); ( t[0][last_index] + running_sum, first_step.unwrap_or(Dir::Left), path, ) } else { path.push((t[0][last_index + 1], last_index + 1)); ( t[0][last_index + 1] + running_sum, first_step.unwrap_or(Dir::Right), path, ) } } else { let mut p_left = path.clone(); p_left.push((t[0][last_index], last_index)); let left = peek_ahead_r( &t[1..], running_sum + t[0][last_index], last_index, peek_dist - 1, first_step.clone().unwrap_or(Dir::Left).into(), p_left, ); let mut p_right = path.clone(); p_right.push((t[0][last_index + 1], last_index + 1)); let right = peek_ahead_r( &t[1..], running_sum + t[0][last_index + 1], last_index + 1, peek_dist - 1, first_step.unwrap_or(Dir::Right).into(), p_right, ); if left.0 > right.0 { left } else { right } } } // How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)? #[timings] fn e19() { // Sundays are uniformly distributed, with P(first is Sunday) = 1/7. // How many first of the months were there? 12*100 println!("{}", 12.0 * 100.0 / 7.0); } // Can't win em all. But when ya do~ #[timings] fn e20() { // Find the sum of the digits in the number 100! // would expect the number of digits to be roughly equiv to 50^100, which has about 150 digits, though there will of course be many zeroes, about 24. Still, it seems probably best to just shove it in a bigint. Anything more creative? 97 multiplications (2..99). Some theorem may exist about the sum of digits of the product of two numbers, could search for it. Meh, thought for 5 minutes, do the bigint thing. use num_bigint::BigUint; // note that 2**1000 will have about 300 digits, so can't fit into a normal integer representation. Need a bigint. let a = BigUint::new(vec![2]); let a = (3..=99).fold(a, |acc, i| acc * (i as u32)); let res = a .to_string() .chars() .fold(0, |acc, i| acc + i.to_digit(10).unwrap()); println!("{:?}", res); } fn main() { e11(); e12(); e13(); //e14(); e14_zach_denton(); e15(); e16(); e17(); e18(); e19(); e20(); }
random_line_split
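For e15-style path counting, an alternative to dividing two big factorial products is the running-product form of C(n, k), which multiplies and divides as it goes so intermediates stay near the final value (a sketch; the solution above is fine in u128 for n = 40, but this form survives larger n):

```rust
// Incremental binomial coefficient: c starts at C(n, 0) = 1 and each step
// turns C(n, i) into C(n, i + 1) = C(n, i) * (n - i) / (i + 1).
fn binomial(n: u128, k: u128) -> u128 {
    let k = k.min(n - k); // C(n, k) == C(n, n - k); take the shorter loop
    let mut c: u128 = 1;
    for i in 0..k {
        // c * (n - i) is always divisible by (i + 1) here, because the
        // result is the integer C(n, i + 1).
        c = c * (n - i) / (i + 1);
    }
    c
}

// binomial(40, 20) == 137_846_528_820, the number of right/down routes
// through the 20x20 grid.
```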
tabs.rs
// Copyright 2016 Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! A container for all the tabs being edited. Also functions as main dispatch for RPC. use std::collections::BTreeMap; use std::io::{self, Read, Write}; use std::path::{PathBuf, Path}; use std::fs::File; use std::sync::{Arc, Mutex}; use serde_json::value::Value; use xi_rope::rope::Rope; use editor::Editor; use rpc::{CoreCommand, EditCommand}; use styles::{Style, StyleMap}; use MainPeer; /// ViewIdentifiers are the primary means of routing messages between xi-core and a client view. pub type ViewIdentifier = String; /// BufferIdentifiers uniquely identify open buffers. type BufferIdentifier = String; // TODO: proposed new name: something like "Core" or "CoreState" or "EditorState"? "Documents?" pub struct Tabs<W: Write> { /// maps file names to buffer identifiers. If a client asks to open a file that is already /// open, we treat it as a request for a new view. open_files: BTreeMap<PathBuf, BufferIdentifier>, /// maps buffer identifiers (filenames) to editor instances buffers: BTreeMap<BufferIdentifier, Arc<Mutex<Editor<W>>>>, /// maps view identifiers to editor instances. All actions originate in a view; this lets us /// route messages correctly when multiple views share a buffer. views: BTreeMap<ViewIdentifier, BufferIdentifier>, id_counter: usize, kill_ring: Arc<Mutex<Rope>>, style_map: Arc<Mutex<StyleMap>>, } #[derive(Clone)] pub struct TabCtx<W: Write> { kill_ring: Arc<Mutex<Rope>>, rpc_peer: MainPeer<W>, style_map: Arc<Mutex<StyleMap>>, } impl<W: Write + Send +'static> Tabs<W> { pub fn new() -> Tabs<W> { Tabs { open_files: BTreeMap::new(), buffers: BTreeMap::new(), views: BTreeMap::new(), id_counter: 0, kill_ring: Arc::new(Mutex::new(Rope::from(""))), style_map: Arc::new(Mutex::new(StyleMap::new())), } } fn new_tab_ctx(&self, peer: &MainPeer<W>) -> TabCtx<W> { TabCtx { kill_ring: self.kill_ring.clone(), rpc_peer: peer.clone(), style_map: self.style_map.clone(), } } fn next_view_id(&mut self) -> ViewIdentifier { self.id_counter += 1; format!("view-id-{}", self.id_counter) } fn next_buffer_id(&mut self) -> BufferIdentifier { self.id_counter += 1; format!("buffer-id-{}", self.id_counter) } pub fn do_rpc(&mut self, cmd: CoreCommand, rpc_peer: &MainPeer<W>) -> Option<Value> { use rpc::CoreCommand::*; match cmd { CloseView { view_id } => { self.do_close_view(view_id); None }, NewView { file_path } => Some(Value::String(self.do_new_view(rpc_peer, file_path))), Save { view_id, file_path } => self.do_save(view_id, file_path), Edit { view_id, edit_command } => self.do_edit(view_id, edit_command), } } /// Creates a new view and associates it with a buffer. /// /// This function always creates a new view and associates it with a buffer (which we access ///through an `Editor` instance). This buffer may be existing, or it may be created. /// ///A `new_view` request is handled differently depending on the `file_path` argument, and on ///application state. 
If `file_path` is given and a buffer associated with that file is already ///open, we create a new view into the existing buffer. If `file_path` is given and that file ///_isn't_ open, we load that file into a new buffer. If `file_path` is not given, we create a ///new empty buffer. fn do_new_view(&mut self, rpc_peer: &MainPeer<W>, file_path: Option<&str>) -> ViewIdentifier { // three code paths: new buffer, open file, and new view into existing buffer let view_id = self.next_view_id(); if let Some(file_path) = file_path.map(PathBuf::from) { // TODO: here, we should eventually be adding views to the existing editor. // for the time being, we just create a new empty view. if self.open_files.contains_key(&file_path) { let buffer_id = self.next_buffer_id(); self.new_empty_view(rpc_peer, &view_id, buffer_id); // let buffer_id = self.open_files.get(&file_path).unwrap().to_owned(); //self.add_view(&view_id, buffer_id); } else { // not open: create new buffer_id and open file let buffer_id = self.next_buffer_id(); self.open_files.insert(file_path.to_owned(), buffer_id.clone()); self.new_view_with_file(rpc_peer, &view_id, buffer_id.clone(), &file_path); // above fn has two branches: set path after self.buffers.get(&buffer_id).unwrap().lock().unwrap().set_path(&file_path); } } else { // file_path was nil: create a new empty buffer. let buffer_id = self.next_buffer_id(); self.new_empty_view(rpc_peer, &view_id, buffer_id); } view_id } fn do_close_view(&mut self, view_id: &str) { self.close_view(view_id); } fn new_empty_view(&mut self, rpc_peer: &MainPeer<W>, view_id: &str, buffer_id: BufferIdentifier) { let editor = Editor::new(self.new_tab_ctx(rpc_peer), view_id); self.finalize_new_view(view_id, buffer_id, editor); } fn new_view_with_file(&mut self, rpc_peer: &MainPeer<W>, view_id: &str, buffer_id: BufferIdentifier, path: &Path) { match self.read_file(&path) { Ok(contents) => { let editor = Editor::with_text(self.new_tab_ctx(rpc_peer), view_id, contents); self.finalize_new_view(view_id, buffer_id, editor) } Err(err) =>
} } /// Adds a new view to an existing editor instance. #[allow(unreachable_code, unused_variables, dead_code)] fn add_view(&mut self, view_id: &str, buffer_id: BufferIdentifier) { panic!("add_view should not currently be accessible"); let editor = self.buffers.get(&buffer_id).expect("missing editor_id for view_id"); self.views.insert(view_id.to_owned(), buffer_id); editor.lock().unwrap().add_view(view_id); } fn finalize_new_view(&mut self, view_id: &str, buffer_id: String, editor: Arc<Mutex<Editor<W>>>) { self.views.insert(view_id.to_owned(), buffer_id.clone()); self.buffers.insert(buffer_id, editor.clone()); } fn read_file<P: AsRef<Path>>(&self, path: P) -> io::Result<String> { let mut f = File::open(path)?; let mut s = String::new(); f.read_to_string(&mut s)?; Ok(s) } fn close_view(&mut self, view_id: &str) { let buf_id = self.views.remove(view_id).expect("missing buffer id when closing view"); let (has_views, path) = { let editor = self.buffers.get(&buf_id).expect("missing editor when closing view"); let mut editor = editor.lock().unwrap(); editor.remove_view(view_id); (editor.has_views(), editor.get_path().map(PathBuf::from)) }; if!has_views { self.buffers.remove(&buf_id); if let Some(path) = path { self.open_files.remove(&path); } } } fn do_save(&mut self, view_id: &str, file_path: &str) -> Option<Value> { let buffer_id = self.views.get(view_id) .expect(&format!("missing buffer id for view {}", view_id)); let editor = self.buffers.get(buffer_id) .expect(&format!("missing editor for buffer {}", buffer_id)); let file_path = PathBuf::from(file_path); // if this is a new path for an existing file, we have a bit of housekeeping to do: if let Some(prev_path) = editor.lock().unwrap().get_path() { if prev_path!= file_path { self.open_files.remove(prev_path); } } editor.lock().unwrap().do_save(&file_path); self.open_files.insert(file_path, buffer_id.to_owned()); None } fn do_edit(&mut self, view_id: &str, cmd: EditCommand) -> Option<Value> { let buffer_id = self.views.get(view_id) .expect(&format!("missing buffer id for view {}", view_id)); if let Some(editor) = self.buffers.get(buffer_id) { Editor::do_rpc(editor, view_id, cmd) } else { print_err!("buffer not found: {}, for view {}", buffer_id, view_id); None } } pub fn handle_idle(&self) { for editor in self.buffers.values() { editor.lock().unwrap().render(); } } } impl<W: Write> TabCtx<W> { pub fn update_view(&self, view_id: &str, update: &Value) { self.rpc_peer.send_rpc_notification("update", &json!({ "view_id": view_id, "update": update, })); } pub fn scroll_to(&self, view_id: &str, line: usize, col: usize) { self.rpc_peer.send_rpc_notification("scroll_to", &json!({ "view_id": view_id, "line": line, "col": col, })); } pub fn get_kill_ring(&self) -> Rope { self.kill_ring.lock().unwrap().clone() } pub fn set_kill_ring(&self, val: Rope) { let mut kill_ring = self.kill_ring.lock().unwrap(); *kill_ring = val; } pub fn alert(&self, msg: &str) { self.rpc_peer.send_rpc_notification("alert", &json!({ "msg": msg, })); } // Get the index for a given style. If the style is not in the existing // style map, then issues a def_style request to the front end. Intended // to be reasonably efficient, but ideally callers would do their own // indexing. pub fn get_style_id(&self, style: &Style) -> usize { let mut style_map = self.style_map.lock().unwrap(); if let Some(ix) = style_map.lookup(style) { return ix; } let ix = style_map.add(style); self.rpc_peer.send_rpc_notification("def_style", &style.to_json(ix)); ix } }
{ // TODO: we should be reporting errors to the client // (if this is even an error? we treat opening a non-existent file as a new buffer, // but set the editor's path) print_err!("unable to read file: {}, error: {:?}", buffer_id, err); self.new_empty_view(rpc_peer, view_id, buffer_id); }
conditional_block
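A toy reduction of the bookkeeping pattern in `Tabs` (hypothetical types, not xi-core's API): one map routes a view id to its buffer id, another routes the buffer id to a shared, lockable editor, so several views can address one underlying buffer, as the struct comments above describe.

```rust
// Two-level routing: view -> buffer -> shared editor state. The Arc<Mutex>
// mirrors how Tabs shares Editor instances across views.
use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};

struct Buffer {
    text: String,
}

struct Core {
    views: BTreeMap<String, String>,               // view id -> buffer id
    buffers: BTreeMap<String, Arc<Mutex<Buffer>>>, // buffer id -> shared state
}

impl Core {
    fn edit(&self, view_id: &str, insert: &str) {
        // All actions originate in a view; resolve it to its buffer first.
        let buf_id = self.views.get(view_id).expect("unknown view");
        let buf = self.buffers.get(buf_id).expect("missing buffer");
        buf.lock().unwrap().text.push_str(insert);
    }
}
```

Keeping the maps keyed by plain id strings, as tabs.rs does, means closing a view only has to unlink one `views` entry and then check whether the buffer has any views left.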
tabs.rs
// Copyright 2016 Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! A container for all the tabs being edited. Also functions as main dispatch for RPC. use std::collections::BTreeMap; use std::io::{self, Read, Write}; use std::path::{PathBuf, Path}; use std::fs::File; use std::sync::{Arc, Mutex}; use serde_json::value::Value; use xi_rope::rope::Rope; use editor::Editor; use rpc::{CoreCommand, EditCommand}; use styles::{Style, StyleMap}; use MainPeer; /// ViewIdentifiers are the primary means of routing messages between xi-core and a client view. pub type ViewIdentifier = String; /// BufferIdentifiers uniquely identify open buffers. type BufferIdentifier = String; // TODO: proposed new name: something like "Core" or "CoreState" or "EditorState"? "Documents?" pub struct Tabs<W: Write> { /// maps file names to buffer identifiers. If a client asks to open a file that is already /// open, we treat it as a request for a new view. open_files: BTreeMap<PathBuf, BufferIdentifier>, /// maps buffer identifiers (filenames) to editor instances buffers: BTreeMap<BufferIdentifier, Arc<Mutex<Editor<W>>>>, /// maps view identifiers to editor instances. All actions originate in a view; this lets us /// route messages correctly when multiple views share a buffer. views: BTreeMap<ViewIdentifier, BufferIdentifier>, id_counter: usize, kill_ring: Arc<Mutex<Rope>>, style_map: Arc<Mutex<StyleMap>>, } #[derive(Clone)] pub struct TabCtx<W: Write> { kill_ring: Arc<Mutex<Rope>>, rpc_peer: MainPeer<W>, style_map: Arc<Mutex<StyleMap>>, } impl<W: Write + Send +'static> Tabs<W> { pub fn new() -> Tabs<W> { Tabs { open_files: BTreeMap::new(), buffers: BTreeMap::new(), views: BTreeMap::new(), id_counter: 0, kill_ring: Arc::new(Mutex::new(Rope::from(""))), style_map: Arc::new(Mutex::new(StyleMap::new())), } } fn new_tab_ctx(&self, peer: &MainPeer<W>) -> TabCtx<W> { TabCtx { kill_ring: self.kill_ring.clone(), rpc_peer: peer.clone(), style_map: self.style_map.clone(), } } fn next_view_id(&mut self) -> ViewIdentifier { self.id_counter += 1; format!("view-id-{}", self.id_counter) } fn next_buffer_id(&mut self) -> BufferIdentifier { self.id_counter += 1; format!("buffer-id-{}", self.id_counter) } pub fn do_rpc(&mut self, cmd: CoreCommand, rpc_peer: &MainPeer<W>) -> Option<Value> { use rpc::CoreCommand::*; match cmd { CloseView { view_id } => { self.do_close_view(view_id); None }, NewView { file_path } => Some(Value::String(self.do_new_view(rpc_peer, file_path))), Save { view_id, file_path } => self.do_save(view_id, file_path), Edit { view_id, edit_command } => self.do_edit(view_id, edit_command), } } /// Creates a new view and associates it with a buffer. /// /// This function always creates a new view and associates it with a buffer (which we access ///through an `Editor` instance). This buffer may be existing, or it may be created. /// ///A `new_view` request is handled differently depending on the `file_path` argument, and on ///application state. 
If `file_path` is given and a buffer associated with that file is already ///open, we create a new view into the existing buffer. If `file_path` is given and that file ///_isn't_ open, we load that file into a new buffer. If `file_path` is not given, we create a ///new empty buffer. fn do_new_view(&mut self, rpc_peer: &MainPeer<W>, file_path: Option<&str>) -> ViewIdentifier { // three code paths: new buffer, open file, and new view into existing buffer let view_id = self.next_view_id(); if let Some(file_path) = file_path.map(PathBuf::from) { // TODO: here, we should eventually be adding views to the existing editor. // for the time being, we just create a new empty view. if self.open_files.contains_key(&file_path) { let buffer_id = self.next_buffer_id(); self.new_empty_view(rpc_peer, &view_id, buffer_id); // let buffer_id = self.open_files.get(&file_path).unwrap().to_owned(); //self.add_view(&view_id, buffer_id); } else { // not open: create new buffer_id and open file let buffer_id = self.next_buffer_id(); self.open_files.insert(file_path.to_owned(), buffer_id.clone()); self.new_view_with_file(rpc_peer, &view_id, buffer_id.clone(), &file_path); // above fn has two branches: set path after self.buffers.get(&buffer_id).unwrap().lock().unwrap().set_path(&file_path); } } else { // file_path was nil: create a new empty buffer. let buffer_id = self.next_buffer_id(); self.new_empty_view(rpc_peer, &view_id, buffer_id); } view_id } fn do_close_view(&mut self, view_id: &str) { self.close_view(view_id); } fn new_empty_view(&mut self, rpc_peer: &MainPeer<W>, view_id: &str, buffer_id: BufferIdentifier) { let editor = Editor::new(self.new_tab_ctx(rpc_peer), view_id); self.finalize_new_view(view_id, buffer_id, editor); } fn new_view_with_file(&mut self, rpc_peer: &MainPeer<W>, view_id: &str, buffer_id: BufferIdentifier, path: &Path) { match self.read_file(&path) { Ok(contents) => { let editor = Editor::with_text(self.new_tab_ctx(rpc_peer), view_id, contents); self.finalize_new_view(view_id, buffer_id, editor) } Err(err) => { // TODO: we should be reporting errors to the client // (if this is even an error? we treat opening a non-existent file as a new buffer, // but set the editor's path) print_err!("unable to read file: {}, error: {:?}", buffer_id, err); self.new_empty_view(rpc_peer, view_id, buffer_id); } } } /// Adds a new view to an existing editor instance. #[allow(unreachable_code, unused_variables, dead_code)] fn add_view(&mut self, view_id: &str, buffer_id: BufferIdentifier) { panic!("add_view should not currently be accessible"); let editor = self.buffers.get(&buffer_id).expect("missing editor_id for view_id"); self.views.insert(view_id.to_owned(), buffer_id); editor.lock().unwrap().add_view(view_id); } fn finalize_new_view(&mut self, view_id: &str, buffer_id: String, editor: Arc<Mutex<Editor<W>>>) { self.views.insert(view_id.to_owned(), buffer_id.clone()); self.buffers.insert(buffer_id, editor.clone()); } fn read_file<P: AsRef<Path>>(&self, path: P) -> io::Result<String> { let mut f = File::open(path)?; let mut s = String::new(); f.read_to_string(&mut s)?; Ok(s) } fn close_view(&mut self, view_id: &str) { let buf_id = self.views.remove(view_id).expect("missing buffer id when closing view"); let (has_views, path) = { let editor = self.buffers.get(&buf_id).expect("missing editor when closing view"); let mut editor = editor.lock().unwrap(); editor.remove_view(view_id);
self.buffers.remove(&buf_id); if let Some(path) = path { self.open_files.remove(&path); } } } fn do_save(&mut self, view_id: &str, file_path: &str) -> Option<Value> { let buffer_id = self.views.get(view_id) .expect(&format!("missing buffer id for view {}", view_id)); let editor = self.buffers.get(buffer_id) .expect(&format!("missing editor for buffer {}", buffer_id)); let file_path = PathBuf::from(file_path); // if this is a new path for an existing file, we have a bit of housekeeping to do: if let Some(prev_path) = editor.lock().unwrap().get_path() { if prev_path!= file_path { self.open_files.remove(prev_path); } } editor.lock().unwrap().do_save(&file_path); self.open_files.insert(file_path, buffer_id.to_owned()); None } fn do_edit(&mut self, view_id: &str, cmd: EditCommand) -> Option<Value> { let buffer_id = self.views.get(view_id) .expect(&format!("missing buffer id for view {}", view_id)); if let Some(editor) = self.buffers.get(buffer_id) { Editor::do_rpc(editor, view_id, cmd) } else { print_err!("buffer not found: {}, for view {}", buffer_id, view_id); None } } pub fn handle_idle(&self) { for editor in self.buffers.values() { editor.lock().unwrap().render(); } } } impl<W: Write> TabCtx<W> { pub fn update_view(&self, view_id: &str, update: &Value) { self.rpc_peer.send_rpc_notification("update", &json!({ "view_id": view_id, "update": update, })); } pub fn scroll_to(&self, view_id: &str, line: usize, col: usize) { self.rpc_peer.send_rpc_notification("scroll_to", &json!({ "view_id": view_id, "line": line, "col": col, })); } pub fn get_kill_ring(&self) -> Rope { self.kill_ring.lock().unwrap().clone() } pub fn set_kill_ring(&self, val: Rope) { let mut kill_ring = self.kill_ring.lock().unwrap(); *kill_ring = val; } pub fn alert(&self, msg: &str) { self.rpc_peer.send_rpc_notification("alert", &json!({ "msg": msg, })); } // Get the index for a given style. If the style is not in the existing // style map, then issues a def_style request to the front end. Intended // to be reasonably efficient, but ideally callers would do their own // indexing. pub fn get_style_id(&self, style: &Style) -> usize { let mut style_map = self.style_map.lock().unwrap(); if let Some(ix) = style_map.lookup(style) { return ix; } let ix = style_map.add(style); self.rpc_peer.send_rpc_notification("def_style", &style.to_json(ix)); ix } }
(editor.has_views(), editor.get_path().map(PathBuf::from)) }; if !has_views {
random_line_split
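The `random_line_split` middle above restores the heart of `close_view`: the view count and path are read while the editor lock is held, and only once the last view is gone are the buffer and its `open_files` entry dropped. A minimal sketch of the same bookkeeping, with hypothetical plain structs in place of the `Arc<Mutex<Editor<W>>>` wrappers:

```rust
use std::collections::BTreeMap;
use std::path::PathBuf;

// Hypothetical stand-ins for the shared state in tabs.rs.
struct Editor {
    views: Vec<String>,
    path: Option<PathBuf>,
}

struct Tabs {
    views: BTreeMap<String, String>,       // view_id -> buffer_id
    buffers: BTreeMap<String, Editor>,     // buffer_id -> editor
    open_files: BTreeMap<PathBuf, String>, // path -> buffer_id
}

impl Tabs {
    fn close_view(&mut self, view_id: &str) {
        let buf_id = self.views.remove(view_id).expect("missing buffer id when closing view");
        let (has_views, path) = {
            let editor = self.buffers.get_mut(&buf_id).expect("missing editor when closing view");
            editor.views.retain(|v| v != view_id);
            (!editor.views.is_empty(), editor.path.clone())
        };
        // The buffer outlives this view only while other views still point at it.
        if !has_views {
            self.buffers.remove(&buf_id);
            if let Some(path) = path {
                self.open_files.remove(&path);
            }
        }
    }
}
```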
tabs.rs
// Copyright 2016 Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! A container for all the tabs being edited. Also functions as main dispatch for RPC. use std::collections::BTreeMap; use std::io::{self, Read, Write}; use std::path::{PathBuf, Path}; use std::fs::File; use std::sync::{Arc, Mutex}; use serde_json::value::Value; use xi_rope::rope::Rope; use editor::Editor; use rpc::{CoreCommand, EditCommand}; use styles::{Style, StyleMap}; use MainPeer; /// ViewIdentifiers are the primary means of routing messages between xi-core and a client view. pub type ViewIdentifier = String; /// BufferIdentifiers uniquely identify open buffers. type BufferIdentifier = String; // TODO: proposed new name: something like "Core" or "CoreState" or "EditorState"? "Documents?" pub struct Tabs<W: Write> { /// maps file names to buffer identifiers. If a client asks to open a file that is already /// open, we treat it as a request for a new view. open_files: BTreeMap<PathBuf, BufferIdentifier>, /// maps buffer identifiers (filenames) to editor instances buffers: BTreeMap<BufferIdentifier, Arc<Mutex<Editor<W>>>>, /// maps view identifiers to editor instances. All actions originate in a view; this lets us /// route messages correctly when multiple views share a buffer. views: BTreeMap<ViewIdentifier, BufferIdentifier>, id_counter: usize, kill_ring: Arc<Mutex<Rope>>, style_map: Arc<Mutex<StyleMap>>, } #[derive(Clone)] pub struct TabCtx<W: Write> { kill_ring: Arc<Mutex<Rope>>, rpc_peer: MainPeer<W>, style_map: Arc<Mutex<StyleMap>>, } impl<W: Write + Send +'static> Tabs<W> { pub fn new() -> Tabs<W> { Tabs { open_files: BTreeMap::new(), buffers: BTreeMap::new(), views: BTreeMap::new(), id_counter: 0, kill_ring: Arc::new(Mutex::new(Rope::from(""))), style_map: Arc::new(Mutex::new(StyleMap::new())), } } fn new_tab_ctx(&self, peer: &MainPeer<W>) -> TabCtx<W> { TabCtx { kill_ring: self.kill_ring.clone(), rpc_peer: peer.clone(), style_map: self.style_map.clone(), } } fn next_view_id(&mut self) -> ViewIdentifier { self.id_counter += 1; format!("view-id-{}", self.id_counter) } fn next_buffer_id(&mut self) -> BufferIdentifier { self.id_counter += 1; format!("buffer-id-{}", self.id_counter) } pub fn do_rpc(&mut self, cmd: CoreCommand, rpc_peer: &MainPeer<W>) -> Option<Value> { use rpc::CoreCommand::*; match cmd { CloseView { view_id } => { self.do_close_view(view_id); None }, NewView { file_path } => Some(Value::String(self.do_new_view(rpc_peer, file_path))), Save { view_id, file_path } => self.do_save(view_id, file_path), Edit { view_id, edit_command } => self.do_edit(view_id, edit_command), } } /// Creates a new view and associates it with a buffer. /// /// This function always creates a new view and associates it with a buffer (which we access ///through an `Editor` instance). This buffer may be existing, or it may be created. /// ///A `new_view` request is handled differently depending on the `file_path` argument, and on ///application state. 
If `file_path` is given and a buffer associated with that file is already ///open, we create a new view into the existing buffer. If `file_path` is given and that file ///_isn't_ open, we load that file into a new buffer. If `file_path` is not given, we create a ///new empty buffer. fn do_new_view(&mut self, rpc_peer: &MainPeer<W>, file_path: Option<&str>) -> ViewIdentifier { // three code paths: new buffer, open file, and new view into existing buffer let view_id = self.next_view_id(); if let Some(file_path) = file_path.map(PathBuf::from) { // TODO: here, we should eventually be adding views to the existing editor. // for the time being, we just create a new empty view. if self.open_files.contains_key(&file_path) { let buffer_id = self.next_buffer_id(); self.new_empty_view(rpc_peer, &view_id, buffer_id); // let buffer_id = self.open_files.get(&file_path).unwrap().to_owned(); //self.add_view(&view_id, buffer_id); } else { // not open: create new buffer_id and open file let buffer_id = self.next_buffer_id(); self.open_files.insert(file_path.to_owned(), buffer_id.clone()); self.new_view_with_file(rpc_peer, &view_id, buffer_id.clone(), &file_path); // above fn has two branches: set path after self.buffers.get(&buffer_id).unwrap().lock().unwrap().set_path(&file_path); } } else { // file_path was nil: create a new empty buffer. let buffer_id = self.next_buffer_id(); self.new_empty_view(rpc_peer, &view_id, buffer_id); } view_id } fn do_close_view(&mut self, view_id: &str) { self.close_view(view_id); } fn new_empty_view(&mut self, rpc_peer: &MainPeer<W>, view_id: &str, buffer_id: BufferIdentifier) { let editor = Editor::new(self.new_tab_ctx(rpc_peer), view_id); self.finalize_new_view(view_id, buffer_id, editor); } fn new_view_with_file(&mut self, rpc_peer: &MainPeer<W>, view_id: &str, buffer_id: BufferIdentifier, path: &Path) { match self.read_file(&path) { Ok(contents) => { let editor = Editor::with_text(self.new_tab_ctx(rpc_peer), view_id, contents); self.finalize_new_view(view_id, buffer_id, editor) } Err(err) => { // TODO: we should be reporting errors to the client // (if this is even an error? we treat opening a non-existent file as a new buffer, // but set the editor's path) print_err!("unable to read file: {}, error: {:?}", buffer_id, err); self.new_empty_view(rpc_peer, view_id, buffer_id); } } } /// Adds a new view to an existing editor instance. #[allow(unreachable_code, unused_variables, dead_code)] fn add_view(&mut self, view_id: &str, buffer_id: BufferIdentifier) { panic!("add_view should not currently be accessible"); let editor = self.buffers.get(&buffer_id).expect("missing editor_id for view_id"); self.views.insert(view_id.to_owned(), buffer_id); editor.lock().unwrap().add_view(view_id); } fn finalize_new_view(&mut self, view_id: &str, buffer_id: String, editor: Arc<Mutex<Editor<W>>>) { self.views.insert(view_id.to_owned(), buffer_id.clone()); self.buffers.insert(buffer_id, editor.clone()); } fn
<P: AsRef<Path>>(&self, path: P) -> io::Result<String> { let mut f = File::open(path)?; let mut s = String::new(); f.read_to_string(&mut s)?; Ok(s) } fn close_view(&mut self, view_id: &str) { let buf_id = self.views.remove(view_id).expect("missing buffer id when closing view"); let (has_views, path) = { let editor = self.buffers.get(&buf_id).expect("missing editor when closing view"); let mut editor = editor.lock().unwrap(); editor.remove_view(view_id); (editor.has_views(), editor.get_path().map(PathBuf::from)) }; if!has_views { self.buffers.remove(&buf_id); if let Some(path) = path { self.open_files.remove(&path); } } } fn do_save(&mut self, view_id: &str, file_path: &str) -> Option<Value> { let buffer_id = self.views.get(view_id) .expect(&format!("missing buffer id for view {}", view_id)); let editor = self.buffers.get(buffer_id) .expect(&format!("missing editor for buffer {}", buffer_id)); let file_path = PathBuf::from(file_path); // if this is a new path for an existing file, we have a bit of housekeeping to do: if let Some(prev_path) = editor.lock().unwrap().get_path() { if prev_path!= file_path { self.open_files.remove(prev_path); } } editor.lock().unwrap().do_save(&file_path); self.open_files.insert(file_path, buffer_id.to_owned()); None } fn do_edit(&mut self, view_id: &str, cmd: EditCommand) -> Option<Value> { let buffer_id = self.views.get(view_id) .expect(&format!("missing buffer id for view {}", view_id)); if let Some(editor) = self.buffers.get(buffer_id) { Editor::do_rpc(editor, view_id, cmd) } else { print_err!("buffer not found: {}, for view {}", buffer_id, view_id); None } } pub fn handle_idle(&self) { for editor in self.buffers.values() { editor.lock().unwrap().render(); } } } impl<W: Write> TabCtx<W> { pub fn update_view(&self, view_id: &str, update: &Value) { self.rpc_peer.send_rpc_notification("update", &json!({ "view_id": view_id, "update": update, })); } pub fn scroll_to(&self, view_id: &str, line: usize, col: usize) { self.rpc_peer.send_rpc_notification("scroll_to", &json!({ "view_id": view_id, "line": line, "col": col, })); } pub fn get_kill_ring(&self) -> Rope { self.kill_ring.lock().unwrap().clone() } pub fn set_kill_ring(&self, val: Rope) { let mut kill_ring = self.kill_ring.lock().unwrap(); *kill_ring = val; } pub fn alert(&self, msg: &str) { self.rpc_peer.send_rpc_notification("alert", &json!({ "msg": msg, })); } // Get the index for a given style. If the style is not in the existing // style map, then issues a def_style request to the front end. Intended // to be reasonably efficient, but ideally callers would do their own // indexing. pub fn get_style_id(&self, style: &Style) -> usize { let mut style_map = self.style_map.lock().unwrap(); if let Some(ix) = style_map.lookup(style) { return ix; } let ix = style_map.add(style); self.rpc_peer.send_rpc_notification("def_style", &style.to_json(ix)); ix } }
read_file
identifier_name
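For `identifier_name` records the middle is a single identifier (`read_file` here); as with every record in this dump, the original source is exactly `prefix + middle + suffix`. A minimal reassembly sketch (the function name is ours, not part of the dataset):

```rust
// Every record satisfies: original file == prefix + middle + suffix.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut out = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    out.push_str(prefix);
    out.push_str(middle);
    out.push_str(suffix);
    out
}

fn main() {
    // For `identifier_name` records the middle is exactly one identifier.
    assert_eq!(
        reassemble("fn ", "read_file", "(path: &str) {}"),
        "fn read_file(path: &str) {}"
    );
}
```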
tabs.rs
// Copyright 2016 Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! A container for all the tabs being edited. Also functions as main dispatch for RPC. use std::collections::BTreeMap; use std::io::{self, Read, Write}; use std::path::{PathBuf, Path}; use std::fs::File; use std::sync::{Arc, Mutex}; use serde_json::value::Value; use xi_rope::rope::Rope; use editor::Editor; use rpc::{CoreCommand, EditCommand}; use styles::{Style, StyleMap}; use MainPeer; /// ViewIdentifiers are the primary means of routing messages between xi-core and a client view. pub type ViewIdentifier = String; /// BufferIdentifiers uniquely identify open buffers. type BufferIdentifier = String; // TODO: proposed new name: something like "Core" or "CoreState" or "EditorState"? "Documents?" pub struct Tabs<W: Write> { /// maps file names to buffer identifiers. If a client asks to open a file that is already /// open, we treat it as a request for a new view. open_files: BTreeMap<PathBuf, BufferIdentifier>, /// maps buffer identifiers (filenames) to editor instances buffers: BTreeMap<BufferIdentifier, Arc<Mutex<Editor<W>>>>, /// maps view identifiers to editor instances. All actions originate in a view; this lets us /// route messages correctly when multiple views share a buffer. views: BTreeMap<ViewIdentifier, BufferIdentifier>, id_counter: usize, kill_ring: Arc<Mutex<Rope>>, style_map: Arc<Mutex<StyleMap>>, } #[derive(Clone)] pub struct TabCtx<W: Write> { kill_ring: Arc<Mutex<Rope>>, rpc_peer: MainPeer<W>, style_map: Arc<Mutex<StyleMap>>, } impl<W: Write + Send +'static> Tabs<W> { pub fn new() -> Tabs<W> { Tabs { open_files: BTreeMap::new(), buffers: BTreeMap::new(), views: BTreeMap::new(), id_counter: 0, kill_ring: Arc::new(Mutex::new(Rope::from(""))), style_map: Arc::new(Mutex::new(StyleMap::new())), } } fn new_tab_ctx(&self, peer: &MainPeer<W>) -> TabCtx<W> { TabCtx { kill_ring: self.kill_ring.clone(), rpc_peer: peer.clone(), style_map: self.style_map.clone(), } } fn next_view_id(&mut self) -> ViewIdentifier { self.id_counter += 1; format!("view-id-{}", self.id_counter) } fn next_buffer_id(&mut self) -> BufferIdentifier { self.id_counter += 1; format!("buffer-id-{}", self.id_counter) } pub fn do_rpc(&mut self, cmd: CoreCommand, rpc_peer: &MainPeer<W>) -> Option<Value> { use rpc::CoreCommand::*; match cmd { CloseView { view_id } => { self.do_close_view(view_id); None }, NewView { file_path } => Some(Value::String(self.do_new_view(rpc_peer, file_path))), Save { view_id, file_path } => self.do_save(view_id, file_path), Edit { view_id, edit_command } => self.do_edit(view_id, edit_command), } } /// Creates a new view and associates it with a buffer. /// /// This function always creates a new view and associates it with a buffer (which we access ///through an `Editor` instance). This buffer may be existing, or it may be created. /// ///A `new_view` request is handled differently depending on the `file_path` argument, and on ///application state. 
If `file_path` is given and a buffer associated with that file is already ///open, we create a new view into the existing buffer. If `file_path` is given and that file ///_isn't_ open, we load that file into a new buffer. If `file_path` is not given, we create a ///new empty buffer. fn do_new_view(&mut self, rpc_peer: &MainPeer<W>, file_path: Option<&str>) -> ViewIdentifier { // three code paths: new buffer, open file, and new view into existing buffer let view_id = self.next_view_id(); if let Some(file_path) = file_path.map(PathBuf::from) { // TODO: here, we should eventually be adding views to the existing editor. // for the time being, we just create a new empty view. if self.open_files.contains_key(&file_path) { let buffer_id = self.next_buffer_id(); self.new_empty_view(rpc_peer, &view_id, buffer_id); // let buffer_id = self.open_files.get(&file_path).unwrap().to_owned(); //self.add_view(&view_id, buffer_id); } else { // not open: create new buffer_id and open file let buffer_id = self.next_buffer_id(); self.open_files.insert(file_path.to_owned(), buffer_id.clone()); self.new_view_with_file(rpc_peer, &view_id, buffer_id.clone(), &file_path); // above fn has two branches: set path after self.buffers.get(&buffer_id).unwrap().lock().unwrap().set_path(&file_path); } } else { // file_path was nil: create a new empty buffer. let buffer_id = self.next_buffer_id(); self.new_empty_view(rpc_peer, &view_id, buffer_id); } view_id } fn do_close_view(&mut self, view_id: &str) { self.close_view(view_id); } fn new_empty_view(&mut self, rpc_peer: &MainPeer<W>, view_id: &str, buffer_id: BufferIdentifier) { let editor = Editor::new(self.new_tab_ctx(rpc_peer), view_id); self.finalize_new_view(view_id, buffer_id, editor); } fn new_view_with_file(&mut self, rpc_peer: &MainPeer<W>, view_id: &str, buffer_id: BufferIdentifier, path: &Path) { match self.read_file(&path) { Ok(contents) => { let editor = Editor::with_text(self.new_tab_ctx(rpc_peer), view_id, contents); self.finalize_new_view(view_id, buffer_id, editor) } Err(err) => { // TODO: we should be reporting errors to the client // (if this is even an error? we treat opening a non-existent file as a new buffer, // but set the editor's path) print_err!("unable to read file: {}, error: {:?}", buffer_id, err); self.new_empty_view(rpc_peer, view_id, buffer_id); } } } /// Adds a new view to an existing editor instance. #[allow(unreachable_code, unused_variables, dead_code)] fn add_view(&mut self, view_id: &str, buffer_id: BufferIdentifier) { panic!("add_view should not currently be accessible"); let editor = self.buffers.get(&buffer_id).expect("missing editor_id for view_id"); self.views.insert(view_id.to_owned(), buffer_id); editor.lock().unwrap().add_view(view_id); } fn finalize_new_view(&mut self, view_id: &str, buffer_id: String, editor: Arc<Mutex<Editor<W>>>) { self.views.insert(view_id.to_owned(), buffer_id.clone()); self.buffers.insert(buffer_id, editor.clone()); } fn read_file<P: AsRef<Path>>(&self, path: P) -> io::Result<String>
fn close_view(&mut self, view_id: &str) { let buf_id = self.views.remove(view_id).expect("missing buffer id when closing view"); let (has_views, path) = { let editor = self.buffers.get(&buf_id).expect("missing editor when closing view"); let mut editor = editor.lock().unwrap(); editor.remove_view(view_id); (editor.has_views(), editor.get_path().map(PathBuf::from)) }; if!has_views { self.buffers.remove(&buf_id); if let Some(path) = path { self.open_files.remove(&path); } } } fn do_save(&mut self, view_id: &str, file_path: &str) -> Option<Value> { let buffer_id = self.views.get(view_id) .expect(&format!("missing buffer id for view {}", view_id)); let editor = self.buffers.get(buffer_id) .expect(&format!("missing editor for buffer {}", buffer_id)); let file_path = PathBuf::from(file_path); // if this is a new path for an existing file, we have a bit of housekeeping to do: if let Some(prev_path) = editor.lock().unwrap().get_path() { if prev_path!= file_path { self.open_files.remove(prev_path); } } editor.lock().unwrap().do_save(&file_path); self.open_files.insert(file_path, buffer_id.to_owned()); None } fn do_edit(&mut self, view_id: &str, cmd: EditCommand) -> Option<Value> { let buffer_id = self.views.get(view_id) .expect(&format!("missing buffer id for view {}", view_id)); if let Some(editor) = self.buffers.get(buffer_id) { Editor::do_rpc(editor, view_id, cmd) } else { print_err!("buffer not found: {}, for view {}", buffer_id, view_id); None } } pub fn handle_idle(&self) { for editor in self.buffers.values() { editor.lock().unwrap().render(); } } } impl<W: Write> TabCtx<W> { pub fn update_view(&self, view_id: &str, update: &Value) { self.rpc_peer.send_rpc_notification("update", &json!({ "view_id": view_id, "update": update, })); } pub fn scroll_to(&self, view_id: &str, line: usize, col: usize) { self.rpc_peer.send_rpc_notification("scroll_to", &json!({ "view_id": view_id, "line": line, "col": col, })); } pub fn get_kill_ring(&self) -> Rope { self.kill_ring.lock().unwrap().clone() } pub fn set_kill_ring(&self, val: Rope) { let mut kill_ring = self.kill_ring.lock().unwrap(); *kill_ring = val; } pub fn alert(&self, msg: &str) { self.rpc_peer.send_rpc_notification("alert", &json!({ "msg": msg, })); } // Get the index for a given style. If the style is not in the existing // style map, then issues a def_style request to the front end. Intended // to be reasonably efficient, but ideally callers would do their own // indexing. pub fn get_style_id(&self, style: &Style) -> usize { let mut style_map = self.style_map.lock().unwrap(); if let Some(ix) = style_map.lookup(style) { return ix; } let ix = style_map.add(style); self.rpc_peer.send_rpc_notification("def_style", &style.to_json(ix)); ix } }
{
    let mut f = File::open(path)?;
    let mut s = String::new();
    f.read_to_string(&mut s)?;
    Ok(s)
}
identifier_body
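The `identifier_body` middle above is the body of `read_file`; the hand-rolled `File::open` plus `read_to_string` pair is behaviorally equivalent to the standard library's `std::fs::read_to_string` shortcut:

```rust
use std::fs;
use std::io;
use std::path::Path;

// Equivalent one-liner: `fs::read_to_string` opens the file and reads its
// entire contents into a String, exactly as the recovered body does by hand.
fn read_file<P: AsRef<Path>>(path: P) -> io::Result<String> {
    fs::read_to_string(path)
}

fn main() {
    // Hypothetical usage; the file name is illustrative only.
    let _text = read_file("main.rs").unwrap_or_default();
}
```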
features.rs
use bio::io::{bed, gff}; use bio::utils::Strand; use bio::utils::Strand::*; use crate::lib::{Config, ConfigFeature}; use crate::lib::{Database, GeneNameEachReference, GeneNameTree, Region}; use rocks::rocksdb::*; use std::collections::HashMap; use std::error::Error; use std::fs::File; use std::io::{BufRead, BufReader}; use std::mem::*; use std::path::Path; use crate::vg::GraphDB; use crate::vg::GraphDB::VG; // NodeId to corresponding feature items. type Features = HashMap<u64, Vec<Feature>>; pub type FeatureDB = Vec<Features>; // Move it to graph, needed. type CoordToNodeId = HashMap<String, Vec<NodeId>>; // Vec<NodeId> required as sorted by coord. #[derive(Debug, PartialEq, Serialize, Deserialize)] struct NodeId { id: u64, coord: u64, } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct Feature { pub start_offset: u64, pub stop_offset: u64, pub id: u64, pub name: String, pub is_reverse: Option<bool>, pub attributes: Vec<String>, pub value: Option<f32>, } #[derive(Debug, PartialEq, Serialize, Deserialize)] struct FeatureSet { feature_set_id: u64, dataset_id: Vec<u64>, attributes: Option<String>, } fn opt_strand_to_opt_bool(strand: Option<Strand>) -> Option<bool> { strand.and_then(|strand| match strand { Forward => Some(false), Reverse => Some(true), Unknown => None, }) } fn record_to_nodes( record: bed::Record, coord_map: &CoordToNodeId, bed_id: u64, chr_prefix: &Option<String>, ) -> HashMap<u64, Feature> { let mut hash_map: HashMap<u64, Feature> = HashMap::new(); let chr = match *chr_prefix { Some(ref k) => record.chrom().replace(k, ""), None => record.chrom().to_string(), }; let ref vec = match coord_map.get(&chr) { Some(k) => k, None => return hash_map, }; let lower_bound_index = match vec.binary_search_by_key(&record.start(), |b| b.coord) { Ok(x) => x, Err(x) => x, }; hash_map.insert( vec[lower_bound_index].id, Feature { start_offset: vec[lower_bound_index].coord - record.start(), stop_offset: 0, id: bed_id, name: record.name().unwrap_or_default().to_string(), is_reverse: opt_strand_to_opt_bool(record.strand()), attributes: vec![], value: None, }, ); let mut index = lower_bound_index; while vec.len() > index + 1 && vec[index + 1].coord < record.end() { index += 1; hash_map.insert( vec[index].id, Feature { start_offset: 0, stop_offset: 0, id: bed_id, name: record.name().unwrap_or_default().to_string(), is_reverse: opt_strand_to_opt_bool(record.strand()), attributes: vec![], value: None, }, ); } return hash_map; } // tmpNew should be replecated with a novel implementation. // Required input list is sorted by coordinates. 
//pub fn tmp_new(graph: Arc<Graph>, config: &Config) -> Database { pub fn tmp_new(graph: GraphDB, config: &Config, db_name: String, rocksdb_init: &bool) -> Database { let chroms = vec![ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "X", "Y", ]; let hashmap = CoordToNodeId::new(); if *rocksdb_init ||!Path::new(&db_name).exists() { if let Ok(cf) = DB::open( &Options::default().map_db_options(|db| db.create_if_missing(true)), db_name.clone(), ) { 'iter: for chr in chroms.iter() { if let Some(ref path) = config.data[0].source.node_index { let ref prefix = config.data[0].chr_prefix; let chr_name = prefix.clone() + chr; let path_string = path.clone().replace("{}", &chr_name); let path = Path::new(&path_string); debug!("Chromosome: {:?}, {:?}", chr, path); let file = match File::open(path) { Ok(f) => f, Err(e) => { debug!("could not open {}; skipping.", e.description()); continue 'iter; } }; /* let file_gz = match extract_file(path) { Ok(f) => f, Err(e) => {continue 'iter;} }; */ let br = BufReader::new(file); let mut last_node: Option<NodeId> = None; for line in br.lines() { match line { Ok(l) => { let items: Vec<u64> = l.split("\t").map(|a| a.parse::<u64>().unwrap()).collect(); if items.len() > 1 { if let Some(item) = last_node { let reg = Region { path: (*chr).to_string(), start: item.coord, stop: items[1], }; let raw_bytes: [u8; 8] = unsafe { transmute(item.id) }; if let Err(err) = cf.put( &WriteOptions::default(), &raw_bytes, reg.uuid().as_bytes(), ) { debug!("{:?} at {}", err, item.id) } } last_node = Some(NodeId { id: items[0], coord: items[1], }); } else { continue; } } Err(e) => { debug!("ignoring error {}", e); continue; } }; } if let Some(item) = last_node { // coord.insert(item.id, Region{ path: (*chr).to_string(), start: item.coord, stop: item.coord + 1000 }); //Todo seems to wrong code. let reg = Region { path: (*chr).to_string(), start: item.coord, stop: item.coord + 1000, }; let raw_bytes: [u8; 8] = unsafe { transmute(item.id) }; if let Err(err) = cf.put(&WriteOptions::default(), &raw_bytes, reg.uuid().as_bytes()) { debug!("{:?} at {}", err, item.id) } } } } } } let mut vec: FeatureDB = FeatureDB::new(); let mut gene_per_ref = GeneNameEachReference::new(); for data in config.reference.data.iter() { let mut gene: GeneNameTree = GeneNameTree::new(); for feature in data.features.iter() { // It limits only "config,reference Items." let path = Path::new(&feature.url); info!("Parsing: {:?}", path); match path.extension().unwrap_or_default().to_str() { Some("bed") => { vec.push(tmp_new_internal(feature, &graph, &hashmap)); } Some("gff3") => { tmp_new_gene_internal(feature, &mut gene, gff::GffType::GFF3); } Some("gtf") => { tmp_new_gene_internal(feature, &mut gene, gff::GffType::GTF2); } _ => println!("Unsupported format {:?}", path), } } gene_per_ref.insert(data.name.clone(), gene); } match graph { VG(graph2) => { let version = graph2.version(config); println!("{}", version); return Database { features: vec, //coordinates: coord, rocks: db_name, gene_name_tree: gene_per_ref, graph: VG(graph2), version: version, }; } }; } // It includes only "gene" row. fn tmp_new_gene_internal(feature: &ConfigFeature, gene: &mut GeneNameTree, gff_type: gff::GffType) { let gff3 = &feature.url; let path = Path::new(&gff3); let mut reader = match gff::Reader::from_file(path, gff_type) { Ok(f) => f, Err(e) => { debug!("could not open {}; skipping.", e.description()); //return result?; return; } }; let mut index = 0;
Ok(rec) => match rec.feature_type() { "gene" => { let reg = match opt_strand_to_opt_bool(rec.strand()) { Some(false) => Region { path: rec.seqname().to_string(), stop: *rec.start(), start: *rec.end(), }, _ => Region { path: rec.seqname().to_string(), start: *rec.start(), stop: *rec.end(), }, }; match rec.attributes().get("gene_name") { Some(name) => gene.insert(name.clone().to_string(), reg), None => continue, }; } _ => continue, }, Err(_) => continue, } } debug!("{} lines processed. end.", index); } fn tmp_new_internal( feature: &ConfigFeature, _graph: &GraphDB, hashmap: &CoordToNodeId, ) -> Features { let bed = &feature.url; let path = Path::new(&bed); let mut features: Features = Features::new(); let mut reader = match bed::Reader::from_file(path) { Ok(f) => f, Err(e) => { debug!("could not open {}; skipping.", e.description()); return features; } }; let mut index: u64 = 0; for record in reader.records() { let rec = record.ok().expect("Error reading record."); let nodes = record_to_nodes(rec, &hashmap, index, &feature.chr_prefix); for (key, value) in nodes.into_iter() { features.entry(key).or_insert(Vec::new()).push(value); } index += 1; } return features; } /* fn extract_file(path_compressed: &Path) -> io::Result<Vec<u8>>{ let mut v = Vec::new(); let f = try!(File::open(path_compressed)); try!(try!(GzDecoder::new(f)).read_to_end(&mut v)); Ok(v) } fn decode_reader(string: &String) -> io::Result<String> { let mut gz = GzDecoder::new(string.as_bytes())?; let mut s = String::new(); gz.read_to_string(&mut s)?; Ok(s) } */
for record in reader.records() { index += 1; match record {
random_line_split
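`record_to_nodes` in the prefix maps each BED interval onto the sorted node list with `binary_search_by_key`, collapsing the `Ok`/`Err` arms into a lower-bound index (`Ok(i)` on an exact coordinate hit, `Err(i)` with the insertion point otherwise). A minimal, self-contained sketch of that idiom:

```rust
// Sorted-by-coord node list, as `CoordToNodeId` requires.
struct NodeId {
    id: u64,
    coord: u64,
}

// Lower-bound lookup: both an exact hit and the insertion point yield the
// first index whose coord is >= start.
fn lower_bound(nodes: &[NodeId], start: u64) -> usize {
    match nodes.binary_search_by_key(&start, |n| n.coord) {
        Ok(i) | Err(i) => i,
    }
}

fn main() {
    let nodes = vec![
        NodeId { id: 1, coord: 0 },
        NodeId { id: 2, coord: 100 },
        NodeId { id: 3, coord: 250 },
    ];
    assert_eq!(lower_bound(&nodes, 100), 1); // exact hit
    assert_eq!(lower_bound(&nodes, 120), 2); // insertion point
    assert_eq!(nodes[lower_bound(&nodes, 120)].id, 3);
}
```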
features.rs
use bio::io::{bed, gff}; use bio::utils::Strand; use bio::utils::Strand::*; use crate::lib::{Config, ConfigFeature}; use crate::lib::{Database, GeneNameEachReference, GeneNameTree, Region}; use rocks::rocksdb::*; use std::collections::HashMap; use std::error::Error; use std::fs::File; use std::io::{BufRead, BufReader}; use std::mem::*; use std::path::Path; use crate::vg::GraphDB; use crate::vg::GraphDB::VG; // NodeId to corresponding feature items. type Features = HashMap<u64, Vec<Feature>>; pub type FeatureDB = Vec<Features>; // Move it to graph, needed. type CoordToNodeId = HashMap<String, Vec<NodeId>>; // Vec<NodeId> required as sorted by coord. #[derive(Debug, PartialEq, Serialize, Deserialize)] struct NodeId { id: u64, coord: u64, } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct Feature { pub start_offset: u64, pub stop_offset: u64, pub id: u64, pub name: String, pub is_reverse: Option<bool>, pub attributes: Vec<String>, pub value: Option<f32>, } #[derive(Debug, PartialEq, Serialize, Deserialize)] struct FeatureSet { feature_set_id: u64, dataset_id: Vec<u64>, attributes: Option<String>, } fn opt_strand_to_opt_bool(strand: Option<Strand>) -> Option<bool> { strand.and_then(|strand| match strand { Forward => Some(false), Reverse => Some(true), Unknown => None, }) } fn
( record: bed::Record, coord_map: &CoordToNodeId, bed_id: u64, chr_prefix: &Option<String>, ) -> HashMap<u64, Feature> { let mut hash_map: HashMap<u64, Feature> = HashMap::new(); let chr = match *chr_prefix { Some(ref k) => record.chrom().replace(k, ""), None => record.chrom().to_string(), }; let ref vec = match coord_map.get(&chr) { Some(k) => k, None => return hash_map, }; let lower_bound_index = match vec.binary_search_by_key(&record.start(), |b| b.coord) { Ok(x) => x, Err(x) => x, }; hash_map.insert( vec[lower_bound_index].id, Feature { start_offset: vec[lower_bound_index].coord - record.start(), stop_offset: 0, id: bed_id, name: record.name().unwrap_or_default().to_string(), is_reverse: opt_strand_to_opt_bool(record.strand()), attributes: vec![], value: None, }, ); let mut index = lower_bound_index; while vec.len() > index + 1 && vec[index + 1].coord < record.end() { index += 1; hash_map.insert( vec[index].id, Feature { start_offset: 0, stop_offset: 0, id: bed_id, name: record.name().unwrap_or_default().to_string(), is_reverse: opt_strand_to_opt_bool(record.strand()), attributes: vec![], value: None, }, ); } return hash_map; } // tmpNew should be replecated with a novel implementation. // Required input list is sorted by coordinates. //pub fn tmp_new(graph: Arc<Graph>, config: &Config) -> Database { pub fn tmp_new(graph: GraphDB, config: &Config, db_name: String, rocksdb_init: &bool) -> Database { let chroms = vec![ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "X", "Y", ]; let hashmap = CoordToNodeId::new(); if *rocksdb_init ||!Path::new(&db_name).exists() { if let Ok(cf) = DB::open( &Options::default().map_db_options(|db| db.create_if_missing(true)), db_name.clone(), ) { 'iter: for chr in chroms.iter() { if let Some(ref path) = config.data[0].source.node_index { let ref prefix = config.data[0].chr_prefix; let chr_name = prefix.clone() + chr; let path_string = path.clone().replace("{}", &chr_name); let path = Path::new(&path_string); debug!("Chromosome: {:?}, {:?}", chr, path); let file = match File::open(path) { Ok(f) => f, Err(e) => { debug!("could not open {}; skipping.", e.description()); continue 'iter; } }; /* let file_gz = match extract_file(path) { Ok(f) => f, Err(e) => {continue 'iter;} }; */ let br = BufReader::new(file); let mut last_node: Option<NodeId> = None; for line in br.lines() { match line { Ok(l) => { let items: Vec<u64> = l.split("\t").map(|a| a.parse::<u64>().unwrap()).collect(); if items.len() > 1 { if let Some(item) = last_node { let reg = Region { path: (*chr).to_string(), start: item.coord, stop: items[1], }; let raw_bytes: [u8; 8] = unsafe { transmute(item.id) }; if let Err(err) = cf.put( &WriteOptions::default(), &raw_bytes, reg.uuid().as_bytes(), ) { debug!("{:?} at {}", err, item.id) } } last_node = Some(NodeId { id: items[0], coord: items[1], }); } else { continue; } } Err(e) => { debug!("ignoring error {}", e); continue; } }; } if let Some(item) = last_node { // coord.insert(item.id, Region{ path: (*chr).to_string(), start: item.coord, stop: item.coord + 1000 }); //Todo seems to wrong code. 
let reg = Region { path: (*chr).to_string(), start: item.coord, stop: item.coord + 1000, }; let raw_bytes: [u8; 8] = unsafe { transmute(item.id) }; if let Err(err) = cf.put(&WriteOptions::default(), &raw_bytes, reg.uuid().as_bytes()) { debug!("{:?} at {}", err, item.id) } } } } } } let mut vec: FeatureDB = FeatureDB::new(); let mut gene_per_ref = GeneNameEachReference::new(); for data in config.reference.data.iter() { let mut gene: GeneNameTree = GeneNameTree::new(); for feature in data.features.iter() { // It limits only "config,reference Items." let path = Path::new(&feature.url); info!("Parsing: {:?}", path); match path.extension().unwrap_or_default().to_str() { Some("bed") => { vec.push(tmp_new_internal(feature, &graph, &hashmap)); } Some("gff3") => { tmp_new_gene_internal(feature, &mut gene, gff::GffType::GFF3); } Some("gtf") => { tmp_new_gene_internal(feature, &mut gene, gff::GffType::GTF2); } _ => println!("Unsupported format {:?}", path), } } gene_per_ref.insert(data.name.clone(), gene); } match graph { VG(graph2) => { let version = graph2.version(config); println!("{}", version); return Database { features: vec, //coordinates: coord, rocks: db_name, gene_name_tree: gene_per_ref, graph: VG(graph2), version: version, }; } }; } // It includes only "gene" row. fn tmp_new_gene_internal(feature: &ConfigFeature, gene: &mut GeneNameTree, gff_type: gff::GffType) { let gff3 = &feature.url; let path = Path::new(&gff3); let mut reader = match gff::Reader::from_file(path, gff_type) { Ok(f) => f, Err(e) => { debug!("could not open {}; skipping.", e.description()); //return result?; return; } }; let mut index = 0; for record in reader.records() { index += 1; match record { Ok(rec) => match rec.feature_type() { "gene" => { let reg = match opt_strand_to_opt_bool(rec.strand()) { Some(false) => Region { path: rec.seqname().to_string(), stop: *rec.start(), start: *rec.end(), }, _ => Region { path: rec.seqname().to_string(), start: *rec.start(), stop: *rec.end(), }, }; match rec.attributes().get("gene_name") { Some(name) => gene.insert(name.clone().to_string(), reg), None => continue, }; } _ => continue, }, Err(_) => continue, } } debug!("{} lines processed. end.", index); } fn tmp_new_internal( feature: &ConfigFeature, _graph: &GraphDB, hashmap: &CoordToNodeId, ) -> Features { let bed = &feature.url; let path = Path::new(&bed); let mut features: Features = Features::new(); let mut reader = match bed::Reader::from_file(path) { Ok(f) => f, Err(e) => { debug!("could not open {}; skipping.", e.description()); return features; } }; let mut index: u64 = 0; for record in reader.records() { let rec = record.ok().expect("Error reading record."); let nodes = record_to_nodes(rec, &hashmap, index, &feature.chr_prefix); for (key, value) in nodes.into_iter() { features.entry(key).or_insert(Vec::new()).push(value); } index += 1; } return features; } /* fn extract_file(path_compressed: &Path) -> io::Result<Vec<u8>>{ let mut v = Vec::new(); let f = try!(File::open(path_compressed)); try!(try!(GzDecoder::new(f)).read_to_end(&mut v)); Ok(v) } fn decode_reader(string: &String) -> io::Result<String> { let mut gz = GzDecoder::new(string.as_bytes())?; let mut s = String::new(); gz.read_to_string(&mut s)?; Ok(s) } */
record_to_nodes
identifier_name
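The recovered identifier is `record_to_nodes`; its helper `opt_strand_to_opt_bool` folds strand information into the optional `is_reverse` flag on `Feature`. A self-contained sketch, with a local `Strand` enum standing in for `bio::utils::Strand`:

```rust
// Local stand-in for `bio::utils::Strand`.
enum Strand {
    Forward,
    Reverse,
    Unknown,
}

// Unknown strandedness maps to None; otherwise the flag records "is reverse".
fn opt_strand_to_opt_bool(strand: Option<Strand>) -> Option<bool> {
    strand.and_then(|s| match s {
        Strand::Forward => Some(false),
        Strand::Reverse => Some(true),
        Strand::Unknown => None,
    })
}

fn main() {
    assert_eq!(opt_strand_to_opt_bool(Some(Strand::Forward)), Some(false));
    assert_eq!(opt_strand_to_opt_bool(Some(Strand::Reverse)), Some(true));
    assert_eq!(opt_strand_to_opt_bool(None), None);
}
```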
v0.rs
<CompressionCaches<'tcx>>>, binders: Vec<BinderLevel>, out: String, } impl SymbolMangler<'tcx> { fn push(&mut self, s: &str) { self.out.push_str(s); } /// Push a `_`-terminated base 62 integer, using the format /// specified in the RFC as `<base-62-number>`, that is: /// * `x = 0` is encoded as just the `"_"` terminator /// * `x > 0` is encoded as `x - 1` in base 62, followed by `"_"`, /// e.g. `1` becomes `"0_"`, `62` becomes `"Z_"`, etc. fn push_integer_62(&mut self, x: u64) { if let Some(x) = x.checked_sub(1) { base_n::push_str(x as u128, 62, &mut self.out); } self.push("_"); } /// Push a `tag`-prefixed base 62 integer, when larger than `0`, that is: /// * `x = 0` is encoded as `""` (nothing) /// * `x > 0` is encoded as the `tag` followed by `push_integer_62(x - 1)` /// e.g. `1` becomes `tag + "_"`, `2` becomes `tag + "0_"`, etc. fn push_opt_integer_62(&mut self, tag: &str, x: u64) { if let Some(x) = x.checked_sub(1) { self.push(tag); self.push_integer_62(x); } } fn push_disambiguator(&mut self, dis: u64) { self.push_opt_integer_62("s", dis); } fn push_ident(&mut self, ident: &str)
// Replace `-` with `_`. if let Some(c) = punycode_bytes.iter_mut().rfind(|&&mut c| c == b'-') { *c = b'_'; } // FIXME(eddyb) avoid rechecking UTF-8 validity. punycode_string = String::from_utf8(punycode_bytes).unwrap(); &punycode_string } else { ident }; let _ = write!(self.out, "{}", ident.len()); // Write a separating `_` if necessary (leading digit or `_`). match ident.chars().next() { Some('_') | Some('0'..='9') => { self.push("_"); } _ => {} } self.push(ident); } fn path_append_ns( mut self, print_prefix: impl FnOnce(Self) -> Result<Self,!>, ns: char, disambiguator: u64, name: &str, ) -> Result<Self,!> { self.push("N"); self.out.push(ns); self = print_prefix(self)?; self.push_disambiguator(disambiguator as u64); self.push_ident(name); Ok(self) } fn print_backref(mut self, i: usize) -> Result<Self,!> { self.push("B"); self.push_integer_62((i - self.compress.as_ref().unwrap().start_offset) as u64); Ok(self) } fn in_binder<T>( mut self, value: &ty::Binder<T>, print_value: impl FnOnce(Self, &T) -> Result<Self,!> ) -> Result<Self,!> where T: TypeFoldable<'tcx> { let regions = if value.has_late_bound_regions() { self.tcx.collect_referenced_late_bound_regions(value) } else { FxHashSet::default() }; let mut lifetime_depths = self.binders.last().map(|b| b.lifetime_depths.end).map_or(0..0, |i| i..i); let lifetimes = regions.into_iter().map(|br| { match br { ty::BrAnon(i) => { // FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`. assert_ne!(i, 0); i - 1 }, _ => bug!("symbol_names: non-anonymized region `{:?}` in `{:?}`", br, value), } }).max().map_or(0, |max| max + 1); self.push_opt_integer_62("G", lifetimes as u64); lifetime_depths.end += lifetimes; self.binders.push(BinderLevel { lifetime_depths }); self = print_value(self, value.skip_binder())?; self.binders.pop(); Ok(self) } } impl Printer<'tcx> for SymbolMangler<'tcx> { type Error =!; type Path = Self; type Region = Self; type Type = Self; type DynExistential = Self; type Const = Self; fn tcx(&self) -> TyCtxt<'tcx> { self.tcx } fn print_def_path( mut self, def_id: DefId, substs: &'tcx [GenericArg<'tcx>], ) -> Result<Self::Path, Self::Error> { if let Some(&i) = self.compress.as_ref().and_then(|c| c.paths.get(&(def_id, substs))) { return self.print_backref(i); } let start = self.out.len(); self = self.default_print_def_path(def_id, substs)?; // Only cache paths that do not refer to an enclosing // binder (which would change depending on context). 
if!substs.iter().any(|k| k.has_escaping_bound_vars()) { if let Some(c) = &mut self.compress { c.paths.insert((def_id, substs), start); } } Ok(self) } fn print_impl_path( self, impl_def_id: DefId, substs: &'tcx [GenericArg<'tcx>], mut self_ty: Ty<'tcx>, mut impl_trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<Self::Path, Self::Error> { let key = self.tcx.def_key(impl_def_id); let parent_def_id = DefId { index: key.parent.unwrap(),..impl_def_id }; let mut param_env = self.tcx.param_env(impl_def_id) .with_reveal_all(); if!substs.is_empty() { param_env = param_env.subst(self.tcx, substs); } match &mut impl_trait_ref { Some(impl_trait_ref) => { assert_eq!(impl_trait_ref.self_ty(), self_ty); *impl_trait_ref = self.tcx.normalize_erasing_regions(param_env, *impl_trait_ref); self_ty = impl_trait_ref.self_ty(); } None => { self_ty = self.tcx.normalize_erasing_regions(param_env, self_ty); } } self.path_append_impl( |cx| cx.print_def_path(parent_def_id, &[]), &key.disambiguated_data, self_ty, impl_trait_ref, ) } fn print_region( mut self, region: ty::Region<'_>, ) -> Result<Self::Region, Self::Error> { let i = match *region { // Erased lifetimes use the index 0, for a // shorter mangling of `L_`. ty::ReErased => 0, // Late-bound lifetimes use indices starting at 1, // see `BinderLevel` for more details. ty::ReLateBound(debruijn, ty::BrAnon(i)) => { // FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`. assert_ne!(i, 0); let i = i - 1; let binder = &self.binders[self.binders.len() - 1 - debruijn.index()]; let depth = binder.lifetime_depths.start + i; 1 + (self.binders.last().unwrap().lifetime_depths.end - 1 - depth) } _ => bug!("symbol_names: non-erased region `{:?}`", region), }; self.push("L"); self.push_integer_62(i as u64); Ok(self) } fn print_type( mut self, ty: Ty<'tcx>, ) -> Result<Self::Type, Self::Error> { // Basic types, never cached (single-character). let basic_type = match ty.kind { ty::Bool => "b", ty::Char => "c", ty::Str => "e", ty::Tuple(_) if ty.is_unit() => "u", ty::Int(IntTy::I8) => "a", ty::Int(IntTy::I16) => "s", ty::Int(IntTy::I32) => "l", ty::Int(IntTy::I64) => "x", ty::Int(IntTy::I128) => "n", ty::Int(IntTy::Isize) => "i", ty::Uint(UintTy::U8) => "h", ty::Uint(UintTy::U16) => "t", ty::Uint(UintTy::U32) => "m", ty::Uint(UintTy::U64) => "y", ty::Uint(UintTy::U128) => "o", ty::Uint(UintTy::Usize) => "j", ty::Float(FloatTy::F32) => "f", ty::Float(FloatTy::F64) => "d", ty::Never => "z", // Placeholders (should be demangled as `_`). ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error => "p", _ => "", }; if!basic_type.is_empty() { self.push(basic_type); return Ok(self); } if let Some(&i) = self.compress.as_ref().and_then(|c| c.types.get(&ty)) { return self.print_backref(i); } let start = self.out.len(); match ty.kind { // Basic types, handled above. ty::Bool | ty::Char | ty::Str | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Never => unreachable!(), ty::Tuple(_) if ty.is_unit() => unreachable!(), // Placeholders, also handled as part of basic types. ty::Param(_) | ty::Bound(..) 
| ty::Placeholder(_) | ty::Infer(_) | ty::Error => unreachable!(), ty::Ref(r, ty, mutbl) => { self.push(match mutbl { hir::MutImmutable => "R", hir::MutMutable => "Q", }); if *r!= ty::ReErased { self = r.print(self)?; } self = ty.print(self)?; } ty::RawPtr(mt) => { self.push(match mt.mutbl { hir::MutImmutable => "P", hir::MutMutable => "O", }); self = mt.ty.print(self)?; } ty::Array(ty, len) => { self.push("A"); self = ty.print(self)?; self = self.print_const(len)?; } ty::Slice(ty) => { self.push("S"); self = ty.print(self)?; } ty::Tuple(tys) => { self.push("T"); for ty in tys.iter().map(|k| k.expect_ty()) { self = ty.print(self)?; } self.push("E"); } // Mangle all nominal types as paths. ty::Adt(&ty::AdtDef { did: def_id,.. }, substs) | ty::FnDef(def_id, substs) | ty::Opaque(def_id, substs) | ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs }) | ty::UnnormalizedProjection(ty::ProjectionTy { item_def_id: def_id, substs }) | ty::Closure(def_id, substs) | ty::Generator(def_id, substs, _) => { self = self.print_def_path(def_id, substs)?; } ty::Foreign(def_id) => { self = self.print_def_path(def_id, &[])?; } ty::FnPtr(sig) => { self.push("F"); self = self.in_binder(&sig, |mut cx, sig| { if sig.unsafety == hir::Unsafety::Unsafe { cx.push("U"); } match sig.abi { Abi::Rust => {} Abi::C => cx.push("KC"), abi => { cx.push("K"); let name = abi.name(); if name.contains('-') { cx.push_ident(&name.replace('-', "_")); } else { cx.push_ident(name); } } } for &ty in sig.inputs() { cx = ty.print(cx)?; } if sig.c_variadic { cx.push("v"); } cx.push("E"); sig.output().print(cx) })?; } ty::Dynamic(predicates, r) => { self.push("D"); self = self.in_binder(&predicates, |cx, predicates| { cx.print_dyn_existential(predicates) })?; self = r.print(self)?; } ty::GeneratorWitness(_) => { bug!("symbol_names: unexpected `GeneratorWitness`") } } // Only cache types that do not refer to an enclosing // binder (which would change depending on context). if!ty.has_escaping_bound_vars() { if let Some(c) = &mut self.compress { c.types.insert(ty, start); } } Ok(self) } fn print_dyn_existential( mut self, predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>, ) -> Result<Self::DynExistential, Self::Error> { for predicate in predicates { match *predicate { ty::ExistentialPredicate::Trait(trait_ref) => { // Use a type that can't appear in defaults of type parameters. let dummy_self = self.tcx.mk_ty_infer(ty::FreshTy(0)); let trait_ref = trait_ref.with_self_ty(self.tcx, dummy_self); self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?; } ty::ExistentialPredicate::Projection(projection) => { let name = self.tcx.associated_item(projection.item_def_id).ident; self.push("p"); self.push_ident(&name.as_str()); self = projection.ty.print(self)?; } ty::ExistentialPredicate::AutoTrait(def_id) => { self = self.print_def_path(def_id, &[])?; } } } self.push("E"); Ok(self) } fn print_const( mut self, ct: &'tcx ty::Const<'tcx>, ) -> Result<Self::Const, Self::Error> { if let Some(&i) = self.compress.as_ref().and_then(|c| c.consts.get(&ct)) { return self.print_backref(i); } let start = self.out.len(); match ct.ty.kind { ty::Uint(_) => {} _ => { bug!("symbol_names: unsupported constant of type `{}` ({:?})", ct.ty, ct); } } self = ct.ty.print(self)?; if let Some(bits) = ct.try_eval_bits(self.tcx, ty::ParamEnv::reveal_all(), ct.ty) { let
{
    let mut use_punycode = false;
    for b in ident.bytes() {
        match b {
            b'_' | b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' => {}
            0x80..=0xff => use_punycode = true,
            _ => bug!("symbol_names: bad byte {} in ident {:?}", b, ident),
        }
    }
    let punycode_string;
    let ident = if use_punycode {
        self.push("u");
        // FIXME(eddyb) we should probably roll our own punycode implementation.
        let mut punycode_bytes = match ::punycode::encode(ident) {
            Ok(s) => s.into_bytes(),
            Err(()) => bug!("symbol_names: punycode encoding failed for ident {:?}", ident),
        };
identifier_body
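The `push_ident` body above feeds into the `<base-62-number>` scheme documented in the prefix: `0` encodes as a bare `_`, and `x > 0` encodes `x - 1` in base 62 followed by `_`. A minimal sketch, assuming rustc's `base_n` digit order (digits, then lowercase, then uppercase), which matches the doc-comment examples (`1` → `"0_"`, `62` → `"Z_"`):

```rust
// Digit alphabet assumed from rustc's `base_n`: 0-9, a-z, A-Z.
const DIGITS: &[u8; 62] = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";

// x = 0 -> just the "_" terminator; x > 0 -> (x - 1) in base 62, then "_".
fn push_integer_62(x: u64, out: &mut String) {
    if let Some(mut x) = x.checked_sub(1) {
        let mut buf = Vec::new();
        loop {
            buf.push(DIGITS[(x % 62) as usize]);
            x /= 62;
            if x == 0 {
                break;
            }
        }
        buf.reverse();
        out.push_str(std::str::from_utf8(&buf).unwrap());
    }
    out.push('_');
}

fn main() {
    let mut s = String::new();
    push_integer_62(0, &mut s);
    push_integer_62(1, &mut s);
    push_integer_62(62, &mut s);
    // Matches the examples in the v0 mangler's doc comment.
    assert_eq!(s, "_0_Z_");
}
```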
v0.rs
Box<CompressionCaches<'tcx>>>, binders: Vec<BinderLevel>, out: String, } impl SymbolMangler<'tcx> { fn push(&mut self, s: &str) { self.out.push_str(s); } /// Push a `_`-terminated base 62 integer, using the format /// specified in the RFC as `<base-62-number>`, that is: /// * `x = 0` is encoded as just the `"_"` terminator /// * `x > 0` is encoded as `x - 1` in base 62, followed by `"_"`, /// e.g. `1` becomes `"0_"`, `62` becomes `"Z_"`, etc. fn push_integer_62(&mut self, x: u64) { if let Some(x) = x.checked_sub(1) { base_n::push_str(x as u128, 62, &mut self.out); } self.push("_"); } /// Push a `tag`-prefixed base 62 integer, when larger than `0`, that is: /// * `x = 0` is encoded as `""` (nothing) /// * `x > 0` is encoded as the `tag` followed by `push_integer_62(x - 1)` /// e.g. `1` becomes `tag + "_"`, `2` becomes `tag + "0_"`, etc. fn push_opt_integer_62(&mut self, tag: &str, x: u64) { if let Some(x) = x.checked_sub(1) { self.push(tag); self.push_integer_62(x); } } fn push_disambiguator(&mut self, dis: u64) { self.push_opt_integer_62("s", dis); } fn push_ident(&mut self, ident: &str) { let mut use_punycode = false; for b in ident.bytes() { match b { b'_' | b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' => {} 0x80..=0xff => use_punycode = true, _ => bug!("symbol_names: bad byte {} in ident {:?}", b, ident), } } let punycode_string; let ident = if use_punycode { self.push("u"); // FIXME(eddyb) we should probably roll our own punycode implementation. let mut punycode_bytes = match ::punycode::encode(ident) { Ok(s) => s.into_bytes(), Err(()) => bug!("symbol_names: punycode encoding failed for ident {:?}", ident), }; // Replace `-` with `_`. if let Some(c) = punycode_bytes.iter_mut().rfind(|&&mut c| c == b'-') { *c = b'_'; } // FIXME(eddyb) avoid rechecking UTF-8 validity. punycode_string = String::from_utf8(punycode_bytes).unwrap(); &punycode_string } else { ident }; let _ = write!(self.out, "{}", ident.len()); // Write a separating `_` if necessary (leading digit or `_`). match ident.chars().next() { Some('_') | Some('0'..='9') => { self.push("_"); } _ => {} } self.push(ident); } fn path_append_ns( mut self, print_prefix: impl FnOnce(Self) -> Result<Self,!>, ns: char, disambiguator: u64, name: &str, ) -> Result<Self,!> { self.push("N"); self.out.push(ns); self = print_prefix(self)?; self.push_disambiguator(disambiguator as u64); self.push_ident(name); Ok(self) } fn print_backref(mut self, i: usize) -> Result<Self,!> { self.push("B"); self.push_integer_62((i - self.compress.as_ref().unwrap().start_offset) as u64); Ok(self) } fn in_binder<T>( mut self, value: &ty::Binder<T>, print_value: impl FnOnce(Self, &T) -> Result<Self,!> ) -> Result<Self,!> where T: TypeFoldable<'tcx> { let regions = if value.has_late_bound_regions() { self.tcx.collect_referenced_late_bound_regions(value) } else { FxHashSet::default() }; let mut lifetime_depths = self.binders.last().map(|b| b.lifetime_depths.end).map_or(0..0, |i| i..i); let lifetimes = regions.into_iter().map(|br| { match br { ty::BrAnon(i) => { // FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`. 
assert_ne!(i, 0); i - 1 }, _ => bug!("symbol_names: non-anonymized region `{:?}` in `{:?}`", br, value), } }).max().map_or(0, |max| max + 1); self.push_opt_integer_62("G", lifetimes as u64); lifetime_depths.end += lifetimes; self.binders.push(BinderLevel { lifetime_depths }); self = print_value(self, value.skip_binder())?; self.binders.pop(); Ok(self) } } impl Printer<'tcx> for SymbolMangler<'tcx> { type Error =!; type Path = Self; type Region = Self; type Type = Self; type DynExistential = Self; type Const = Self; fn tcx(&self) -> TyCtxt<'tcx> { self.tcx } fn print_def_path( mut self, def_id: DefId, substs: &'tcx [GenericArg<'tcx>], ) -> Result<Self::Path, Self::Error> { if let Some(&i) = self.compress.as_ref().and_then(|c| c.paths.get(&(def_id, substs))) { return self.print_backref(i); } let start = self.out.len(); self = self.default_print_def_path(def_id, substs)?; // Only cache paths that do not refer to an enclosing // binder (which would change depending on context). if!substs.iter().any(|k| k.has_escaping_bound_vars()) { if let Some(c) = &mut self.compress { c.paths.insert((def_id, substs), start); } } Ok(self) } fn print_impl_path( self, impl_def_id: DefId, substs: &'tcx [GenericArg<'tcx>], mut self_ty: Ty<'tcx>, mut impl_trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<Self::Path, Self::Error> { let key = self.tcx.def_key(impl_def_id); let parent_def_id = DefId { index: key.parent.unwrap(),..impl_def_id }; let mut param_env = self.tcx.param_env(impl_def_id) .with_reveal_all(); if!substs.is_empty() { param_env = param_env.subst(self.tcx, substs); } match &mut impl_trait_ref { Some(impl_trait_ref) => { assert_eq!(impl_trait_ref.self_ty(), self_ty); *impl_trait_ref = self.tcx.normalize_erasing_regions(param_env, *impl_trait_ref); self_ty = impl_trait_ref.self_ty(); } None => { self_ty = self.tcx.normalize_erasing_regions(param_env, self_ty); } } self.path_append_impl( |cx| cx.print_def_path(parent_def_id, &[]), &key.disambiguated_data, self_ty, impl_trait_ref, ) } fn print_region( mut self, region: ty::Region<'_>, ) -> Result<Self::Region, Self::Error> { let i = match *region { // Erased lifetimes use the index 0, for a // shorter mangling of `L_`. ty::ReErased => 0, // Late-bound lifetimes use indices starting at 1, // see `BinderLevel` for more details. ty::ReLateBound(debruijn, ty::BrAnon(i)) => { // FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`. assert_ne!(i, 0); let i = i - 1; let binder = &self.binders[self.binders.len() - 1 - debruijn.index()]; let depth = binder.lifetime_depths.start + i; 1 + (self.binders.last().unwrap().lifetime_depths.end - 1 - depth) } _ => bug!("symbol_names: non-erased region `{:?}`", region), }; self.push("L"); self.push_integer_62(i as u64); Ok(self) } fn print_type( mut self, ty: Ty<'tcx>, ) -> Result<Self::Type, Self::Error> { // Basic types, never cached (single-character). let basic_type = match ty.kind { ty::Bool => "b", ty::Char => "c", ty::Str => "e", ty::Tuple(_) if ty.is_unit() => "u", ty::Int(IntTy::I8) => "a", ty::Int(IntTy::I16) => "s", ty::Int(IntTy::I32) => "l", ty::Int(IntTy::I64) => "x", ty::Int(IntTy::I128) => "n", ty::Int(IntTy::Isize) => "i", ty::Uint(UintTy::U8) => "h", ty::Uint(UintTy::U16) => "t", ty::Uint(UintTy::U32) => "m", ty::Uint(UintTy::U64) => "y", ty::Uint(UintTy::U128) => "o", ty::Uint(UintTy::Usize) => "j", ty::Float(FloatTy::F32) => "f", ty::Float(FloatTy::F64) => "d", ty::Never => "z", // Placeholders (should be demangled as `_`). ty::Param(_) | ty::Bound(..) 
| ty::Placeholder(_) | ty::Infer(_) | ty::Error => "p", _ => "", }; if!basic_type.is_empty() { self.push(basic_type); return Ok(self); } if let Some(&i) = self.compress.as_ref().and_then(|c| c.types.get(&ty)) { return self.print_backref(i); } let start = self.out.len(); match ty.kind { // Basic types, handled above. ty::Bool | ty::Char | ty::Str | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Never => unreachable!(), ty::Tuple(_) if ty.is_unit() => unreachable!(), // Placeholders, also handled as part of basic types. ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error => unreachable!(), ty::Ref(r, ty, mutbl) => { self.push(match mutbl { hir::MutImmutable => "R", hir::MutMutable => "Q", }); if *r!= ty::ReErased { self = r.print(self)?; } self = ty.print(self)?; } ty::RawPtr(mt) => { self.push(match mt.mutbl { hir::MutImmutable => "P", hir::MutMutable => "O", }); self = mt.ty.print(self)?; } ty::Array(ty, len) => { self.push("A"); self = ty.print(self)?; self = self.print_const(len)?; } ty::Slice(ty) => { self.push("S"); self = ty.print(self)?; } ty::Tuple(tys) => { self.push("T"); for ty in tys.iter().map(|k| k.expect_ty()) { self = ty.print(self)?; } self.push("E"); } // Mangle all nominal types as paths. ty::Adt(&ty::AdtDef { did: def_id,.. }, substs) | ty::FnDef(def_id, substs) | ty::Opaque(def_id, substs) | ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs }) | ty::UnnormalizedProjection(ty::ProjectionTy { item_def_id: def_id, substs }) | ty::Closure(def_id, substs) | ty::Generator(def_id, substs, _) => { self = self.print_def_path(def_id, substs)?;
self = self.print_def_path(def_id, &[])?; } ty::FnPtr(sig) => { self.push("F"); self = self.in_binder(&sig, |mut cx, sig| { if sig.unsafety == hir::Unsafety::Unsafe { cx.push("U"); } match sig.abi { Abi::Rust => {} Abi::C => cx.push("KC"), abi => { cx.push("K"); let name = abi.name(); if name.contains('-') { cx.push_ident(&name.replace('-', "_")); } else { cx.push_ident(name); } } } for &ty in sig.inputs() { cx = ty.print(cx)?; } if sig.c_variadic { cx.push("v"); } cx.push("E"); sig.output().print(cx) })?; } ty::Dynamic(predicates, r) => { self.push("D"); self = self.in_binder(&predicates, |cx, predicates| { cx.print_dyn_existential(predicates) })?; self = r.print(self)?; } ty::GeneratorWitness(_) => { bug!("symbol_names: unexpected `GeneratorWitness`") } } // Only cache types that do not refer to an enclosing // binder (which would change depending on context). if!ty.has_escaping_bound_vars() { if let Some(c) = &mut self.compress { c.types.insert(ty, start); } } Ok(self) } fn print_dyn_existential( mut self, predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>, ) -> Result<Self::DynExistential, Self::Error> { for predicate in predicates { match *predicate { ty::ExistentialPredicate::Trait(trait_ref) => { // Use a type that can't appear in defaults of type parameters. let dummy_self = self.tcx.mk_ty_infer(ty::FreshTy(0)); let trait_ref = trait_ref.with_self_ty(self.tcx, dummy_self); self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?; } ty::ExistentialPredicate::Projection(projection) => { let name = self.tcx.associated_item(projection.item_def_id).ident; self.push("p"); self.push_ident(&name.as_str()); self = projection.ty.print(self)?; } ty::ExistentialPredicate::AutoTrait(def_id) => { self = self.print_def_path(def_id, &[])?; } } } self.push("E"); Ok(self) } fn print_const( mut self, ct: &'tcx ty::Const<'tcx>, ) -> Result<Self::Const, Self::Error> { if let Some(&i) = self.compress.as_ref().and_then(|c| c.consts.get(&ct)) { return self.print_backref(i); } let start = self.out.len(); match ct.ty.kind { ty::Uint(_) => {} _ => { bug!("symbol_names: unsupported constant of type `{}` ({:?})", ct.ty, ct); } } self = ct.ty.print(self)?; if let Some(bits) = ct.try_eval_bits(self.tcx, ty::ParamEnv::reveal_all(), ct.ty) { let _ =
} ty::Foreign(def_id) => {
random_line_split
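The `<base-62-number>` production documented on `push_integer_62` above is easy to verify in isolation. Below is a minimal, dependency-free sketch; `base62_digits` is a hypothetical stand-in for the `base_n::push_str` helper the real mangler uses.

// Sketch of `<base-62-number>`: 0 encodes as just "_", and x > 0 encodes
// x - 1 in base 62 (digits, then lowercase, then uppercase) followed by "_".
fn base62_digits(mut x: u128, out: &mut String) {
    const DIGITS: &[u8] = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
    let mut buf = Vec::new();
    loop {
        buf.push(DIGITS[(x % 62) as usize]);
        x /= 62;
        if x == 0 { break; }
    }
    buf.reverse();
    out.push_str(std::str::from_utf8(&buf).unwrap());
}

fn push_integer_62(x: u64, out: &mut String) {
    if let Some(x) = x.checked_sub(1) {
        base62_digits(x as u128, out);
    }
    out.push('_');
}

fn main() {
    let mut s = String::new();
    push_integer_62(0, &mut s);  // "_"
    push_integer_62(1, &mut s);  // "0_"
    push_integer_62(62, &mut s); // "Z_"
    push_integer_62(63, &mut s); // "10_"
    assert_eq!(s, "_0_Z_10_");
}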
v0.rs
<CompressionCaches<'tcx>>>, binders: Vec<BinderLevel>, out: String, } impl SymbolMangler<'tcx> { fn push(&mut self, s: &str) { self.out.push_str(s); } /// Push a `_`-terminated base 62 integer, using the format /// specified in the RFC as `<base-62-number>`, that is: /// * `x = 0` is encoded as just the `"_"` terminator /// * `x > 0` is encoded as `x - 1` in base 62, followed by `"_"`, /// e.g. `1` becomes `"0_"`, `62` becomes `"Z_"`, etc. fn push_integer_62(&mut self, x: u64) { if let Some(x) = x.checked_sub(1) { base_n::push_str(x as u128, 62, &mut self.out); } self.push("_"); } /// Push a `tag`-prefixed base 62 integer, when larger than `0`, that is: /// * `x = 0` is encoded as `""` (nothing) /// * `x > 0` is encoded as the `tag` followed by `push_integer_62(x - 1)` /// e.g. `1` becomes `tag + "_"`, `2` becomes `tag + "0_"`, etc. fn push_opt_integer_62(&mut self, tag: &str, x: u64) { if let Some(x) = x.checked_sub(1) { self.push(tag); self.push_integer_62(x); } } fn push_disambiguator(&mut self, dis: u64) { self.push_opt_integer_62("s", dis); } fn push_ident(&mut self, ident: &str) { let mut use_punycode = false; for b in ident.bytes() { match b { b'_' | b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' => {} 0x80..=0xff => use_punycode = true, _ => bug!("symbol_names: bad byte {} in ident {:?}", b, ident), } } let punycode_string; let ident = if use_punycode { self.push("u"); // FIXME(eddyb) we should probably roll our own punycode implementation. let mut punycode_bytes = match ::punycode::encode(ident) { Ok(s) => s.into_bytes(), Err(()) => bug!("symbol_names: punycode encoding failed for ident {:?}", ident), }; // Replace `-` with `_`. if let Some(c) = punycode_bytes.iter_mut().rfind(|&&mut c| c == b'-') { *c = b'_'; } // FIXME(eddyb) avoid rechecking UTF-8 validity. punycode_string = String::from_utf8(punycode_bytes).unwrap(); &punycode_string } else { ident }; let _ = write!(self.out, "{}", ident.len()); // Write a separating `_` if necessary (leading digit or `_`). match ident.chars().next() { Some('_') | Some('0'..='9') => { self.push("_"); } _ => {} } self.push(ident); } fn path_append_ns( mut self, print_prefix: impl FnOnce(Self) -> Result<Self,!>, ns: char, disambiguator: u64, name: &str, ) -> Result<Self,!> { self.push("N"); self.out.push(ns); self = print_prefix(self)?; self.push_disambiguator(disambiguator as u64); self.push_ident(name); Ok(self) } fn print_backref(mut self, i: usize) -> Result<Self,!> { self.push("B"); self.push_integer_62((i - self.compress.as_ref().unwrap().start_offset) as u64); Ok(self) } fn in_binder<T>( mut self, value: &ty::Binder<T>, print_value: impl FnOnce(Self, &T) -> Result<Self,!> ) -> Result<Self,!> where T: TypeFoldable<'tcx> { let regions = if value.has_late_bound_regions() { self.tcx.collect_referenced_late_bound_regions(value) } else { FxHashSet::default() }; let mut lifetime_depths = self.binders.last().map(|b| b.lifetime_depths.end).map_or(0..0, |i| i..i); let lifetimes = regions.into_iter().map(|br| { match br { ty::BrAnon(i) => { // FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`. 
assert_ne!(i, 0); i - 1 }, _ => bug!("symbol_names: non-anonymized region `{:?}` in `{:?}`", br, value), } }).max().map_or(0, |max| max + 1); self.push_opt_integer_62("G", lifetimes as u64); lifetime_depths.end += lifetimes; self.binders.push(BinderLevel { lifetime_depths }); self = print_value(self, value.skip_binder())?; self.binders.pop(); Ok(self) } } impl Printer<'tcx> for SymbolMangler<'tcx> { type Error =!; type Path = Self; type Region = Self; type Type = Self; type DynExistential = Self; type Const = Self; fn tcx(&self) -> TyCtxt<'tcx> { self.tcx } fn print_def_path( mut self, def_id: DefId, substs: &'tcx [GenericArg<'tcx>], ) -> Result<Self::Path, Self::Error> { if let Some(&i) = self.compress.as_ref().and_then(|c| c.paths.get(&(def_id, substs))) { return self.print_backref(i); } let start = self.out.len(); self = self.default_print_def_path(def_id, substs)?; // Only cache paths that do not refer to an enclosing // binder (which would change depending on context). if!substs.iter().any(|k| k.has_escaping_bound_vars()) { if let Some(c) = &mut self.compress { c.paths.insert((def_id, substs), start); } } Ok(self) } fn print_impl_path( self, impl_def_id: DefId, substs: &'tcx [GenericArg<'tcx>], mut self_ty: Ty<'tcx>, mut impl_trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<Self::Path, Self::Error> { let key = self.tcx.def_key(impl_def_id); let parent_def_id = DefId { index: key.parent.unwrap(),..impl_def_id }; let mut param_env = self.tcx.param_env(impl_def_id) .with_reveal_all(); if!substs.is_empty() { param_env = param_env.subst(self.tcx, substs); } match &mut impl_trait_ref { Some(impl_trait_ref) => { assert_eq!(impl_trait_ref.self_ty(), self_ty); *impl_trait_ref = self.tcx.normalize_erasing_regions(param_env, *impl_trait_ref); self_ty = impl_trait_ref.self_ty(); } None => { self_ty = self.tcx.normalize_erasing_regions(param_env, self_ty); } } self.path_append_impl( |cx| cx.print_def_path(parent_def_id, &[]), &key.disambiguated_data, self_ty, impl_trait_ref, ) } fn print_region( mut self, region: ty::Region<'_>, ) -> Result<Self::Region, Self::Error> { let i = match *region { // Erased lifetimes use the index 0, for a // shorter mangling of `L_`. ty::ReErased => 0, // Late-bound lifetimes use indices starting at 1, // see `BinderLevel` for more details. ty::ReLateBound(debruijn, ty::BrAnon(i)) => { // FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`. assert_ne!(i, 0); let i = i - 1; let binder = &self.binders[self.binders.len() - 1 - debruijn.index()]; let depth = binder.lifetime_depths.start + i; 1 + (self.binders.last().unwrap().lifetime_depths.end - 1 - depth) } _ => bug!("symbol_names: non-erased region `{:?}`", region), }; self.push("L"); self.push_integer_62(i as u64); Ok(self) } fn print_type( mut self, ty: Ty<'tcx>, ) -> Result<Self::Type, Self::Error> { // Basic types, never cached (single-character). let basic_type = match ty.kind { ty::Bool => "b", ty::Char => "c", ty::Str => "e", ty::Tuple(_) if ty.is_unit() => "u", ty::Int(IntTy::I8) => "a", ty::Int(IntTy::I16) => "s", ty::Int(IntTy::I32) => "l", ty::Int(IntTy::I64) => "x", ty::Int(IntTy::I128) => "n", ty::Int(IntTy::Isize) => "i", ty::Uint(UintTy::U8) => "h", ty::Uint(UintTy::U16) => "t", ty::Uint(UintTy::U32) => "m", ty::Uint(UintTy::U64) => "y", ty::Uint(UintTy::U128) => "o", ty::Uint(UintTy::Usize) => "j", ty::Float(FloatTy::F32) => "f", ty::Float(FloatTy::F64) => "d", ty::Never => "z", // Placeholders (should be demangled as `_`). ty::Param(_) | ty::Bound(..) 
| ty::Placeholder(_) | ty::Infer(_) | ty::Error => "p", _ => "", }; if!basic_type.is_empty() { self.push(basic_type); return Ok(self); } if let Some(&i) = self.compress.as_ref().and_then(|c| c.types.get(&ty)) { return self.print_backref(i); } let start = self.out.len(); match ty.kind { // Basic types, handled above. ty::Bool | ty::Char | ty::Str | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Never => unreachable!(), ty::Tuple(_) if ty.is_unit() => unreachable!(), // Placeholders, also handled as part of basic types. ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error => unreachable!(), ty::Ref(r, ty, mutbl) => { self.push(match mutbl { hir::MutImmutable => "R", hir::MutMutable => "Q", }); if *r!= ty::ReErased { self = r.print(self)?; } self = ty.print(self)?; } ty::RawPtr(mt) => { self.push(match mt.mutbl { hir::MutImmutable => "P", hir::MutMutable => "O", }); self = mt.ty.print(self)?; } ty::Array(ty, len) => { self.push("A"); self = ty.print(self)?; self = self.print_const(len)?; } ty::Slice(ty) => { self.push("S"); self = ty.print(self)?; } ty::Tuple(tys) => { self.push("T"); for ty in tys.iter().map(|k| k.expect_ty()) { self = ty.print(self)?; } self.push("E"); } // Mangle all nominal types as paths. ty::Adt(&ty::AdtDef { did: def_id,.. }, substs) | ty::FnDef(def_id, substs) | ty::Opaque(def_id, substs) | ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs }) | ty::UnnormalizedProjection(ty::ProjectionTy { item_def_id: def_id, substs }) | ty::Closure(def_id, substs) | ty::Generator(def_id, substs, _) => { self = self.print_def_path(def_id, substs)?; } ty::Foreign(def_id) => { self = self.print_def_path(def_id, &[])?; } ty::FnPtr(sig) => { self.push("F"); self = self.in_binder(&sig, |mut cx, sig| { if sig.unsafety == hir::Unsafety::Unsafe { cx.push("U"); } match sig.abi { Abi::Rust => {} Abi::C => cx.push("KC"), abi => { cx.push("K"); let name = abi.name(); if name.contains('-') { cx.push_ident(&name.replace('-', "_")); } else { cx.push_ident(name); } } } for &ty in sig.inputs() { cx = ty.print(cx)?; } if sig.c_variadic { cx.push("v"); } cx.push("E"); sig.output().print(cx) })?; } ty::Dynamic(predicates, r) => { self.push("D"); self = self.in_binder(&predicates, |cx, predicates| { cx.print_dyn_existential(predicates) })?; self = r.print(self)?; } ty::GeneratorWitness(_) => { bug!("symbol_names: unexpected `GeneratorWitness`") } } // Only cache types that do not refer to an enclosing // binder (which would change depending on context). if!ty.has_escaping_bound_vars() { if let Some(c) = &mut self.compress { c.types.insert(ty, start); } } Ok(self) } fn
( mut self, predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>, ) -> Result<Self::DynExistential, Self::Error> { for predicate in predicates { match *predicate { ty::ExistentialPredicate::Trait(trait_ref) => { // Use a type that can't appear in defaults of type parameters. let dummy_self = self.tcx.mk_ty_infer(ty::FreshTy(0)); let trait_ref = trait_ref.with_self_ty(self.tcx, dummy_self); self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?; } ty::ExistentialPredicate::Projection(projection) => { let name = self.tcx.associated_item(projection.item_def_id).ident; self.push("p"); self.push_ident(&name.as_str()); self = projection.ty.print(self)?; } ty::ExistentialPredicate::AutoTrait(def_id) => { self = self.print_def_path(def_id, &[])?; } } } self.push("E"); Ok(self) } fn print_const( mut self, ct: &'tcx ty::Const<'tcx>, ) -> Result<Self::Const, Self::Error> { if let Some(&i) = self.compress.as_ref().and_then(|c| c.consts.get(&ct)) { return self.print_backref(i); } let start = self.out.len(); match ct.ty.kind { ty::Uint(_) => {} _ => { bug!("symbol_names: unsupported constant of type `{}` ({:?})", ct.ty, ct); } } self = ct.ty.print(self)?; if let Some(bits) = ct.try_eval_bits(self.tcx, ty::ParamEnv::reveal_all(), ct.ty) { let
print_dyn_existential
identifier_name
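The ident rule in `push_ident` above (a decimal length prefix, then an optional `_` separator) can be exercised on its own. A small sketch of just the ASCII path, with the punycode handling for non-ASCII idents omitted:

// Write the byte length, then a separating "_" if the ident starts with a
// digit or "_" (so the demangler can tell where the length ends and the
// ident begins), then the ident itself.
fn push_ident_ascii(ident: &str, out: &mut String) {
    use std::fmt::Write;
    let _ = write!(out, "{}", ident.len());
    match ident.chars().next() {
        Some('_') | Some('0'..='9') => out.push('_'),
        _ => {}
    }
    out.push_str(ident);
}

fn main() {
    let mut s = String::new();
    push_ident_ascii("foo", &mut s);
    assert_eq!(s, "3foo");
    let mut s = String::new();
    push_ident_ascii("_bar", &mut s); // separator keeps "4" and "_bar" unambiguous
    assert_eq!(s, "4__bar");
}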
v0.rs
<CompressionCaches<'tcx>>>, binders: Vec<BinderLevel>, out: String, } impl SymbolMangler<'tcx> { fn push(&mut self, s: &str) { self.out.push_str(s); } /// Push a `_`-terminated base 62 integer, using the format /// specified in the RFC as `<base-62-number>`, that is: /// * `x = 0` is encoded as just the `"_"` terminator /// * `x > 0` is encoded as `x - 1` in base 62, followed by `"_"`, /// e.g. `1` becomes `"0_"`, `62` becomes `"Z_"`, etc. fn push_integer_62(&mut self, x: u64) { if let Some(x) = x.checked_sub(1) { base_n::push_str(x as u128, 62, &mut self.out); } self.push("_"); } /// Push a `tag`-prefixed base 62 integer, when larger than `0`, that is: /// * `x = 0` is encoded as `""` (nothing) /// * `x > 0` is encoded as the `tag` followed by `push_integer_62(x - 1)` /// e.g. `1` becomes `tag + "_"`, `2` becomes `tag + "0_"`, etc. fn push_opt_integer_62(&mut self, tag: &str, x: u64) { if let Some(x) = x.checked_sub(1) { self.push(tag); self.push_integer_62(x); } } fn push_disambiguator(&mut self, dis: u64) { self.push_opt_integer_62("s", dis); } fn push_ident(&mut self, ident: &str) { let mut use_punycode = false; for b in ident.bytes() { match b { b'_' | b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' => {} 0x80..=0xff => use_punycode = true, _ => bug!("symbol_names: bad byte {} in ident {:?}", b, ident), } } let punycode_string; let ident = if use_punycode { self.push("u"); // FIXME(eddyb) we should probably roll our own punycode implementation. let mut punycode_bytes = match ::punycode::encode(ident) { Ok(s) => s.into_bytes(), Err(()) => bug!("symbol_names: punycode encoding failed for ident {:?}", ident), }; // Replace `-` with `_`. if let Some(c) = punycode_bytes.iter_mut().rfind(|&&mut c| c == b'-')
// FIXME(eddyb) avoid rechecking UTF-8 validity. punycode_string = String::from_utf8(punycode_bytes).unwrap(); &punycode_string } else { ident }; let _ = write!(self.out, "{}", ident.len()); // Write a separating `_` if necessary (leading digit or `_`). match ident.chars().next() { Some('_') | Some('0'..='9') => { self.push("_"); } _ => {} } self.push(ident); } fn path_append_ns( mut self, print_prefix: impl FnOnce(Self) -> Result<Self,!>, ns: char, disambiguator: u64, name: &str, ) -> Result<Self,!> { self.push("N"); self.out.push(ns); self = print_prefix(self)?; self.push_disambiguator(disambiguator as u64); self.push_ident(name); Ok(self) } fn print_backref(mut self, i: usize) -> Result<Self,!> { self.push("B"); self.push_integer_62((i - self.compress.as_ref().unwrap().start_offset) as u64); Ok(self) } fn in_binder<T>( mut self, value: &ty::Binder<T>, print_value: impl FnOnce(Self, &T) -> Result<Self,!> ) -> Result<Self,!> where T: TypeFoldable<'tcx> { let regions = if value.has_late_bound_regions() { self.tcx.collect_referenced_late_bound_regions(value) } else { FxHashSet::default() }; let mut lifetime_depths = self.binders.last().map(|b| b.lifetime_depths.end).map_or(0..0, |i| i..i); let lifetimes = regions.into_iter().map(|br| { match br { ty::BrAnon(i) => { // FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`. assert_ne!(i, 0); i - 1 }, _ => bug!("symbol_names: non-anonymized region `{:?}` in `{:?}`", br, value), } }).max().map_or(0, |max| max + 1); self.push_opt_integer_62("G", lifetimes as u64); lifetime_depths.end += lifetimes; self.binders.push(BinderLevel { lifetime_depths }); self = print_value(self, value.skip_binder())?; self.binders.pop(); Ok(self) } } impl Printer<'tcx> for SymbolMangler<'tcx> { type Error =!; type Path = Self; type Region = Self; type Type = Self; type DynExistential = Self; type Const = Self; fn tcx(&self) -> TyCtxt<'tcx> { self.tcx } fn print_def_path( mut self, def_id: DefId, substs: &'tcx [GenericArg<'tcx>], ) -> Result<Self::Path, Self::Error> { if let Some(&i) = self.compress.as_ref().and_then(|c| c.paths.get(&(def_id, substs))) { return self.print_backref(i); } let start = self.out.len(); self = self.default_print_def_path(def_id, substs)?; // Only cache paths that do not refer to an enclosing // binder (which would change depending on context). 
if!substs.iter().any(|k| k.has_escaping_bound_vars()) { if let Some(c) = &mut self.compress { c.paths.insert((def_id, substs), start); } } Ok(self) } fn print_impl_path( self, impl_def_id: DefId, substs: &'tcx [GenericArg<'tcx>], mut self_ty: Ty<'tcx>, mut impl_trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<Self::Path, Self::Error> { let key = self.tcx.def_key(impl_def_id); let parent_def_id = DefId { index: key.parent.unwrap(),..impl_def_id }; let mut param_env = self.tcx.param_env(impl_def_id) .with_reveal_all(); if!substs.is_empty() { param_env = param_env.subst(self.tcx, substs); } match &mut impl_trait_ref { Some(impl_trait_ref) => { assert_eq!(impl_trait_ref.self_ty(), self_ty); *impl_trait_ref = self.tcx.normalize_erasing_regions(param_env, *impl_trait_ref); self_ty = impl_trait_ref.self_ty(); } None => { self_ty = self.tcx.normalize_erasing_regions(param_env, self_ty); } } self.path_append_impl( |cx| cx.print_def_path(parent_def_id, &[]), &key.disambiguated_data, self_ty, impl_trait_ref, ) } fn print_region( mut self, region: ty::Region<'_>, ) -> Result<Self::Region, Self::Error> { let i = match *region { // Erased lifetimes use the index 0, for a // shorter mangling of `L_`. ty::ReErased => 0, // Late-bound lifetimes use indices starting at 1, // see `BinderLevel` for more details. ty::ReLateBound(debruijn, ty::BrAnon(i)) => { // FIXME(eddyb) for some reason, `anonymize_late_bound_regions` starts at `1`. assert_ne!(i, 0); let i = i - 1; let binder = &self.binders[self.binders.len() - 1 - debruijn.index()]; let depth = binder.lifetime_depths.start + i; 1 + (self.binders.last().unwrap().lifetime_depths.end - 1 - depth) } _ => bug!("symbol_names: non-erased region `{:?}`", region), }; self.push("L"); self.push_integer_62(i as u64); Ok(self) } fn print_type( mut self, ty: Ty<'tcx>, ) -> Result<Self::Type, Self::Error> { // Basic types, never cached (single-character). let basic_type = match ty.kind { ty::Bool => "b", ty::Char => "c", ty::Str => "e", ty::Tuple(_) if ty.is_unit() => "u", ty::Int(IntTy::I8) => "a", ty::Int(IntTy::I16) => "s", ty::Int(IntTy::I32) => "l", ty::Int(IntTy::I64) => "x", ty::Int(IntTy::I128) => "n", ty::Int(IntTy::Isize) => "i", ty::Uint(UintTy::U8) => "h", ty::Uint(UintTy::U16) => "t", ty::Uint(UintTy::U32) => "m", ty::Uint(UintTy::U64) => "y", ty::Uint(UintTy::U128) => "o", ty::Uint(UintTy::Usize) => "j", ty::Float(FloatTy::F32) => "f", ty::Float(FloatTy::F64) => "d", ty::Never => "z", // Placeholders (should be demangled as `_`). ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error => "p", _ => "", }; if!basic_type.is_empty() { self.push(basic_type); return Ok(self); } if let Some(&i) = self.compress.as_ref().and_then(|c| c.types.get(&ty)) { return self.print_backref(i); } let start = self.out.len(); match ty.kind { // Basic types, handled above. ty::Bool | ty::Char | ty::Str | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Never => unreachable!(), ty::Tuple(_) if ty.is_unit() => unreachable!(), // Placeholders, also handled as part of basic types. ty::Param(_) | ty::Bound(..) 
| ty::Placeholder(_) | ty::Infer(_) | ty::Error => unreachable!(), ty::Ref(r, ty, mutbl) => { self.push(match mutbl { hir::MutImmutable => "R", hir::MutMutable => "Q", }); if *r!= ty::ReErased { self = r.print(self)?; } self = ty.print(self)?; } ty::RawPtr(mt) => { self.push(match mt.mutbl { hir::MutImmutable => "P", hir::MutMutable => "O", }); self = mt.ty.print(self)?; } ty::Array(ty, len) => { self.push("A"); self = ty.print(self)?; self = self.print_const(len)?; } ty::Slice(ty) => { self.push("S"); self = ty.print(self)?; } ty::Tuple(tys) => { self.push("T"); for ty in tys.iter().map(|k| k.expect_ty()) { self = ty.print(self)?; } self.push("E"); } // Mangle all nominal types as paths. ty::Adt(&ty::AdtDef { did: def_id,.. }, substs) | ty::FnDef(def_id, substs) | ty::Opaque(def_id, substs) | ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs }) | ty::UnnormalizedProjection(ty::ProjectionTy { item_def_id: def_id, substs }) | ty::Closure(def_id, substs) | ty::Generator(def_id, substs, _) => { self = self.print_def_path(def_id, substs)?; } ty::Foreign(def_id) => { self = self.print_def_path(def_id, &[])?; } ty::FnPtr(sig) => { self.push("F"); self = self.in_binder(&sig, |mut cx, sig| { if sig.unsafety == hir::Unsafety::Unsafe { cx.push("U"); } match sig.abi { Abi::Rust => {} Abi::C => cx.push("KC"), abi => { cx.push("K"); let name = abi.name(); if name.contains('-') { cx.push_ident(&name.replace('-', "_")); } else { cx.push_ident(name); } } } for &ty in sig.inputs() { cx = ty.print(cx)?; } if sig.c_variadic { cx.push("v"); } cx.push("E"); sig.output().print(cx) })?; } ty::Dynamic(predicates, r) => { self.push("D"); self = self.in_binder(&predicates, |cx, predicates| { cx.print_dyn_existential(predicates) })?; self = r.print(self)?; } ty::GeneratorWitness(_) => { bug!("symbol_names: unexpected `GeneratorWitness`") } } // Only cache types that do not refer to an enclosing // binder (which would change depending on context). if!ty.has_escaping_bound_vars() { if let Some(c) = &mut self.compress { c.types.insert(ty, start); } } Ok(self) } fn print_dyn_existential( mut self, predicates: &'tcx ty::List<ty::ExistentialPredicate<'tcx>>, ) -> Result<Self::DynExistential, Self::Error> { for predicate in predicates { match *predicate { ty::ExistentialPredicate::Trait(trait_ref) => { // Use a type that can't appear in defaults of type parameters. let dummy_self = self.tcx.mk_ty_infer(ty::FreshTy(0)); let trait_ref = trait_ref.with_self_ty(self.tcx, dummy_self); self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?; } ty::ExistentialPredicate::Projection(projection) => { let name = self.tcx.associated_item(projection.item_def_id).ident; self.push("p"); self.push_ident(&name.as_str()); self = projection.ty.print(self)?; } ty::ExistentialPredicate::AutoTrait(def_id) => { self = self.print_def_path(def_id, &[])?; } } } self.push("E"); Ok(self) } fn print_const( mut self, ct: &'tcx ty::Const<'tcx>, ) -> Result<Self::Const, Self::Error> { if let Some(&i) = self.compress.as_ref().and_then(|c| c.consts.get(&ct)) { return self.print_backref(i); } let start = self.out.len(); match ct.ty.kind { ty::Uint(_) => {} _ => { bug!("symbol_names: unsupported constant of type `{}` ({:?})", ct.ty, ct); } } self = ct.ty.print(self)?; if let Some(bits) = ct.try_eval_bits(self.tcx, ty::ParamEnv::reveal_all(), ct.ty) { let
{ *c = b'_'; }
conditional_block
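`print_def_path` and `print_type` above compress repeated paths and types into `B<base-62-number>` back references keyed by the byte offset at which the item was first mangled. A toy sketch of that idea — simplified types, and a decimal offset standing in for the base-62 encoding:

use std::collections::HashMap;

struct Mangler {
    out: String,
    seen: HashMap<String, usize>, // item key -> start offset in `out`
}

impl Mangler {
    fn print_item(&mut self, key: &str, full_mangling: &str) {
        if let Some(&start) = self.seen.get(key) {
            // Already printed once: emit a back reference instead.
            // Offset 0 encodes as just "_" (see push_integer_62 above).
            self.out.push('B');
            if let Some(x) = (start as u64).checked_sub(1) {
                self.out.push_str(&x.to_string()); // decimal stand-in for base 62
            }
            self.out.push('_');
        } else {
            self.seen.insert(key.to_string(), self.out.len());
            self.out.push_str(full_mangling);
        }
    }
}

fn main() {
    let mut m = Mangler { out: String::new(), seen: HashMap::new() };
    m.print_item("std::vec::Vec", "3std3vec3Vec");
    m.print_item("std::vec::Vec", "3std3vec3Vec");
    assert_eq!(m.out, "3std3vec3VecB_"); // second use shrinks to a 2-byte backref
}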
executor.rs
//! The executor runs on all nodes and is responsible for reconciling the requested state (from the //! cluster's master) with the locally running containers. //! //! When a new state is received, it queries Docker Engine to see if: //! 1) any scheduled containers are not currently running //! 2) any running containers are no longer scheduled //! //! This actor is not responsible for modifying the cluster state. For that, see Scheduler. use crate::scheduler::*; use actix::fut::{ActorFuture, WrapFuture}; use actix::prelude::*; use actix::registry::SystemService; use actix_web::client; use failure::{err_msg, Error}; use futures::future::{err, join_all, ok, Future}; use futures::stream::Stream; use shiplift::builder::*; use shiplift::Docker; use std::collections::HashMap; use std::fmt::Debug; use std::net::{SocketAddr, ToSocketAddrs}; use sysinfo::{ProcessorExt, SystemExt}; // Labels to apply to containers const LABEL_NODE_ID: &str = "com.aluminous.cacophony.node-id"; const LABEL_ALLOCATION_ID: &str = "com.aluminous.cacophony.allocation-id"; /// Details about the current resource usage of a node #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct NodeResources { pub total_memory: u64, pub used_memory: u64, pub cpu_usage: Vec<f32>, } /// Updates the local state (running containers) to match the cluster state. #[derive(Default)] pub struct
{ state: ClusterState, node_id: NodeId, system: sysinfo::System, } impl Executor { fn join<S: ToSocketAddrs>( &mut self, join_host: S, local_port: u16, ) -> impl Future<Item = (), Error = Error> { let join_host = join_host.to_socket_addrs().unwrap().next().unwrap(); info!("Attempting to join cluster at {}", join_host); client::post(format!( "http://{}/cluster/node/{}", join_host, self.node_id )) .json(local_port) .unwrap() .send() .from_err() .and_then(|res| { if res.status().is_success() { info!("Announced our presence to cluster."); Ok(()) } else { Err(format_err!("Failed to join cluster: {:?}", res)) } }) } /// Update currently running containers to match allocated services. fn update_state( &mut self, state: ClusterState, ) -> impl ActorFuture<Item = (), Error = (), Actor = Self> { self.state = state; let docker = Docker::new(); docker .containers() .list( // Get all the containers created by this node &ContainerListOptions::builder() .filter(vec![ContainerFilter::Label( String::from(LABEL_NODE_ID), self.node_id.clone(), )]) .all() .build(), ) .map_err(|_| ()) .into_actor(self) .and_then(move |containers, executor, _ctx| { let desired_allocs: HashMap<&str, &Allocation> = executor .state .allocations .values() .filter(|a| a.node_id == executor.node_id) .map(|a| (&*a.allocation_id, a)) .collect(); let current_containers: HashMap<&str, &str> = containers .iter() .map(|c| (&*c.labels[LABEL_ALLOCATION_ID], &*c.id)) .collect(); // Remove containers which exist but aren't part of an allocataion let remove_fut: Vec<_> = containers .iter() .filter(|container| { !desired_allocs.contains_key(&*container.labels[LABEL_ALLOCATION_ID]) }) .map(|container| { docker .containers() .get(&container.id) .remove(RmContainerOptions::builder().force(true).build()) .from_err() }) .collect(); // Create containers which are allocated but aren't known to Docker let create_fut: Vec<_> = desired_allocs .iter() .filter(|(id, _)|!current_containers.contains_key(*id)) .map(|(_, alloc)| executor.create_container(alloc)) .collect(); info!( "Updating running containers: {} -> {} (create {}, kill {})", containers.len(), desired_allocs.len(), create_fut.len(), remove_fut.len() ); join_all(create_fut) .join(join_all(remove_fut)) .then(|res| check_err("Execute containers", res)) .into_actor(executor) }) } /// Create and start a container for an allocation. Pulls the image if needed. 
fn create_container(&self, alloc: &Allocation) -> Box<Future<Item = (), Error = Error>> { let docker = Docker::new(); let job_services = &self.state.jobs.get(&alloc.job_id); let service = match job_services.and_then(|j| j.services.get(&alloc.service_name)) { Some(service) => service, None => { return Box::new(err(format_err!( "Service '{}' '{}' allocated but not defined", alloc.job_id, alloc.service_name ))); } }; let image = if service.image.contains(':') { service.image.clone() } else { format!("{}:latest", service.image) }; let mut labels = HashMap::new(); labels.insert(LABEL_ALLOCATION_ID, &*alloc.allocation_id); labels.insert(LABEL_NODE_ID, &self.node_id); let pull_opts = PullOptions::builder().image(&*image).build(); let create_opts = service .build_container_options() .unwrap() .labels(&labels) .restart_policy("on-failure", 4) .build(); Box::new( docker .images() .get(&image) .inspect() .map(move |_| info!("Image already pulled: {:?}", image)) .or_else(move |_| { docker.images().pull(&pull_opts).for_each(|p| { debug!("Pull: {:?}", p); Ok(()) }) }) .and_then(move |_| Docker::new().containers().create(&create_opts)) .and_then(|res| Docker::new().containers().get(&*res.id).start()) .from_err(), ) } } impl Actor for Executor { type Context = Context<Self>; } impl Supervised for Executor {} impl SystemService for Executor {} /// Fire-and-forget command messages for Executor #[derive(Clone, Debug)] pub enum ExecutorCommand { UpdateState(ClusterState), JoinCluster { local_port: u16, join_addr: SocketAddr, }, SetNodeId(NodeId), } impl Message for ExecutorCommand { type Result = Result<(), Error>; } impl Handler<ExecutorCommand> for Executor { type Result = ResponseFuture<(), Error>; fn handle(&mut self, cmd: ExecutorCommand, ctx: &mut Context<Self>) -> Self::Result { debug!("Executor handling command: {:?}", cmd); match cmd { ExecutorCommand::UpdateState(state) => { ctx.spawn(self.update_state(state)); Box::new(ok(())) } ExecutorCommand::JoinCluster { local_port, join_addr, } => Box::new(self.join(join_addr, local_port)), ExecutorCommand::SetNodeId(node_id) => { self.node_id = node_id; Box::new(ok(())) } } } } /// Get the address of the master node (if this node is not master) pub struct GetRemoteMaster; impl Message for GetRemoteMaster { type Result = Result<Option<SocketAddr>, Error>; } impl Handler<GetRemoteMaster> for Executor { type Result = Result<Option<SocketAddr>, Error>; fn handle(&mut self, _: GetRemoteMaster, _: &mut Context<Self>) -> Self::Result { match self.state.master_node() { Some(master) => { if master.node_id == self.node_id { Ok(None) } else { Ok(Some(master.cluster_address)) } } None => Err(err_msg("Master unknown.")), } } } /// Message requesting resource usage of the local node pub struct GetNodeResources; impl Message for GetNodeResources { type Result = Result<NodeResources, Error>; } impl Handler<GetNodeResources> for Executor { type Result = Result<NodeResources, Error>; fn handle(&mut self, _: GetNodeResources, _: &mut Context<Self>) -> Self::Result { self.system.refresh_system(); Ok(NodeResources { total_memory: self.system.get_total_memory(), used_memory: self.system.get_used_memory(), cpu_usage: self.system.get_processor_list()[1..] .iter() .map(|p| p.get_cpu_usage()) .collect(), }) } } /// Logs the result of async fire-and-forget futures. 
pub fn check_err<T, U>(msg: &str, res: Result<T, U>) -> impl Future<Item = (), Error = ()> where T: Debug, U: Debug, { match res { Ok(ok_res) => debug!("{}: {:?}", msg, ok_res), Err(err_res) => error!("{}: {:?}", msg, err_res), }; ok(()) } #[cfg(test)] mod test { use crate::executor::*; use crate::test_support::*; #[test] fn test_node_resources() { with_node("127.0.0.1:9001", || { Executor::from_registry() .send(GetNodeResources) .and_then(|res| { let resources = res.expect("Get resources failed"); assert!(resources.total_memory - resources.used_memory > 0); assert!(!resources.cpu_usage.is_empty()); Ok(()) }) }); } }
Executor
identifier_name
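The core of `update_state` above is a set difference between the allocations scheduled for this node and the containers currently running. A dependency-free sketch of that reconciliation step, with plain maps standing in for the Docker and actix types:

use std::collections::HashMap;

// Given desired allocations (id -> image) and current containers
// (allocation id -> container id), compute what to create and what to remove.
fn reconcile<'a>(
    desired: &'a HashMap<&'a str, &'a str>,
    current: &'a HashMap<&'a str, &'a str>,
) -> (Vec<&'a str>, Vec<&'a str>) {
    // Allocated but not yet known to Docker -> create.
    let create = desired.keys().filter(|id| !current.contains_key(*id)).copied().collect();
    // Running but no longer part of an allocation -> remove.
    let remove = current
        .iter()
        .filter(|(id, _)| !desired.contains_key(*id))
        .map(|(_, container_id)| *container_id)
        .collect();
    (create, remove)
}

fn main() {
    let desired: HashMap<_, _> = [("alloc-1", "nginx:latest"), ("alloc-2", "redis:5")]
        .iter().copied().collect();
    let current: HashMap<_, _> = [("alloc-2", "c-200"), ("alloc-9", "c-900")]
        .iter().copied().collect();
    let (create, remove) = reconcile(&desired, &current);
    assert_eq!(create, vec!["alloc-1"]); // scheduled but not running
    assert_eq!(remove, vec!["c-900"]);   // running but no longer scheduled
}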
executor.rs
//! The executor runs on all nodes and is responsible for reconciling the requested state (from the //! cluster's master) with the locally running containers. //! //! When a new state is received, it queries Docker Engine to see if: //! 1) any scheduled containers are not currently running //! 2) any running containers are no longer scheduled //! //! This actor is not responsible for modifying the cluster state. For that, see Scheduler. use crate::scheduler::*; use actix::fut::{ActorFuture, WrapFuture}; use actix::prelude::*; use actix::registry::SystemService; use actix_web::client; use failure::{err_msg, Error}; use futures::future::{err, join_all, ok, Future}; use futures::stream::Stream; use shiplift::builder::*; use shiplift::Docker; use std::collections::HashMap; use std::fmt::Debug; use std::net::{SocketAddr, ToSocketAddrs}; use sysinfo::{ProcessorExt, SystemExt}; // Labels to apply to containers const LABEL_NODE_ID: &str = "com.aluminous.cacophony.node-id"; const LABEL_ALLOCATION_ID: &str = "com.aluminous.cacophony.allocation-id"; /// Details about the current resource usage of a node #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct NodeResources { pub total_memory: u64, pub used_memory: u64, pub cpu_usage: Vec<f32>, } /// Updates the local state (running containers) to match the cluster state. #[derive(Default)] pub struct Executor { state: ClusterState, node_id: NodeId, system: sysinfo::System, } impl Executor { fn join<S: ToSocketAddrs>( &mut self, join_host: S, local_port: u16, ) -> impl Future<Item = (), Error = Error> { let join_host = join_host.to_socket_addrs().unwrap().next().unwrap(); info!("Attempting to join cluster at {}", join_host); client::post(format!( "http://{}/cluster/node/{}", join_host, self.node_id )) .json(local_port) .unwrap() .send() .from_err() .and_then(|res| { if res.status().is_success() { info!("Announced our presence to cluster."); Ok(()) } else { Err(format_err!("Failed to join cluster: {:?}", res)) } }) } /// Update currently running containers to match allocated services.
fn update_state( &mut self, state: ClusterState, ) -> impl ActorFuture<Item = (), Error = (), Actor = Self> { self.state = state; let docker = Docker::new(); docker .containers() .list( // Get all the containers created by this node &ContainerListOptions::builder() .filter(vec![ContainerFilter::Label( String::from(LABEL_NODE_ID), self.node_id.clone(), )]) .all() .build(), ) .map_err(|_| ()) .into_actor(self) .and_then(move |containers, executor, _ctx| { let desired_allocs: HashMap<&str, &Allocation> = executor .state .allocations .values() .filter(|a| a.node_id == executor.node_id) .map(|a| (&*a.allocation_id, a)) .collect(); let current_containers: HashMap<&str, &str> = containers .iter() .map(|c| (&*c.labels[LABEL_ALLOCATION_ID], &*c.id)) .collect(); // Remove containers which exist but aren't part of an allocataion let remove_fut: Vec<_> = containers .iter() .filter(|container| { !desired_allocs.contains_key(&*container.labels[LABEL_ALLOCATION_ID]) }) .map(|container| { docker .containers() .get(&container.id) .remove(RmContainerOptions::builder().force(true).build()) .from_err() }) .collect(); // Create containers which are allocated but aren't known to Docker let create_fut: Vec<_> = desired_allocs .iter() .filter(|(id, _)|!current_containers.contains_key(*id)) .map(|(_, alloc)| executor.create_container(alloc)) .collect(); info!( "Updating running containers: {} -> {} (create {}, kill {})", containers.len(), desired_allocs.len(), create_fut.len(), remove_fut.len() ); join_all(create_fut) .join(join_all(remove_fut)) .then(|res| check_err("Execute containers", res)) .into_actor(executor) }) } /// Create and start a container for an allocation. Pulls the image if needed. fn create_container(&self, alloc: &Allocation) -> Box<Future<Item = (), Error = Error>> { let docker = Docker::new(); let job_services = &self.state.jobs.get(&alloc.job_id); let service = match job_services.and_then(|j| j.services.get(&alloc.service_name)) { Some(service) => service, None => { return Box::new(err(format_err!( "Service '{}' '{}' allocated but not defined", alloc.job_id, alloc.service_name ))); } }; let image = if service.image.contains(':') { service.image.clone() } else { format!("{}:latest", service.image) }; let mut labels = HashMap::new(); labels.insert(LABEL_ALLOCATION_ID, &*alloc.allocation_id); labels.insert(LABEL_NODE_ID, &self.node_id); let pull_opts = PullOptions::builder().image(&*image).build(); let create_opts = service .build_container_options() .unwrap() .labels(&labels) .restart_policy("on-failure", 4) .build(); Box::new( docker .images() .get(&image) .inspect() .map(move |_| info!("Image already pulled: {:?}", image)) .or_else(move |_| { docker.images().pull(&pull_opts).for_each(|p| { debug!("Pull: {:?}", p); Ok(()) }) }) .and_then(move |_| Docker::new().containers().create(&create_opts)) .and_then(|res| Docker::new().containers().get(&*res.id).start()) .from_err(), ) } } impl Actor for Executor { type Context = Context<Self>; } impl Supervised for Executor {} impl SystemService for Executor {} /// Fire-and-forget command messages for Executor #[derive(Clone, Debug)] pub enum ExecutorCommand { UpdateState(ClusterState), JoinCluster { local_port: u16, join_addr: SocketAddr, }, SetNodeId(NodeId), } impl Message for ExecutorCommand { type Result = Result<(), Error>; } impl Handler<ExecutorCommand> for Executor { type Result = ResponseFuture<(), Error>; fn handle(&mut self, cmd: ExecutorCommand, ctx: &mut Context<Self>) -> Self::Result { debug!("Executor handling command: {:?}", cmd); match 
cmd { ExecutorCommand::UpdateState(state) => { ctx.spawn(self.update_state(state)); Box::new(ok(())) } ExecutorCommand::JoinCluster { local_port, join_addr, } => Box::new(self.join(join_addr, local_port)), ExecutorCommand::SetNodeId(node_id) => { self.node_id = node_id; Box::new(ok(())) } } } } /// Get the address of the master node (if this node is not master) pub struct GetRemoteMaster; impl Message for GetRemoteMaster { type Result = Result<Option<SocketAddr>, Error>; } impl Handler<GetRemoteMaster> for Executor { type Result = Result<Option<SocketAddr>, Error>; fn handle(&mut self, _: GetRemoteMaster, _: &mut Context<Self>) -> Self::Result { match self.state.master_node() { Some(master) => { if master.node_id == self.node_id { Ok(None) } else
} None => Err(err_msg("Master unknown.")), } } } /// Message requesting resource usage of the local node pub struct GetNodeResources; impl Message for GetNodeResources { type Result = Result<NodeResources, Error>; } impl Handler<GetNodeResources> for Executor { type Result = Result<NodeResources, Error>; fn handle(&mut self, _: GetNodeResources, _: &mut Context<Self>) -> Self::Result { self.system.refresh_system(); Ok(NodeResources { total_memory: self.system.get_total_memory(), used_memory: self.system.get_used_memory(), cpu_usage: self.system.get_processor_list()[1..] .iter() .map(|p| p.get_cpu_usage()) .collect(), }) } } /// Logs the result of async fire-and-forget futures. pub fn check_err<T, U>(msg: &str, res: Result<T, U>) -> impl Future<Item = (), Error = ()> where T: Debug, U: Debug, { match res { Ok(ok_res) => debug!("{}: {:?}", msg, ok_res), Err(err_res) => error!("{}: {:?}", msg, err_res), }; ok(()) } #[cfg(test)] mod test { use crate::executor::*; use crate::test_support::*; #[test] fn test_node_resources() { with_node("127.0.0.1:9001", || { Executor::from_registry() .send(GetNodeResources) .and_then(|res| { let resources = res.expect("Get resources failed"); assert!(resources.total_memory - resources.used_memory > 0); assert!(!resources.cpu_usage.is_empty()); Ok(()) }) }); } }
{ Ok(Some(master.cluster_address)) }
conditional_block
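The `GetRemoteMaster` handler above distinguishes three cases: this node is the master, another node is the master, or no master is known yet. The same decision as a pure function, with a simplified stand-in for the real `ClusterState` / `NodeId` types:

use std::net::SocketAddr;

struct Node { node_id: String, cluster_address: SocketAddr }

// None = we are the master; Some(addr) = the master is remote; Err = unknown.
fn remote_master(local_id: &str, master: Option<&Node>) -> Result<Option<SocketAddr>, String> {
    match master {
        Some(m) if m.node_id == local_id => Ok(None),
        Some(m) => Ok(Some(m.cluster_address)),
        None => Err("Master unknown.".to_string()),
    }
}

fn main() {
    let master = Node { node_id: "n1".into(), cluster_address: "10.0.0.1:7000".parse().unwrap() };
    assert_eq!(remote_master("n1", Some(&master)), Ok(None));
    assert_eq!(remote_master("n2", Some(&master)), Ok(Some("10.0.0.1:7000".parse().unwrap())));
    assert!(remote_master("n2", None).is_err());
}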
executor.rs
//! The executor runs on all nodes and is responsible for reconciling the requested state (from the //! cluster's master) with the locally running containers. //! //! When a new state is received, it queries Docker Engine to see if: //! 1) any scheduled containers are not currently running //! 2) any running containers are no longer scheduled //! //! This actor is not responsible for modifying the cluster state. For that, see Scheduler. use crate::scheduler::*; use actix::fut::{ActorFuture, WrapFuture}; use actix::prelude::*; use actix::registry::SystemService; use actix_web::client; use failure::{err_msg, Error}; use futures::future::{err, join_all, ok, Future}; use futures::stream::Stream; use shiplift::builder::*; use shiplift::Docker; use std::collections::HashMap; use std::fmt::Debug; use std::net::{SocketAddr, ToSocketAddrs}; use sysinfo::{ProcessorExt, SystemExt}; // Labels to apply to containers const LABEL_NODE_ID: &str = "com.aluminous.cacophony.node-id"; const LABEL_ALLOCATION_ID: &str = "com.aluminous.cacophony.allocation-id"; /// Details about the current resource usage of a node #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct NodeResources { pub total_memory: u64, pub used_memory: u64, pub cpu_usage: Vec<f32>, } /// Updates the local state (running containers) to match the cluster state. #[derive(Default)] pub struct Executor { state: ClusterState, node_id: NodeId, system: sysinfo::System, } impl Executor { fn join<S: ToSocketAddrs>( &mut self, join_host: S, local_port: u16, ) -> impl Future<Item = (), Error = Error> { let join_host = join_host.to_socket_addrs().unwrap().next().unwrap(); info!("Attempting to join cluster at {}", join_host); client::post(format!( "http://{}/cluster/node/{}", join_host, self.node_id )) .json(local_port) .unwrap() .send() .from_err() .and_then(|res| { if res.status().is_success() { info!("Announced our presence to cluster."); Ok(()) } else { Err(format_err!("Failed to join cluster: {:?}", res)) } }) } /// Update currently running containers to match allocated services.
fn update_state( &mut self, state: ClusterState, ) -> impl ActorFuture<Item = (), Error = (), Actor = Self> { self.state = state; let docker = Docker::new(); docker .containers() .list( // Get all the containers created by this node &ContainerListOptions::builder() .filter(vec![ContainerFilter::Label( String::from(LABEL_NODE_ID), self.node_id.clone(), )]) .all() .build(), ) .map_err(|_| ()) .into_actor(self) .and_then(move |containers, executor, _ctx| { let desired_allocs: HashMap<&str, &Allocation> = executor .state .allocations .values() .filter(|a| a.node_id == executor.node_id) .map(|a| (&*a.allocation_id, a)) .collect(); let current_containers: HashMap<&str, &str> = containers .iter() .map(|c| (&*c.labels[LABEL_ALLOCATION_ID], &*c.id)) .collect(); // Remove containers which exist but aren't part of an allocataion let remove_fut: Vec<_> = containers .iter() .filter(|container| { !desired_allocs.contains_key(&*container.labels[LABEL_ALLOCATION_ID]) }) .map(|container| { docker .containers() .get(&container.id) .remove(RmContainerOptions::builder().force(true).build()) .from_err() }) .collect(); // Create containers which are allocated but aren't known to Docker let create_fut: Vec<_> = desired_allocs .iter() .filter(|(id, _)|!current_containers.contains_key(*id)) .map(|(_, alloc)| executor.create_container(alloc)) .collect(); info!( "Updating running containers: {} -> {} (create {}, kill {})", containers.len(), desired_allocs.len(), create_fut.len(), remove_fut.len() ); join_all(create_fut) .join(join_all(remove_fut)) .then(|res| check_err("Execute containers", res)) .into_actor(executor) }) } /// Create and start a container for an allocation. Pulls the image if needed. fn create_container(&self, alloc: &Allocation) -> Box<Future<Item = (), Error = Error>> { let docker = Docker::new(); let job_services = &self.state.jobs.get(&alloc.job_id); let service = match job_services.and_then(|j| j.services.get(&alloc.service_name)) { Some(service) => service, None => { return Box::new(err(format_err!( "Service '{}' '{}' allocated but not defined", alloc.job_id, alloc.service_name ))); } }; let image = if service.image.contains(':') { service.image.clone() } else { format!("{}:latest", service.image) }; let mut labels = HashMap::new(); labels.insert(LABEL_ALLOCATION_ID, &*alloc.allocation_id); labels.insert(LABEL_NODE_ID, &self.node_id); let pull_opts = PullOptions::builder().image(&*image).build(); let create_opts = service .build_container_options() .unwrap() .labels(&labels) .restart_policy("on-failure", 4) .build(); Box::new( docker .images() .get(&image)
.map(move |_| info!("Image already pulled: {:?}", image)) .or_else(move |_| { docker.images().pull(&pull_opts).for_each(|p| { debug!("Pull: {:?}", p); Ok(()) }) }) .and_then(move |_| Docker::new().containers().create(&create_opts)) .and_then(|res| Docker::new().containers().get(&*res.id).start()) .from_err(), ) } } impl Actor for Executor { type Context = Context<Self>; } impl Supervised for Executor {} impl SystemService for Executor {} /// Fire-and-forget command messages for Executor #[derive(Clone, Debug)] pub enum ExecutorCommand { UpdateState(ClusterState), JoinCluster { local_port: u16, join_addr: SocketAddr, }, SetNodeId(NodeId), } impl Message for ExecutorCommand { type Result = Result<(), Error>; } impl Handler<ExecutorCommand> for Executor { type Result = ResponseFuture<(), Error>; fn handle(&mut self, cmd: ExecutorCommand, ctx: &mut Context<Self>) -> Self::Result { debug!("Executor handling command: {:?}", cmd); match cmd { ExecutorCommand::UpdateState(state) => { ctx.spawn(self.update_state(state)); Box::new(ok(())) } ExecutorCommand::JoinCluster { local_port, join_addr, } => Box::new(self.join(join_addr, local_port)), ExecutorCommand::SetNodeId(node_id) => { self.node_id = node_id; Box::new(ok(())) } } } } /// Get the address of the master node (if this node is not master) pub struct GetRemoteMaster; impl Message for GetRemoteMaster { type Result = Result<Option<SocketAddr>, Error>; } impl Handler<GetRemoteMaster> for Executor { type Result = Result<Option<SocketAddr>, Error>; fn handle(&mut self, _: GetRemoteMaster, _: &mut Context<Self>) -> Self::Result { match self.state.master_node() { Some(master) => { if master.node_id == self.node_id { Ok(None) } else { Ok(Some(master.cluster_address)) } } None => Err(err_msg("Master unknown.")), } } } /// Message requesting resource usage of the local node pub struct GetNodeResources; impl Message for GetNodeResources { type Result = Result<NodeResources, Error>; } impl Handler<GetNodeResources> for Executor { type Result = Result<NodeResources, Error>; fn handle(&mut self, _: GetNodeResources, _: &mut Context<Self>) -> Self::Result { self.system.refresh_system(); Ok(NodeResources { total_memory: self.system.get_total_memory(), used_memory: self.system.get_used_memory(), cpu_usage: self.system.get_processor_list()[1..] .iter() .map(|p| p.get_cpu_usage()) .collect(), }) } } /// Logs the result of async fire-and-forget futures. pub fn check_err<T, U>(msg: &str, res: Result<T, U>) -> impl Future<Item = (), Error = ()> where T: Debug, U: Debug, { match res { Ok(ok_res) => debug!("{}: {:?}", msg, ok_res), Err(err_res) => error!("{}: {:?}", msg, err_res), }; ok(()) } #[cfg(test)] mod test { use crate::executor::*; use crate::test_support::*; #[test] fn test_node_resources() { with_node("127.0.0.1:9001", || { Executor::from_registry() .send(GetNodeResources) .and_then(|res| { let resources = res.expect("Get resources failed"); assert!(resources.total_memory - resources.used_memory > 0); assert!(!resources.cpu_usage.is_empty()); Ok(()) }) }); } }
.inspect()
random_line_split
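`create_container` above defaults an image reference without a tag to `:latest`. A small sketch of that normalization; note that the simple `contains(':')` test, like the original, would also treat a registry port such as `localhost:5000/app` as an already-tagged image:

fn normalize_image(image: &str) -> String {
    if image.contains(':') {
        image.to_string()
    } else {
        format!("{}:latest", image)
    }
}

fn main() {
    assert_eq!(normalize_image("redis"), "redis:latest");
    assert_eq!(normalize_image("redis:5"), "redis:5");
    // A registry port already counts as a ':' here, so no tag is appended.
    assert_eq!(normalize_image("localhost:5000/app"), "localhost:5000/app");
}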
messages.rs
//! Structures for some of the messages used in the Marionette protocol, these can //! be used with the traits in serde to convert into the corresponding json. //! #![allow(non_snake_case)] use std::fmt; use std::path::Path; use std::collections::HashMap; use serde::{Serialize, Serializer, Deserialize, Deserializer}; use serde::ser::SerializeStruct; use serde_json::{Value, to_value}; use serde::de::{Visitor, MapAccess}; use serde::de::Error as DeError; use super::MarionetteError; #[derive(Deserialize, Debug)] pub struct ServerInfo { pub marionetteProtocol: u64, } #[derive(Deserialize, Debug)] pub struct ErrorObject { pub error: String, pub message: String, pub stacktrace: String, } pub enum Capability { PageLoadStrategy(String), } #[derive(Serialize, Debug)] pub struct CapabilityRequest { requiredCapabilities: HashMap<String, Value>, } #[derive(Deserialize, Debug)] pub struct Capabilities { pub timeouts: Option<Timeouts>, } #[derive(Serialize, Debug)] pub struct NewSessionRequest { capabilities: CapabilityRequest, } impl NewSessionRequest { pub fn new() -> Self { NewSessionRequest { capabilities: CapabilityRequest { requiredCapabilities: HashMap::new(), } } } pub fn required(&mut self, cap: Capability) { match cap { Capability::PageLoadStrategy(s) => self.capabilities.requiredCapabilities.insert("pageLoadStrategy".to_string(), Value::String(s)), }; } } #[derive(Deserialize, Debug)] pub struct NewSessionResponse { pub sessionId: String, pub capabilities: Capabilities, } #[derive(Deserialize, Debug, Serialize)] pub struct Empty {} /// Sets various timeout parameters (in ms) #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] pub struct Timeouts { /// when to interrupt a script that is being evaluated pub script: u64, /// the timeout limit used to interrupt navigation of the browsing context pub pageLoad: u64, /// the timeout of when to abort when locating an element pub implicit: u64, } /// Some responses use a type wrapped in a json object /// with the value attribute #[derive(Deserialize, Serialize, Debug)] pub struct ResponseValue<T> { pub value: T, } #[derive(Serialize, Debug)] pub struct GetCommand { pub url: String, } impl GetCommand { pub fn from(url: &str) -> Self { Self { url: url.to_owned() } } } /// A log message to push to the marionette server. The message /// includes an arbitrary level (INFO, DEBUG, etc). #[derive(Serialize, Debug)] pub struct LogMsg { value: String, level: String, } impl LogMsg { pub fn new(msg: &str, lvl: &str) -> Self { LogMsg { value: msg.to_owned(), level: lvl.to_owned(), } } } /// A log entry as returned by the getLogs command. This includes a message, /// an arbitrary log level and a date. #[derive(Deserialize, Debug)] pub struct LogEntry(String, String, String); impl LogEntry { pub fn level(&self) -> &str { &self.0 } pub fn msg(&self) -> &str { &self.1 } } /// An opaque handle to a window /// /// This is deserialized from a regular string. But serialization creates /// an object `{'name': 'handle'}`. 
#[derive(Deserialize, Debug, PartialEq)] pub struct WindowHandle(String); impl WindowHandle { pub fn from_str(handle: &str) -> Self { WindowHandle(handle.to_owned()) } } impl fmt::Display for WindowHandle { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl Serialize for WindowHandle { fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { let mut ss = s.serialize_struct("WindowHandle", 2)?; ss.serialize_field("name", &self.0)?; // Starting with firefox 81, name is ignored and // handle is used instead ss.serialize_field("handle", &self.0)?; ss.end() } } /// The execution context pub type ContextValue = ResponseValue<String>; #[derive(Serialize, Debug)] pub struct Script { script: String, sandbox: String, args: Value, scriptTimeout: Option<u64>, } impl Script { pub fn new(src: &str) -> Self { Script { script: src.to_owned(), sandbox: "default".to_owned(), // execute_script accepts null here, but execute_async_script does not // default to an empty array args: Value::Array(Vec::new()), scriptTimeout: None, } } /// Set arguments for this script. This is usually an array that /// is used as the `arguments` variable. pub fn arguments<S: Serialize>(&mut self, args: S) -> Result<(), MarionetteError> { self.args = to_value(args)?; Ok(()) } /// Execute the script in a named sandbox pub fn sandbox(&mut self, name: &str) { self.sandbox = name.to_owned() } /// Set execution timeout for script (ms) /// /// This value overrides the global scriptTimeout. /// /// This option was removed from firefox in Jan/2019, see /// /// 9ed472d43600ca6ba1ced8a563dbaa4abdef5eaa /// /// https://bugzilla.mozilla.org/show_bug.cgi?id=1510929 /// https://phabricator.services.mozilla.com/D15584 /// #[deprecated = "Unsupported since Jan/2019 see bug 1510929"] pub fn timeout(&mut self, timeout_ms: u64) { self.scriptTimeout = Some(timeout_ms) } } #[derive(Debug)] pub enum QueryMethod { Id, Name, ClassName, TagName, CssSelector, LinkText, PartialLinkText, XPath, } impl Serialize for QueryMethod { fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { match self { &QueryMethod::Id => s.serialize_str("id"), &QueryMethod::Name => s.serialize_str("name"), &QueryMethod::ClassName => s.serialize_str("class name"), &QueryMethod::TagName => s.serialize_str("tag name"), &QueryMethod::CssSelector => s.serialize_str("css selector"), &QueryMethod::LinkText => s.serialize_str("link text"), &QueryMethod::PartialLinkText => s.serialize_str("partial link text"), &QueryMethod::XPath => s.serialize_str("xpath"), } } } #[derive(Serialize, Debug)] pub struct FindElementQuery { /// A query pub value: String, /// The method used to perform the query pub using: QueryMethod, // In recent versions of firefox (60) this field must not // be set to null, skip it instead #[serde(skip_serializing_if = "Option::is_none")] pub element: Option<String>, } #[derive(PartialEq, Debug, Clone)] pub struct ElementRef { pub reference: String, } impl ElementRef { pub fn from_str(handle: &str) -> ElementRef { ElementRef { reference: handle.to_string() } } } impl Serialize for ElementRef { fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { let mut ss = s.serialize_struct("ElementRef", 2)?; ss.serialize_field("ELEMENT", &self.reference)?; ss.serialize_field("element-6066-11e4-a52e-4f735466cecf", &self.reference)?; ss.end() } } impl<'a> Deserialize<'a> for ElementRef { fn deserialize<D: Deserializer<'a>>(d: D) -> Result<Self, D::Error> { enum Field { Reference, Ignored } impl<'b> Deserialize<'b> for
Field { fn deserialize<D: Deserializer<'b>>(d: D) -> Result<Self, D::Error> { struct FieldVisitor; impl<'c> Visitor<'c> for FieldVisitor { type Value = Field; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("element-6066-11e4-a52e-4f735466cecf") } fn visit_str<E: DeError>(self, value: &str) -> Result<Field, E> { match value { "element-6066-11e4-a52e-4f735466cecf" => Ok(Field::Reference), // Ignore all other fields _ => Ok(Field::Ignored), } } } d.deserialize_identifier(FieldVisitor) } } struct ElementRefVisitor; impl<'d> Visitor<'d> for ElementRefVisitor { type Value = ElementRef; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("struct ElementRef") } fn visit_map<V>(self, mut visitor: V) -> Result<ElementRef, V::Error> where V: MapAccess<'d> { let mut reference = None; while let Some(key) = visitor.next_key()? { match key { Field::Reference => { if reference.is_some() { return Err(DeError::duplicate_field("element-6066-11e4-a52e-4f735466cecf")); } reference = Some(visitor.next_value()?); } // An ignored key must still have its value consumed, or the deserializer's state gets out of sync. Field::Ignored => { let _ = visitor.next_value::<::serde::de::IgnoredAny>()?; } } } match reference { Some(r) => Ok(ElementRef { reference: r }), None => return Err(DeError::missing_field("element-6066-11e4-a52e-4f735466cecf")), } } } const FIELDS: &'static [&'static str] = &["element-6066-11e4-a52e-4f735466cecf"]; d.deserialize_struct("ElementRef", FIELDS, ElementRefVisitor) } } /// Element operations use a named id to select the Element /// and other attributes to specify the operation. #[derive(Serialize, Debug)] pub struct ElementOp { /// The element identifier pub id: String, /// The name of the attribute/property pub name: Option<String>, } /// A `switchToFrame` request #[derive(Serialize, Debug)] pub struct
{ focus: bool, element: Option<String>, } impl FrameSwitch { /// Switch to the top level frame pub fn top(focus: bool) -> Self { FrameSwitch { focus: focus, element: None, } } /// Switch to the frame given by passed element pub fn from_element(focus: bool, element: Option<ElementRef>) -> Self { FrameSwitch { focus: focus, element: element.map(|elem| elem.reference.to_owned()), } } } #[derive(Serialize, Debug)] pub struct AddonInstall<'a> { pub path: &'a Path, } #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Cookie { pub name: String, pub value: String, #[serde(skip_serializing_if = "Option::is_none")] pub path: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub domain: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub secure: Option<bool>, } #[derive(Serialize, Debug, PartialEq)] pub struct AddCookie<'a> { pub cookie: &'a Cookie, }
FrameSwitch
identifier_name
messages.rs
//! Structures for some of the messages used in the Marionette protocol; these can //! be used with the serde traits to convert into the corresponding JSON. //! #![allow(non_snake_case)] use std::fmt; use std::path::Path; use std::collections::HashMap; use serde::{Serialize, Serializer, Deserialize, Deserializer}; use serde::ser::SerializeStruct; use serde_json::{Value, to_value}; use serde::de::{Visitor, MapAccess}; use serde::de::Error as DeError; use super::MarionetteError; #[derive(Deserialize, Debug)] pub struct ServerInfo { pub marionetteProtocol: u64, } #[derive(Deserialize, Debug)] pub struct ErrorObject { pub error: String, pub message: String, pub stacktrace: String, } pub enum Capability { PageLoadStrategy(String), } #[derive(Serialize, Debug)] pub struct CapabilityRequest { requiredCapabilities: HashMap<String, Value>, } #[derive(Deserialize, Debug)] pub struct Capabilities { pub timeouts: Option<Timeouts>, } #[derive(Serialize, Debug)] pub struct NewSessionRequest { capabilities: CapabilityRequest, } impl NewSessionRequest { pub fn new() -> Self { NewSessionRequest { capabilities: CapabilityRequest { requiredCapabilities: HashMap::new(), } } } pub fn required(&mut self, cap: Capability) { match cap { Capability::PageLoadStrategy(s) => self.capabilities.requiredCapabilities.insert("pageLoadStrategy".to_string(), Value::String(s)), }; } } #[derive(Deserialize, Debug)] pub struct NewSessionResponse { pub sessionId: String, pub capabilities: Capabilities, } #[derive(Deserialize, Debug, Serialize)] pub struct Empty {} /// Sets various timeout parameters (in ms) #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] pub struct Timeouts { /// when to interrupt a script that is being evaluated pub script: u64, /// the timeout limit used to interrupt navigation of the browsing context pub pageLoad: u64, /// the timeout after which locating an element is aborted pub implicit: u64, } /// Some responses use a type wrapped in a JSON object /// with a `value` attribute #[derive(Deserialize, Serialize, Debug)] pub struct ResponseValue<T> { pub value: T, } #[derive(Serialize, Debug)] pub struct GetCommand { pub url: String, } impl GetCommand { pub fn from(url: &str) -> Self { Self { url: url.to_owned() } } } /// A log message to push to the marionette server. The message /// includes an arbitrary level (INFO, DEBUG, etc). #[derive(Serialize, Debug)] pub struct LogMsg { value: String, level: String, } impl LogMsg { pub fn new(msg: &str, lvl: &str) -> Self { LogMsg { value: msg.to_owned(), level: lvl.to_owned(), } } } /// A log entry as returned by the getLogs command. This includes a message, /// an arbitrary log level and a date. #[derive(Deserialize, Debug)] pub struct LogEntry(String, String, String); impl LogEntry { pub fn level(&self) -> &str { &self.0 } pub fn msg(&self) -> &str { &self.1 } } /// An opaque handle to a window /// /// This is deserialized from a regular string, but serialization creates /// an object `{'name': 'handle'}`. 
#[derive(Deserialize, Debug, PartialEq)] pub struct WindowHandle(String); impl WindowHandle { pub fn from_str(handle: &str) -> Self { WindowHandle(handle.to_owned()) } } impl fmt::Display for WindowHandle { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl Serialize for WindowHandle { fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { let mut ss = s.serialize_struct("WindowHandle", 2)?; ss.serialize_field("name", &self.0)?; // Starting with firefox 81, name is ignored and // handle is used instead ss.serialize_field("handle", &self.0)?; ss.end() } } /// The execution context pub type ContextValue = ResponseValue<String>; #[derive(Serialize, Debug)] pub struct Script { script: String, sandbox: String, args: Value, scriptTimeout: Option<u64>, } impl Script { pub fn new(src: &str) -> Self { Script { script: src.to_owned(), sandbox: "default".to_owned(), // execute_script accepts null here, but execute_async_script does not // default to an empty array args: Value::Array(Vec::new()), scriptTimeout: None, } } /// Set arguments for this script. This is usually an array that /// is used as the `arguments` variable. pub fn arguments<S: Serialize>(&mut self, args: S) -> Result<(), MarionetteError> { self.args = to_value(args)?; Ok(()) } /// Execute the script in a named sandbox pub fn sandbox(&mut self, name: &str) { self.sandbox = name.to_owned() } /// Set execution timeout for script (ms) /// /// This value overrides the global scriptTimeout. /// /// This option was removed from firefox in Jan/2019, see /// /// 9ed472d43600ca6ba1ced8a563dbaa4abdef5eaa /// /// https://bugzilla.mozilla.org/show_bug.cgi?id=1510929 /// https://phabricator.services.mozilla.com/D15584 /// #[deprecated = "Unsupported since Jan/2019, see bug 1510929"] pub fn timeout(&mut self, timeout_ms: u64) { self.scriptTimeout = Some(timeout_ms) } } #[derive(Debug)] pub enum QueryMethod { Id, Name, ClassName, TagName, CssSelector, LinkText, PartialLinkText, XPath, } impl Serialize for QueryMethod { fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { match self { &QueryMethod::Id => s.serialize_str("id"), &QueryMethod::Name => s.serialize_str("name"), &QueryMethod::ClassName => s.serialize_str("class name"), &QueryMethod::TagName => s.serialize_str("tag name"), &QueryMethod::CssSelector => s.serialize_str("css selector"), &QueryMethod::LinkText => s.serialize_str("link text"), &QueryMethod::PartialLinkText => s.serialize_str("partial link text"), &QueryMethod::XPath => s.serialize_str("xpath"), } } } #[derive(Serialize, Debug)] pub struct FindElementQuery { /// A query pub value: String, /// The method used to perform the query pub using: QueryMethod, // In recent versions of firefox (60) this field must not // be set to null, skip it instead #[serde(skip_serializing_if = "Option::is_none")] pub element: Option<String>, } #[derive(PartialEq, Debug, Clone)] pub struct ElementRef { pub reference: String, } impl ElementRef { pub fn from_str(handle: &str) -> ElementRef { ElementRef { reference: handle.to_string() } } } impl Serialize for ElementRef { fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { let mut ss = s.serialize_struct("ElementRef", 2)?; ss.serialize_field("ELEMENT", &self.reference)?; ss.serialize_field("element-6066-11e4-a52e-4f735466cecf", &self.reference)?; ss.end() } } impl<'a> Deserialize<'a> for ElementRef { fn deserialize<D: Deserializer<'a>>(d: D) -> Result<Self, D::Error> { enum Field { Reference, Ignored } impl<'b> Deserialize<'b> for 
Field { fn deserialize<D: Deserializer<'b>>(d: D) -> Result<Self, D::Error> { struct FieldVisitor; impl<'c> Visitor<'c> for FieldVisitor { type Value = Field; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("element-6066-11e4-a52e-4f735466cecf") } fn visit_str<E: DeError>(self, value: &str) -> Result<Field, E> { match value { "element-6066-11e4-a52e-4f735466cecf" => Ok(Field::Reference), // Ignore all other fields _ => Ok(Field::Ignored), } } } d.deserialize_identifier(FieldVisitor) } } struct ElementRefVisitor; impl<'d> Visitor<'d> for ElementRefVisitor { type Value = ElementRef; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("struct ElementRef") } fn visit_map<V>(self, mut visitor: V) -> Result<ElementRef, V::Error> where V: MapAccess<'d> { let mut reference = None; while let Some(key) = visitor.next_key()? { match key { Field::Reference => { if reference.is_some() { return Err(DeError::duplicate_field("element-6066-11e4-a52e-4f735466cecf")); } reference = Some(visitor.next_value()?); } Field::Ignored => (), } } match reference { Some(r) => Ok(ElementRef { reference: r }), None => return Err(DeError::missing_field("element-6066-11e4-a52e-4f735466cecf")), } } } const FIELDS: &'static [&'static str] = &["element-6066-11e4-a52e-4f735466cecf"]; d.deserialize_struct("ElementRef", FIELDS, ElementRefVisitor) } } /// Element operations use a named id to select the element /// and other attributes to specify the operation. #[derive(Serialize, Debug)] pub struct ElementOp { /// The element identifier pub id: String, /// The name of the attribute/property pub name: Option<String>, } /// A `switchToFrame` request #[derive(Serialize, Debug)] pub struct
/// Switch to the frame given by the passed element pub fn from_element(focus: bool, element: Option<ElementRef>) -> Self { FrameSwitch { focus: focus, element: element.map(|elem| elem.reference.to_owned()), } } } #[derive(Serialize, Debug)] pub struct AddonInstall<'a> { pub path: &'a Path, } #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Cookie { pub name: String, pub value: String, #[serde(skip_serializing_if = "Option::is_none")] pub path: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub domain: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub secure: Option<bool>, } #[derive(Serialize, Debug, PartialEq)] pub struct AddCookie<'a> { pub cookie: &'a Cookie, }
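The asymmetric serde behaviour of `WindowHandle` documented above (deserialized from a bare string, serialized to an object carrying both the pre-81 `name` key and the post-81 `handle` key) is easy to miss, so here is a minimal round-trip sketch. It assumes `serde_json` is available and `WindowHandle` is in scope; the handle value `"42"` is invented for illustration.

```
use serde_json::json;

fn window_handle_roundtrip() -> serde_json::Result<()> {
    // The server sends handles as plain JSON strings.
    let handle: WindowHandle = serde_json::from_str("\"42\"")?;

    // The client serializes an object with both keys, so that
    // firefox < 81 (which reads `name`) and firefox >= 81 (which
    // reads `handle`) both accept the message.
    let value = serde_json::to_value(&handle)?;
    assert_eq!(value, json!({ "name": "42", "handle": "42" }));
    Ok(())
}
```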
random_line_split
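To make the query types concrete, a sketch of the JSON that `FindElementQuery` produces, again assuming `serde_json`; the CSS selector is made up. It shows how `QueryMethod` serializes to the WebDriver locator-strategy strings, and how a `None` element is skipped entirely rather than serialized as null (which recent firefox versions reject).

```
fn find_element_json() -> serde_json::Result<()> {
    let query = FindElementQuery {
        value: ".login".to_owned(),
        using: QueryMethod::CssSelector,
        element: None, // omitted from the output, never sent as null
    };
    assert_eq!(
        serde_json::to_string(&query)?,
        r#"{"value":".login","using":"css selector"}"#
    );
    Ok(())
}
```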
rpc.rs
//! Types related to the `ActorRef` Remote Procedure Call (RPC) mechanism. //! //! RPC is implemented by sending a [`RpcMessage`] to the actor, which contains //! the request message and a [`RpcResponse`]. The `RpcResponse` allows the //! receiving actor to send back a response to the sending actor. //! //! To support RPC the receiving actor needs to implement //! [`From`]`<`[`RpcMessage`]`<Req, Res>>`, where `Req` is the type of the //! request message and `Res` the type of the response. This can be done easily //! by using the [`from_message`] macro. The RPC message can then be received //! like any other message. //! //! The sending actor needs to call [`ActorRef::rpc`] with the correct request //! type. That will return an [`Rpc`] [`Future`] which returns the response to //! the call, or [`RpcError`] in case of an error.
//! [`from_message`]: crate::from_message //! //! # Examples //! //! Using RPC to communicate with another actor. //! //! ``` //! # #![feature(never_type)] //! # //! use heph::actor; //! use heph::actor_ref::{ActorRef, RpcMessage}; //! use heph::rt::{self, Runtime, ThreadLocal}; //! use heph::spawn::ActorOptions; //! use heph::supervisor::NoSupervisor; //! //! /// Message type for [`counter`]. //! struct Add(RpcMessage<usize, usize>); //! //! /// Required to support RPC. //! impl From<RpcMessage<usize, usize>> for Add { //! fn from(msg: RpcMessage<usize, usize>) -> Add { //! Add(msg) //! } //! } //! //! /// Receiving actor of the RPC. //! async fn counter(mut ctx: actor::Context<Add, ThreadLocal>) { //! // State of the counter. //! let mut count: usize = 0; //! // Receive a message like normal. //! while let Ok(Add(RpcMessage { request, response })) = ctx.receive_next().await { //! count += request; //! // Send back the current state, ignoring any errors. //! let _ = response.respond(count); //! } //! } //! //! /// Sending actor of the RPC. //! async fn requester(_: actor::Context<!, ThreadLocal>, actor_ref: ActorRef<Add>) { //! // Make the procedure call. //! let response = actor_ref.rpc(10).await; //! # assert!(response.is_ok()); //! match response { //! // We got a response. //! Ok(count) => println!("Current count: {}", count), //! // Actor failed to respond. //! Err(err) => eprintln!("Counter didn't reply: {}", err), //! } //! } //! //! # fn main() -> Result<(), rt::Error> { //! # let mut runtime = Runtime::new()?; //! # runtime.run_on_workers(|mut runtime_ref| -> Result<(), !> { //! # let counter = counter as fn(_) -> _; //! # let actor_ref = runtime_ref.spawn_local(NoSupervisor, counter, (), ActorOptions::default()); //! # //! # let requester = requester as fn(_, _) -> _; //! # runtime_ref.spawn_local(NoSupervisor, requester, actor_ref, ActorOptions::default()); //! # Ok(()) //! # })?; //! # runtime.start() //! # } //! ``` //! //! Supporting multiple procedures within the same actor is possible by making //! the message an `enum` as the example below shows. Furthermore, synchronous //! actors are supported. //! // FIXME: doesn't stop on CI. //! ```ignore //! # #![feature(never_type)] //! # //! use heph::actor::{self, SyncContext}; //! use heph::actor_ref::{ActorRef, RpcMessage}; //! use heph::from_message; //! use heph::rt::{self, Runtime, ActorOptions, SyncActorOptions}; //! use heph::supervisor::NoSupervisor; //! //! /// Message type for [`counter`]. //! enum Message { //! /// Increase the counter, returning the current state. //! Add(RpcMessage<usize, usize>), //! /// Get the current state of the counter. //! Get(RpcMessage<(), usize>), //! } //! //! // Implement the `From` trait for `Message`. //! from_message!(Message::Add(usize) -> usize); //! from_message!(Message::Get(()) -> usize); //! //! /// Receiving synchronous actor of the RPC. //! fn counter(mut ctx: SyncContext<Message>) { //! // State of the counter. //! let mut count: usize = 0; //! //! // Receive messages in a loop. //! while let Ok(msg) = ctx.receive_next() { //! match msg { //! Message::Add(RpcMessage { request, response }) => { //! count += request; //! // Send back the current state, ignoring any errors. //! let _ = response.respond(count); //! }, //! Message::Get(RpcMessage { response, .. }) => { //! // Send back the current state, ignoring any errors. //! let _ = response.respond(count); //! }, //! } //! } //! } //! //! /// Sending actor of the RPC. //! 
async fn requester(_: actor::Context<!>, actor_ref: ActorRef<Message>) { //! // Increase the counter by ten. //! // NOTE: do handle the errors correctly in practice, this is just an //! // example. //! let count = actor_ref.rpc(10).await.unwrap(); //! println!("Increased count to {}", count); //! //! // Retrieve the current count. //! let count = actor_ref.rpc(()).await.unwrap(); //! # assert_eq!(count, 10); //! println!("Current count {}", count); //! } //! //! # fn main() -> Result<(), rt::Error> { //! # let mut runtime = Runtime::new()?; //! # let counter = counter as fn(_) -> _; //! # let options = SyncActorOptions::default(); //! # let actor_ref = runtime.spawn_sync_actor(NoSupervisor, counter, (), options)?; //! # runtime.run_on_workers(move |mut runtime_ref| -> Result<(), !> { //! # let requester = requester as fn(_, _) -> _; //! # runtime_ref.spawn_local(NoSupervisor, requester, actor_ref, ActorOptions::default()); //! # Ok(()) //! # })?; //! # runtime.start() //! # } //! ``` use std::error::Error; use std::fmt; use std::future::Future; use std::pin::Pin; use std::task::{self, Poll}; use inbox::oneshot::{new_oneshot, RecvOnce, Sender}; use crate::actor_ref::{ActorRef, SendError, SendValue}; /// [`Future`] that resolves to a Remote Procedure Call (RPC) response. /// /// Created by [`ActorRef::rpc`]. #[derive(Debug)] #[must_use = "futures do nothing unless you `.await` or poll them"] pub struct Rpc<'r, 'fut, M, Res> { send: Option<SendValue<'r, 'fut, M>>, recv: RecvOnce<Res>, } impl<'r, 'fut, M, Res> Rpc<'r, 'fut, M, Res> where 'r: 'fut, { /// Create a new RPC. pub(super) fn new<Req>(actor_ref: &'r ActorRef<M>, request: Req) -> Rpc<'r, 'fut, M, Res> where M: From<RpcMessage<Req, Res>>, { let (sender, receiver) = new_oneshot(); let response = RpcResponse { sender }; let msg = RpcMessage { request, response }; let send = actor_ref.send(msg); Rpc { send: Some(send), recv: receiver.recv_once(), } } } impl<'r, 'fut, M, Res> Future for Rpc<'r, 'fut, M, Res> { type Output = Result<Res, RpcError>; #[track_caller] fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<Self::Output> { // Safety: we're not moving `send` so this is safe. let send = unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.send) }.as_pin_mut(); if let Some(send) = send { match send.poll(ctx) { Poll::Ready(Ok(())) => {} Poll::Ready(Err(err)) => return Poll::Ready(Err(err.into())), Poll::Pending => return Poll::Pending, } // Don't take this branch again. // Safety: we're not moving `send` so this is safe. unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.send) }.set(None); } // Safety: we're not moving `recv` so this is safe. match unsafe { self.map_unchecked_mut(|s| &mut s.recv) }.poll(ctx) { Poll::Ready(Some(response)) => Poll::Ready(Ok(response)), Poll::Ready(None) => Poll::Ready(Err(RpcError::NoResponse)), Poll::Pending => Poll::Pending, } } } /// Error returned by [`Rpc`]. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum RpcError { /// Same error as [`SendError`]. SendError, /// Returned when the other side returned no response. NoResponse, } impl From<SendError> for RpcError { fn from(_: SendError) -> RpcError { RpcError::SendError } } impl fmt::Display for RpcError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { RpcError::SendError => SendError.fmt(f), RpcError::NoResponse => f.write_str("no RPC response"), } } } impl Error for RpcError {} /// Message type that holds an RPC request. /// /// It holds both the request (`Req`) and the way to respond [`RpcResponse`]. 
#[derive(Debug)] pub struct RpcMessage<Req, Res> { /// The request object. pub request: Req, /// A way to [`respond`] to the call. /// /// [`respond`]: RpcResponse::respond pub response: RpcResponse<Res>, } impl<Req, Res> RpcMessage<Req, Res> { /// Convenience method to handle a `Req`uest and return a `Res`ponse. /// /// The function `f` is called with [`self.request`]; the response returned by /// the function `f` is then returned to the request maker via /// [`self.response.respond`]. /// /// [`self.request`]: RpcMessage::request /// [`self.response.respond`]: RpcResponse::respond /// /// # Notes /// /// If the receiving end is [no longer connected] the function `f` is not /// called and `Ok(())` is returned instead. /// /// [no longer connected]: RpcResponse::is_connected pub fn handle<F>(self, f: F) -> Result<(), SendError> where F: FnOnce(Req) -> Res, { if self.response.is_connected() { let response = f(self.request); self.response.respond(response) } else { // If the receiving actor is no longer waiting we can skip the // request. Ok(()) } } } /// Structure to respond to an [`Rpc`] request. #[derive(Debug)] pub struct RpcResponse<Res> { sender: Sender<Res>, } impl<Res> RpcResponse<Res> { /// Respond to an RPC request. pub fn respond(self, response: Res) -> Result<(), SendError> { self.sender.try_send(response).map_err(|_| SendError) } /// Returns `false` if the receiving side is disconnected. /// /// # Notes /// /// If this method returns `true` it doesn't mean that `respond` will /// succeed; the moment this function returns, the result could already /// be invalid. pub fn is_connected(&self) -> bool { self.sender.is_connected() } }
//!
random_line_split
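A sketch of how the receiving side can use `RpcMessage::handle` instead of destructuring the message and calling `respond` manually. It reuses the `Add` message type and `counter` actor shape from the first doc example above, so those names are assumptions carried over from the docs, not new API.

```
use heph::actor;
use heph::rt::ThreadLocal;

async fn counter(mut ctx: actor::Context<Add, ThreadLocal>) {
    let mut count: usize = 0;
    while let Ok(Add(msg)) = ctx.receive_next().await {
        // `handle` checks `is_connected` first, so no response is even
        // built when the caller already dropped its `Rpc` future; any
        // send error is ignored here, as in the original example.
        let _ = msg.handle(|request| {
            count += request;
            count
        });
    }
}
```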
build.rs
use std::{ env, fs::{self, File}, io::Write, path::{Path, PathBuf}, process::{self, Command}, }; use toml::Value; let target = env::var("TARGET").expect("TARGET not set"); let (firmware, expected_target) = if cfg!(feature = "uefi_bin") { ("UEFI", "x86_64-unknown-uefi") } else if cfg!(feature = "bios_bin") { ("BIOS", "x86_64-bootloader") } else { panic!( "Either the `uefi_bin` or `bios_bin` feature must be enabled when \ the `binary` feature is enabled" ); }; if Path::new(&target) .file_stem() .expect("target has no file stem") != expected_target
let out_dir = PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR not set")); let kernel = PathBuf::from(match env::var("KERNEL") { Ok(kernel) => kernel, Err(_) => { eprintln!( "The KERNEL environment variable must be set for building the bootloader.\n\n\ Please use the `cargo builder` command for building." ); process::exit(1); } }); let kernel_file_name = kernel .file_name() .expect("KERNEL has no valid file name") .to_str() .expect("kernel file name not valid utf8"); // check that the kernel file exists assert!( kernel.exists(), "KERNEL does not exist: {}", kernel.display() ); // get access to llvm tools shipped in the llvm-tools-preview rustup component let llvm_tools = match llvm_tools::LlvmTools::new() { Ok(tools) => tools, Err(llvm_tools::Error::NotFound) => { eprintln!("Error: llvm-tools not found"); eprintln!("Maybe the rustup component `llvm-tools-preview` is missing?"); eprintln!(" Install it through: `rustup component add llvm-tools-preview`"); process::exit(1); } Err(err) => { eprintln!("Failed to retrieve llvm-tools component: {:?}", err); process::exit(1); } }; // check that kernel executable has code in it let llvm_size = llvm_tools .tool(&llvm_tools::exe("llvm-size")) .expect("llvm-size not found in llvm-tools"); let mut cmd = Command::new(llvm_size); cmd.arg(&kernel); let output = cmd.output().expect("failed to run llvm-size"); let output_str = String::from_utf8_lossy(&output.stdout); let second_line_opt = output_str.lines().skip(1).next(); let second_line = second_line_opt.expect(&format!( "unexpected llvm-size line output:\n{}", output_str )); let text_size_opt = second_line.split_ascii_whitespace().next(); let text_size = text_size_opt.expect(&format!("unexpected llvm-size output:\n{}", output_str)); if text_size == "0" { panic!("Kernel executable has an empty text section. 
Perhaps the entry point was set incorrectly?\n\n\ Kernel executable at `{}`\n", kernel.display()); } // strip debug symbols from kernel for faster loading let stripped_kernel_file_name = format!("kernel_stripped-{}", kernel_file_name); let stripped_kernel = out_dir.join(&stripped_kernel_file_name); let objcopy = llvm_tools .tool(&llvm_tools::exe("llvm-objcopy")) .expect("llvm-objcopy not found in llvm-tools"); let mut cmd = Command::new(&objcopy); cmd.arg("--strip-debug"); cmd.arg(&kernel); cmd.arg(&stripped_kernel); let exit_status = cmd .status() .expect("failed to run objcopy to strip debug symbols"); if !exit_status.success() { eprintln!("Error: Stripping debug symbols failed"); process::exit(1); } if cfg!(feature = "uefi_bin") { // write file for including kernel in binary let file_path = out_dir.join("kernel_info.rs"); let mut file = File::create(file_path).expect("failed to create kernel_info.rs"); let kernel_size = fs::metadata(&stripped_kernel) .expect("Failed to read file metadata of stripped kernel") .len(); file.write_all( format!( "const KERNEL_SIZE: usize = {}; const KERNEL_BYTES: [u8; KERNEL_SIZE] = *include_bytes!(r\"{}\");", kernel_size, stripped_kernel.display(), ) .as_bytes(), ) .expect("write to kernel_info.rs failed"); } if cfg!(feature = "bios_bin") { // wrap the kernel executable as binary in a new ELF file let stripped_kernel_file_name_replaced = stripped_kernel_file_name .replace('-', "_") .replace('.', "_"); let kernel_bin = out_dir.join(format!("kernel_bin-{}.o", kernel_file_name)); let kernel_archive = out_dir.join(format!("libkernel_bin-{}.a", kernel_file_name)); let mut cmd = Command::new(&objcopy); cmd.arg("-I").arg("binary"); cmd.arg("-O").arg("elf64-x86-64"); cmd.arg("--binary-architecture=i386:x86-64"); cmd.arg("--rename-section").arg(".data=.kernel"); cmd.arg("--redefine-sym").arg(format!( "_binary_{}_start=_kernel_start_addr", stripped_kernel_file_name_replaced )); cmd.arg("--redefine-sym").arg(format!( "_binary_{}_end=_kernel_end_addr", stripped_kernel_file_name_replaced )); cmd.arg("--redefine-sym").arg(format!( "_binary_{}_size=_kernel_size", stripped_kernel_file_name_replaced )); cmd.current_dir(&out_dir); cmd.arg(&stripped_kernel_file_name); cmd.arg(&kernel_bin); let exit_status = cmd.status().expect("failed to run objcopy"); if !exit_status.success() { eprintln!("Error: Running objcopy failed"); process::exit(1); } // create an archive for linking let ar = llvm_tools .tool(&llvm_tools::exe("llvm-ar")) .unwrap_or_else(|| { eprintln!("Failed to retrieve llvm-ar component"); eprint!("This component is available since nightly-2019-03-29, "); eprintln!("so try updating your toolchain if you're using an older nightly"); process::exit(1); }); let mut cmd = Command::new(ar); cmd.arg("crs"); cmd.arg(&kernel_archive); cmd.arg(&kernel_bin); let exit_status = cmd.status().expect("failed to run ar"); if !exit_status.success() { eprintln!("Error: Running ar failed"); process::exit(1); } // pass link arguments to rustc println!("cargo:rustc-link-search=native={}", out_dir.display()); println!( "cargo:rustc-link-lib=static=kernel_bin-{}", kernel_file_name ); } // Parse configuration from the kernel's Cargo.toml let config = match env::var("KERNEL_MANIFEST") { Err(env::VarError::NotPresent) => { panic!("The KERNEL_MANIFEST environment variable must be set for building the bootloader.\n\n\ Please use `cargo builder` for building."); } Err(env::VarError::NotUnicode(_)) => { panic!("The KERNEL_MANIFEST environment variable contains invalid unicode") } Ok(path) if 
Path::new(&path).file_name().and_then(|s| s.to_str()) != Some("Cargo.toml") => { let err = format!( "The given `--kernel-manifest` path `{}` does not \ point to a `Cargo.toml`", path, ); quote! { compile_error!(#err) } } Ok(path) if !Path::new(&path).exists() => { let err = format!( "The given `--kernel-manifest` path `{}` does not exist.", path ); quote! { compile_error!(#err) } } Ok(path) => { println!("cargo:rerun-if-changed={}", path); let contents = fs::read_to_string(&path).expect(&format!( "failed to read kernel manifest file (path: {})", path )); let manifest = contents .parse::<Value>() .expect("failed to parse kernel's Cargo.toml"); if manifest .get("dependencies") .and_then(|d| d.get("bootloader")) .is_some() { // it seems to be the correct Cargo.toml let config_table = manifest .get("package") .and_then(|table| table.get("metadata")) .and_then(|table| table.get("bootloader")) .cloned() .unwrap_or_else(|| toml::Value::Table(toml::map::Map::new())); config_table .try_into::<ParsedConfig>() .map(|c| quote! { #c }) .unwrap_or_else(|err| { let err = format!( "failed to parse bootloader config in {}:\n\n{}", path, err.to_string() ); quote! { compile_error!(#err) } }) } else { let err = format!( "no bootloader dependency in {}\n\n The \ `--kernel-manifest` path should point to the `Cargo.toml` \ of the kernel.", path ); quote! { compile_error!(#err) } } } }; // Write config to file let file_path = out_dir.join("bootloader_config.rs"); let mut file = File::create(file_path).expect("failed to create bootloader_config.rs"); file.write_all( quote::quote! { mod parsed_config { use crate::config::Config; pub const CONFIG: Config = #config; } } .to_string() .as_bytes(), ) .expect("write to bootloader_config.rs failed"); println!("cargo:rerun-if-env-changed=KERNEL"); println!("cargo:rerun-if-env-changed=KERNEL_MANIFEST"); println!("cargo:rerun-if-changed={}", kernel.display()); println!("cargo:rerun-if-changed=build.rs"); } fn val_true() -> bool { true } /// Must always be identical to the struct in `src/config.rs` /// /// This copy is needed because we can't derive Deserialize in the `src/config.rs` /// module itself, since cargo currently unifies dependencies (the `toml` crate enables /// serde's standard feature). Also, it allows separating the parsing special cases /// such as `AlignedAddress` more cleanly. #[derive(Debug, serde::Deserialize)] #[serde(rename_all = "kebab-case", deny_unknown_fields)] struct ParsedConfig { #[serde(default)] pub map_physical_memory: bool, #[serde(default)] pub map_page_table_recursively: bool, #[serde(default = "val_true")] pub map_framebuffer: bool, pub kernel_stack_size: Option<AlignedAddress>, pub physical_memory_offset: Option<AlignedAddress>, pub recursive_index: Option<u16>, pub kernel_stack_address: Option<AlignedAddress>, pub boot_info_address: Option<AlignedAddress>, pub framebuffer_address: Option<AlignedAddress>, } /// Convert to tokens suitable for initializing the `Config` struct. 
impl quote::ToTokens for ParsedConfig { fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { fn optional(value: Option<impl quote::ToTokens>) -> proc_macro2::TokenStream { value.map(|v| quote!(Some(#v))).unwrap_or(quote!(None)) } let map_physical_memory = self.map_physical_memory; let map_page_table_recursively = self.map_page_table_recursively; let map_framebuffer = self.map_framebuffer; let kernel_stack_size = optional(self.kernel_stack_size); let physical_memory_offset = optional(self.physical_memory_offset); let recursive_index = optional(self.recursive_index); let kernel_stack_address = optional(self.kernel_stack_address); let boot_info_address = optional(self.boot_info_address); let framebuffer_address = optional(self.framebuffer_address); tokens.extend(quote! { Config { map_physical_memory: #map_physical_memory, map_page_table_recursively: #map_page_table_recursively, map_framebuffer: #map_framebuffer, kernel_stack_size: #kernel_stack_size, physical_memory_offset: #physical_memory_offset, recursive_index: #recursive_index, kernel_stack_address: #kernel_stack_address, boot_info_address: #boot_info_address, framebuffer_address: #framebuffer_address, }}); } } #[derive(Debug, Clone, Copy)] struct AlignedAddress(u64); impl quote::ToTokens for AlignedAddress { fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { self.0.to_tokens(tokens); } } impl<'de> serde::Deserialize<'de> for AlignedAddress { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { deserializer.deserialize_str(AlignedAddressVisitor) } } /// Helper struct for implementing `Deserialize` for `AlignedAddress`. struct AlignedAddressVisitor; impl serde::de::Visitor<'_> for AlignedAddressVisitor { type Value = AlignedAddress; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { write!( formatter, "a page-aligned memory address, either as integer or as decimal or hexadecimal \ string (e.g. \"0xffff0000\"); large addresses must be given as string because \ TOML does not support unsigned 64-bit integers" ) } fn visit_u64<E>(self, num: u64) -> Result<Self::Value, E> where E: serde::de::Error, { if num % 0x1000 == 0 { Ok(AlignedAddress(num)) } else { Err(serde::de::Error::custom(format!( "address {:#x} is not page aligned", num ))) } } fn visit_i64<E>(self, num: i64) -> Result<Self::Value, E> where E: serde::de::Error, { let unsigned: u64 = num .try_into() .map_err(|_| serde::de::Error::custom(format!("address {} is negative", num)))?; self.visit_u64(unsigned) } fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: serde::de::Error, { // ignore any `_` (used for digit grouping) let value = &value.replace('_', ""); let num = if value.starts_with("0x") {
{ panic!( "The {} bootloader must be compiled for the `{}` target.", firmware, expected_target, ); }
conditional_block
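To make the manifest-parsing path above concrete, here is a sketch of how a kernel's `[package.metadata.bootloader]` table deserializes into `ParsedConfig`, assuming the `toml` crate as used above; the table contents are invented for illustration. Note that the large address must be given as a string because TOML has no unsigned 64-bit integers, and the `_` digit grouping is stripped by the visitor before parsing.

```
fn parse_example_config() {
    let manifest: toml::Value = r#"
        [package.metadata.bootloader]
        map-physical-memory = true
        physical-memory-offset = "0xFFFF_8000_0000_0000"
    "#
    .parse()
    .expect("invalid TOML");

    // Same lookup chain as the build script: package.metadata.bootloader.
    let table = manifest["package"]["metadata"]["bootloader"].clone();
    let config: ParsedConfig = table.try_into().expect("invalid bootloader config");

    assert!(config.map_physical_memory);
    assert!(!config.map_page_table_recursively); // defaults to false
    assert!(config.map_framebuffer); // defaults to true via `val_true`
}
```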
build.rs
use std::{ env, fs::{self, File}, io::Write, path::{Path, PathBuf}, process::{self, Command}, }; use toml::Value; let target = env::var("TARGET").expect("TARGET not set"); let (firmware, expected_target) = if cfg!(feature = "uefi_bin") { ("UEFI", "x86_64-unknown-uefi") } else if cfg!(feature = "bios_bin") { ("BIOS", "x86_64-bootloader") } else { panic!( "Either the `uefi_bin` or `bios_bin` feature must be enabled when \ the `binary` feature is enabled" ); }; if Path::new(&target) .file_stem() .expect("target has no file stem") != expected_target { panic!( "The {} bootloader must be compiled for the `{}` target.", firmware, expected_target, ); } let out_dir = PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR not set")); let kernel = PathBuf::from(match env::var("KERNEL") { Ok(kernel) => kernel, Err(_) => { eprintln!( "The KERNEL environment variable must be set for building the bootloader.\n\n\ Please use the `cargo builder` command for building." ); process::exit(1); } }); let kernel_file_name = kernel .file_name() .expect("KERNEL has no valid file name") .to_str() .expect("kernel file name not valid utf8"); // check that the kernel file exists assert!( kernel.exists(), "KERNEL does not exist: {}", kernel.display() ); // get access to llvm tools shipped in the llvm-tools-preview rustup component let llvm_tools = match llvm_tools::LlvmTools::new() { Ok(tools) => tools, Err(llvm_tools::Error::NotFound) => { eprintln!("Error: llvm-tools not found"); eprintln!("Maybe the rustup component `llvm-tools-preview` is missing?"); eprintln!(" Install it through: `rustup component add llvm-tools-preview`"); process::exit(1); } Err(err) => { eprintln!("Failed to retrieve llvm-tools component: {:?}", err); process::exit(1); } }; // check that kernel executable has code in it let llvm_size = llvm_tools .tool(&llvm_tools::exe("llvm-size")) .expect("llvm-size not found in llvm-tools"); let mut cmd = Command::new(llvm_size); cmd.arg(&kernel); let output = cmd.output().expect("failed to run llvm-size"); let output_str = String::from_utf8_lossy(&output.stdout); let second_line_opt = output_str.lines().skip(1).next(); let second_line = second_line_opt.expect(&format!( "unexpected llvm-size line output:\n{}", output_str )); let text_size_opt = second_line.split_ascii_whitespace().next(); let text_size = text_size_opt.expect(&format!("unexpected llvm-size output:\n{}", output_str)); if text_size == "0" { panic!("Kernel executable has an empty text section. 
Perhaps the entry point was set incorrectly?\n\n\ Kernel executable at `{}`\n", kernel.display()); } // strip debug symbols from kernel for faster loading let stripped_kernel_file_name = format!("kernel_stripped-{}", kernel_file_name); let stripped_kernel = out_dir.join(&stripped_kernel_file_name); let objcopy = llvm_tools .tool(&llvm_tools::exe("llvm-objcopy")) .expect("llvm-objcopy not found in llvm-tools"); let mut cmd = Command::new(&objcopy); cmd.arg("--strip-debug"); cmd.arg(&kernel); cmd.arg(&stripped_kernel); let exit_status = cmd .status() .expect("failed to run objcopy to strip debug symbols"); if!exit_status.success() { eprintln!("Error: Stripping debug symbols failed"); process::exit(1); } if cfg!(feature = "uefi_bin") { // write file for including kernel in binary let file_path = out_dir.join("kernel_info.rs"); let mut file = File::create(file_path).expect("failed to create kernel_info.rs"); let kernel_size = fs::metadata(&stripped_kernel) .expect("Failed to read file metadata of stripped kernel") .len(); file.write_all( format!( "const KERNEL_SIZE: usize = {}; const KERNEL_BYTES: [u8; KERNEL_SIZE] = *include_bytes!(r\"{}\");", kernel_size, stripped_kernel.display(), ) .as_bytes(), ) .expect("write to kernel_info.rs failed"); } if cfg!(feature = "bios_bin") { // wrap the kernel executable as binary in a new ELF file let stripped_kernel_file_name_replaced = stripped_kernel_file_name .replace('-', "_") .replace('.', "_"); let kernel_bin = out_dir.join(format!("kernel_bin-{}.o", kernel_file_name)); let kernel_archive = out_dir.join(format!("libkernel_bin-{}.a", kernel_file_name)); let mut cmd = Command::new(&objcopy); cmd.arg("-I").arg("binary"); cmd.arg("-O").arg("elf64-x86-64"); cmd.arg("--binary-architecture=i386:x86-64"); cmd.arg("--rename-section").arg(".data=.kernel"); cmd.arg("--redefine-sym").arg(format!( "_binary_{}_start=_kernel_start_addr", stripped_kernel_file_name_replaced )); cmd.arg("--redefine-sym").arg(format!( "_binary_{}_end=_kernel_end_addr", stripped_kernel_file_name_replaced )); cmd.arg("--redefine-sym").arg(format!( "_binary_{}_size=_kernel_size", stripped_kernel_file_name_replaced )); cmd.current_dir(&out_dir); cmd.arg(&stripped_kernel_file_name); cmd.arg(&kernel_bin); let exit_status = cmd.status().expect("failed to run objcopy"); if!exit_status.success() { eprintln!("Error: Running objcopy failed"); process::exit(1); } // create an archive for linking let ar = llvm_tools .tool(&llvm_tools::exe("llvm-ar")) .unwrap_or_else(|| { eprintln!("Failed to retrieve llvm-ar component"); eprint!("This component is available since nightly-2019-03-29,"); eprintln!("so try updating your toolchain if you're using an older nightly"); process::exit(1); }); let mut cmd = Command::new(ar); cmd.arg("crs"); cmd.arg(&kernel_archive); cmd.arg(&kernel_bin); let exit_status = cmd.status().expect("failed to run ar"); if!exit_status.success() { eprintln!("Error: Running ar failed"); process::exit(1); } // pass link arguments to rustc println!("cargo:rustc-link-search=native={}", out_dir.display()); println!( "cargo:rustc-link-lib=static=kernel_bin-{}", kernel_file_name ); } // Parse configuration from the kernel's Cargo.toml let config = match env::var("KERNEL_MANIFEST") { Err(env::VarError::NotPresent) => { panic!("The KERNEL_MANIFEST environment variable must be set for building the bootloader.\n\n\ Please use `cargo builder` for building."); } Err(env::VarError::NotUnicode(_)) => { panic!("The KERNEL_MANIFEST environment variable contains invalid unicode") } Ok(path) if 
Path::new(&path).file_name().and_then(|s| s.to_str())!= Some("Cargo.toml") => { let err = format!( "The given `--kernel-manifest` path `{}` does not \ point to a `Cargo.toml`", path, ); quote! { compile_error!(#err) } } Ok(path) if!Path::new(&path).exists() => { let err = format!( "The given `--kernel-manifest` path `{}` does not exist.", path ); quote! { compile_error!(#err) } } Ok(path) => { println!("cargo:rerun-if-changed={}", path); let contents = fs::read_to_string(&path).expect(&format!( "failed to read kernel manifest file (path: {})", path )); let manifest = contents .parse::<Value>() .expect("failed to parse kernel's Cargo.toml"); if manifest .get("dependencies") .and_then(|d| d.get("bootloader")) .is_some() { // it seems to be the correct Cargo.toml let config_table = manifest .get("package") .and_then(|table| table.get("metadata")) .and_then(|table| table.get("bootloader")) .cloned() .unwrap_or_else(|| toml::Value::Table(toml::map::Map::new())); config_table .try_into::<ParsedConfig>() .map(|c| quote! { #c }) .unwrap_or_else(|err| { let err = format!( "failed to parse bootloader config in {}:\n\n{}", path, err.to_string() ); quote! { compile_error!(#err) } }) } else { let err = format!( "no bootloader dependency in {}\n\n The \ `--kernel-manifest` path should point to the `Cargo.toml` \ of the kernel.", path ); quote! { compile_error!(#err) } } } }; // Write config to file let file_path = out_dir.join("bootloader_config.rs"); let mut file = File::create(file_path).expect("failed to create bootloader_config.rs"); file.write_all( quote::quote! { mod parsed_config { use crate::config::Config; pub const CONFIG: Config = #config; } } .to_string() .as_bytes(), ) .expect("write to bootloader_config.rs failed"); println!("cargo:rerun-if-env-changed=KERNEL"); println!("cargo:rerun-if-env-changed=KERNEL_MANIFEST"); println!("cargo:rerun-if-changed={}", kernel.display()); println!("cargo:rerun-if-changed=build.rs"); } fn val_true() -> bool { true } /// Must be always identical with the struct in `src/config.rs` /// /// This copy is needed because we can't derive Deserialize in the `src/config.rs` /// module itself, since cargo currently unifies dependencies (the `toml` crate enables /// serde's standard feature). Also, it allows to separate the parsing special cases /// such as `AlignedAddress` more cleanly. #[derive(Debug, serde::Deserialize)] #[serde(rename_all = "kebab-case", deny_unknown_fields)] struct ParsedConfig { #[serde(default)] pub map_physical_memory: bool, #[serde(default)] pub map_page_table_recursively: bool, #[serde(default = "val_true")] pub map_framebuffer: bool, pub kernel_stack_size: Option<AlignedAddress>, pub physical_memory_offset: Option<AlignedAddress>, pub recursive_index: Option<u16>, pub kernel_stack_address: Option<AlignedAddress>, pub boot_info_address: Option<AlignedAddress>, pub framebuffer_address: Option<AlignedAddress>, } /// Convert to tokens suitable for initializing the `Config` struct. 
impl quote::ToTokens for ParsedConfig { fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { fn optional(value: Option<impl quote::ToTokens>) -> proc_macro2::TokenStream { value.map(|v| quote!(Some(#v))).unwrap_or(quote!(None)) } let map_physical_memory = self.map_physical_memory; let map_page_table_recursively = self.map_page_table_recursively; let map_framebuffer = self.map_framebuffer; let kernel_stack_size = optional(self.kernel_stack_size); let physical_memory_offset = optional(self.physical_memory_offset); let recursive_index = optional(self.recursive_index); let kernel_stack_address = optional(self.kernel_stack_address); let boot_info_address = optional(self.boot_info_address); let framebuffer_address = optional(self.framebuffer_address); tokens.extend(quote! { Config { map_physical_memory: #map_physical_memory, map_page_table_recursively: #map_page_table_recursively, map_framebuffer: #map_framebuffer, kernel_stack_size: #kernel_stack_size, physical_memory_offset: #physical_memory_offset, recursive_index: #recursive_index, kernel_stack_address: #kernel_stack_address, boot_info_address: #boot_info_address, framebuffer_address: #framebuffer_address, }}); } } #[derive(Debug, Clone, Copy)] struct AlignedAddress(u64); impl quote::ToTokens for AlignedAddress { fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { self.0.to_tokens(tokens); } } impl<'de> serde::Deserialize<'de> for AlignedAddress { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { deserializer.deserialize_str(AlignedAddressVisitor) } } /// Visitor used to deserialize an `AlignedAddress` from an integer or a string. struct
; impl serde::de::Visitor<'_> for AlignedAddressVisitor { type Value = AlignedAddress; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { write!( formatter, "a page-aligned memory address, either as integer or as decimal or hexadecimal \ string (e.g. \"0xffff0000\"); large addresses must be given as string because \ TOML does not support unsigned 64-bit integers" ) } fn visit_u64<E>(self, num: u64) -> Result<Self::Value, E> where E: serde::de::Error, { if num % 0x1000 == 0 { Ok(AlignedAddress(num)) } else { Err(serde::de::Error::custom(format!( "address {:#x} is not page aligned", num ))) } } fn visit_i64<E>(self, num: i64) -> Result<Self::Value, E> where E: serde::de::Error, { let unsigned: u64 = num .try_into() .map_err(|_| serde::de::Error::custom(format!("address {} is negative", num)))?; self.visit_u64(unsigned) } fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: serde::de::Error, { // ignore any `_` (used for digit grouping) let value = &value.replace('_', ""); let num = if value.starts_with("0x") {
AlignedAddressVisitor
identifier_name
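The `visit_str` body above is cut off right at the hexadecimal check. As a minimal sketch (assuming the usual `from_str_radix` approach, not necessarily the author's exact code), the remainder of that parse could look like this:

// Hedged sketch: parse an address string as hex ("0x...") or decimal,
// after `_` digit-grouping separators have been stripped as in the visitor.
fn parse_address(value: &str) -> Result<u64, std::num::ParseIntError> {
    let value = value.replace('_', "");
    if let Some(hex) = value.strip_prefix("0x") {
        u64::from_str_radix(hex, 16) // e.g. "0xffff0000"
    } else {
        value.parse::<u64>() // e.g. "4294901760"
    }
}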
build.rs
fn main() { use std::{ env, fs::{self, File}, io::Write, path::{Path, PathBuf}, process::{self, Command}, }; use toml::Value; let target = env::var("TARGET").expect("TARGET not set"); let (firmware, expected_target) = if cfg!(feature = "uefi_bin") { ("UEFI", "x86_64-unknown-uefi") } else if cfg!(feature = "bios_bin") { ("BIOS", "x86_64-bootloader") } else { panic!( "Either the `uefi_bin` or `bios_bin` feature must be enabled when \ the `binary` feature is enabled" ); }; if Path::new(&target) .file_stem() .expect("target has no file stem") != expected_target { panic!( "The {} bootloader must be compiled for the `{}` target.", firmware, expected_target, ); } let out_dir = PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR not set")); let kernel = PathBuf::from(match env::var("KERNEL") { Ok(kernel) => kernel, Err(_) => { eprintln!( "The KERNEL environment variable must be set for building the bootloader.\n\n\ Please use the `cargo builder` command for building." ); process::exit(1); } }); let kernel_file_name = kernel .file_name() .expect("KERNEL has no valid file name") .to_str() .expect("kernel file name not valid utf8"); // check that the kernel file exists assert!( kernel.exists(), "KERNEL does not exist: {}", kernel.display() ); // get access to llvm tools shipped in the llvm-tools-preview rustup component let llvm_tools = match llvm_tools::LlvmTools::new() { Ok(tools) => tools, Err(llvm_tools::Error::NotFound) => { eprintln!("Error: llvm-tools not found"); eprintln!("Maybe the rustup component `llvm-tools-preview` is missing?"); eprintln!(" Install it through: `rustup component add llvm-tools-preview`"); process::exit(1); } Err(err) => { eprintln!("Failed to retrieve llvm-tools component: {:?}", err); process::exit(1); } }; // check that kernel executable has code in it let llvm_size = llvm_tools .tool(&llvm_tools::exe("llvm-size")) .expect("llvm-size not found in llvm-tools"); let mut cmd = Command::new(llvm_size); cmd.arg(&kernel); let output = cmd.output().expect("failed to run llvm-size"); let output_str = String::from_utf8_lossy(&output.stdout); let second_line_opt = output_str.lines().skip(1).next(); let second_line = second_line_opt.expect(&format!( "unexpected llvm-size line output:\n{}", output_str )); let text_size_opt = second_line.split_ascii_whitespace().next(); let text_size = text_size_opt.expect(&format!("unexpected llvm-size output:\n{}", output_str)); if text_size == "0" { panic!("Kernel executable has an empty text section. 
Perhaps the entry point was set incorrectly?\n\n\ Kernel executable at `{}`\n", kernel.display()); } // strip debug symbols from kernel for faster loading let stripped_kernel_file_name = format!("kernel_stripped-{}", kernel_file_name); let stripped_kernel = out_dir.join(&stripped_kernel_file_name); let objcopy = llvm_tools .tool(&llvm_tools::exe("llvm-objcopy")) .expect("llvm-objcopy not found in llvm-tools"); let mut cmd = Command::new(&objcopy); cmd.arg("--strip-debug"); cmd.arg(&kernel); cmd.arg(&stripped_kernel); let exit_status = cmd .status() .expect("failed to run objcopy to strip debug symbols"); if !exit_status.success() { eprintln!("Error: Stripping debug symbols failed"); process::exit(1); } if cfg!(feature = "uefi_bin") { // write file for including kernel in binary let file_path = out_dir.join("kernel_info.rs"); let mut file = File::create(file_path).expect("failed to create kernel_info.rs"); let kernel_size = fs::metadata(&stripped_kernel) .expect("Failed to read file metadata of stripped kernel") .len(); file.write_all( format!( "const KERNEL_SIZE: usize = {}; const KERNEL_BYTES: [u8; KERNEL_SIZE] = *include_bytes!(r\"{}\");", kernel_size, stripped_kernel.display(), ) .as_bytes(), ) .expect("write to kernel_info.rs failed"); } if cfg!(feature = "bios_bin") { // wrap the kernel executable as binary in a new ELF file let stripped_kernel_file_name_replaced = stripped_kernel_file_name .replace('-', "_") .replace('.', "_"); let kernel_bin = out_dir.join(format!("kernel_bin-{}.o", kernel_file_name)); let kernel_archive = out_dir.join(format!("libkernel_bin-{}.a", kernel_file_name)); let mut cmd = Command::new(&objcopy); cmd.arg("-I").arg("binary"); cmd.arg("-O").arg("elf64-x86-64"); cmd.arg("--binary-architecture=i386:x86-64"); cmd.arg("--rename-section").arg(".data=.kernel"); cmd.arg("--redefine-sym").arg(format!( "_binary_{}_start=_kernel_start_addr", stripped_kernel_file_name_replaced )); cmd.arg("--redefine-sym").arg(format!( "_binary_{}_end=_kernel_end_addr", stripped_kernel_file_name_replaced )); cmd.arg("--redefine-sym").arg(format!( "_binary_{}_size=_kernel_size", stripped_kernel_file_name_replaced )); cmd.current_dir(&out_dir); cmd.arg(&stripped_kernel_file_name); cmd.arg(&kernel_bin); let exit_status = cmd.status().expect("failed to run objcopy"); if !exit_status.success() { eprintln!("Error: Running objcopy failed"); process::exit(1); } // create an archive for linking let ar = llvm_tools .tool(&llvm_tools::exe("llvm-ar")) .unwrap_or_else(|| { eprintln!("Failed to retrieve llvm-ar component"); eprint!("This component is available since nightly-2019-03-29,"); eprintln!("so try updating your toolchain if you're using an older nightly"); process::exit(1); }); let mut cmd = Command::new(ar); cmd.arg("crs"); cmd.arg(&kernel_archive); cmd.arg(&kernel_bin); let exit_status = cmd.status().expect("failed to run ar"); if !exit_status.success() { eprintln!("Error: Running ar failed"); process::exit(1); } // pass link arguments to rustc println!("cargo:rustc-link-search=native={}", out_dir.display()); println!( "cargo:rustc-link-lib=static=kernel_bin-{}",
// Parse configuration from the kernel's Cargo.toml let config = match env::var("KERNEL_MANIFEST") { Err(env::VarError::NotPresent) => { panic!("The KERNEL_MANIFEST environment variable must be set for building the bootloader.\n\n\ Please use `cargo builder` for building."); } Err(env::VarError::NotUnicode(_)) => { panic!("The KERNEL_MANIFEST environment variable contains invalid unicode") } Ok(path) if Path::new(&path).file_name().and_then(|s| s.to_str()) != Some("Cargo.toml") => { let err = format!( "The given `--kernel-manifest` path `{}` does not \ point to a `Cargo.toml`", path, ); quote! { compile_error!(#err) } } Ok(path) if !Path::new(&path).exists() => { let err = format!( "The given `--kernel-manifest` path `{}` does not exist.", path ); quote! { compile_error!(#err) } } Ok(path) => { println!("cargo:rerun-if-changed={}", path); let contents = fs::read_to_string(&path).expect(&format!( "failed to read kernel manifest file (path: {})", path )); let manifest = contents .parse::<Value>() .expect("failed to parse kernel's Cargo.toml"); if manifest .get("dependencies") .and_then(|d| d.get("bootloader")) .is_some() { // it seems to be the correct Cargo.toml let config_table = manifest .get("package") .and_then(|table| table.get("metadata")) .and_then(|table| table.get("bootloader")) .cloned() .unwrap_or_else(|| toml::Value::Table(toml::map::Map::new())); config_table .try_into::<ParsedConfig>() .map(|c| quote! { #c }) .unwrap_or_else(|err| { let err = format!( "failed to parse bootloader config in {}:\n\n{}", path, err.to_string() ); quote! { compile_error!(#err) } }) } else { let err = format!( "no bootloader dependency in {}\n\n The \ `--kernel-manifest` path should point to the `Cargo.toml` \ of the kernel.", path ); quote! { compile_error!(#err) } } } }; // Write config to file let file_path = out_dir.join("bootloader_config.rs"); let mut file = File::create(file_path).expect("failed to create bootloader_config.rs"); file.write_all( quote::quote! { mod parsed_config { use crate::config::Config; pub const CONFIG: Config = #config; } } .to_string() .as_bytes(), ) .expect("write to bootloader_config.rs failed"); println!("cargo:rerun-if-env-changed=KERNEL"); println!("cargo:rerun-if-env-changed=KERNEL_MANIFEST"); println!("cargo:rerun-if-changed={}", kernel.display()); println!("cargo:rerun-if-changed=build.rs"); } fn val_true() -> bool { true } /// Must always be kept identical to the struct in `src/config.rs` /// /// This copy is needed because we can't derive Deserialize in the `src/config.rs` /// module itself, since cargo currently unifies dependencies (the `toml` crate enables /// serde's standard feature). It also allows the parsing special cases such as /// `AlignedAddress` to be separated more cleanly. #[derive(Debug, serde::Deserialize)] #[serde(rename_all = "kebab-case", deny_unknown_fields)] struct ParsedConfig { #[serde(default)] pub map_physical_memory: bool, #[serde(default)] pub map_page_table_recursively: bool, #[serde(default = "val_true")] pub map_framebuffer: bool, pub kernel_stack_size: Option<AlignedAddress>, pub physical_memory_offset: Option<AlignedAddress>, pub recursive_index: Option<u16>, pub kernel_stack_address: Option<AlignedAddress>, pub boot_info_address: Option<AlignedAddress>, pub framebuffer_address: Option<AlignedAddress>, } /// Convert to tokens suitable for initializing the `Config` struct.
impl quote::ToTokens for ParsedConfig { fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { fn optional(value: Option<impl quote::ToTokens>) -> proc_macro2::TokenStream { value.map(|v| quote!(Some(#v))).unwrap_or(quote!(None)) } let map_physical_memory = self.map_physical_memory; let map_page_table_recursively = self.map_page_table_recursively; let map_framebuffer = self.map_framebuffer; let kernel_stack_size = optional(self.kernel_stack_size); let physical_memory_offset = optional(self.physical_memory_offset); let recursive_index = optional(self.recursive_index); let kernel_stack_address = optional(self.kernel_stack_address); let boot_info_address = optional(self.boot_info_address); let framebuffer_address = optional(self.framebuffer_address); tokens.extend(quote! { Config { map_physical_memory: #map_physical_memory, map_page_table_recursively: #map_page_table_recursively, map_framebuffer: #map_framebuffer, kernel_stack_size: #kernel_stack_size, physical_memory_offset: #physical_memory_offset, recursive_index: #recursive_index, kernel_stack_address: #kernel_stack_address, boot_info_address: #boot_info_address, framebuffer_address: #framebuffer_address, }}); } } #[derive(Debug, Clone, Copy)] struct AlignedAddress(u64); impl quote::ToTokens for AlignedAddress { fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { self.0.to_tokens(tokens); } } impl<'de> serde::Deserialize<'de> for AlignedAddress { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { deserializer.deserialize_str(AlignedAddressVisitor) } } /// Visitor used to deserialize an `AlignedAddress` from an integer or a string. struct AlignedAddressVisitor; impl serde::de::Visitor<'_> for AlignedAddressVisitor { type Value = AlignedAddress; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { write!( formatter, "a page-aligned memory address, either as integer or as decimal or hexadecimal \ string (e.g. \"0xffff0000\"); large addresses must be given as string because \ TOML does not support unsigned 64-bit integers" ) } fn visit_u64<E>(self, num: u64) -> Result<Self::Value, E> where E: serde::de::Error, { if num % 0x1000 == 0 { Ok(AlignedAddress(num)) } else { Err(serde::de::Error::custom(format!( "address {:#x} is not page aligned", num ))) } } fn visit_i64<E>(self, num: i64) -> Result<Self::Value, E> where E: serde::de::Error, { let unsigned: u64 = num .try_into() .map_err(|_| serde::de::Error::custom(format!("address {} is negative", num)))?; self.visit_u64(unsigned) } fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: serde::de::Error, { // ignore any `_` (used for digit grouping) let value = &value.replace('_', ""); let num = if value.starts_with("0x") {
kernel_file_name ); }
random_line_split
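The `--redefine-sym` renames in the BIOS branch give the wrapped kernel blob stable symbol names. As a hedged sketch (this accessor is illustrative, not part of the crate), the linking stage could locate the embedded kernel like this:

// Hypothetical accessor: the symbol names come from the --redefine-sym
// flags passed to llvm-objcopy above (_kernel_start_addr / _kernel_end_addr).
extern "C" {
    static _kernel_start_addr: u8;
    static _kernel_end_addr: u8;
}

fn embedded_kernel() -> &'static [u8] {
    unsafe {
        let start = &_kernel_start_addr as *const u8;
        let end = &_kernel_end_addr as *const u8;
        core::slice::from_raw_parts(start, end as usize - start as usize)
    }
}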
state.rs
use std::collections::HashMap; use num_traits::{AsPrimitive, Float}; use sdl2::{ keyboard::Keycode, pixels::Color, rect::{Point, Rect}, render::Canvas, video::Window, }; use crate::{ ext::ColorExt, key_state_handler::KeyStateHandler, map::Map, math::vector::Vec2D, WINDOW_HEIGHT, WINDOW_WIDTH, }; pub fn remap< T: 'static + Float + Copy, ON: AsPrimitive<T> + Copy, OX: AsPrimitive<T> + Copy, NN: AsPrimitive<T> + Copy, NX: AsPrimitive<T> + Copy, >( value: T, old_min: ON, old_max: OX, new_min: NN, new_max: NX, ) -> T { new_min.as_() + (new_max.as_() - new_min.as_()) * ((value - old_min.as_()) / (old_max.as_() - old_min.as_())) } pub fn remap_minz< T: 'static + Float + Copy + AsPrimitive<T>, OX: AsPrimitive<T> + Copy, NX: AsPrimitive<T> + Copy, >( value: T, old_max: OX, new_max: NX, ) -> T { remap(value, T::zero(), old_max, T::zero(), new_max) } const MOUSE_SENSITIVITY: f64 = 0.01; const MOVE_SPEED: f64 = 2.5; const PLAYER_WALL_PADDING: f64 = 10.; const WALL_ACTUAL_HEIGHT: f64 = 48.; pub struct State { pub(crate) position: Vec2D, pub(crate) angle: f64, pub(crate) fov: f64, pub(crate) wall_colors: Vec<Color>, pub(crate) map: Map, pub(crate) keys: KeyStateHandler, pub(crate) columns: Vec<(usize, u32)>, pub(crate) resolution: usize, pub(crate) projection_factor: f64, pub(crate) radian_per_column: f64, pub(crate) column_width: u32, } impl State { pub fn new() -> Self { let wall_colors = vec![ Color::RGB(128, 255, 0), Color::RGB(0, 128, 255), Color::RGB(255, 0, 128), Color::RGB(0, 255, 0), Color::RGB(0, 0, 255), Color::WHITE, ]; // let map = Map::default(); let map = Map::load("./assets/maps/many_walls.json").unwrap(); let (w, h) = map.dims; // let movement_vector = Line::new(origin, origin + geo::Point::new(delta_x, delta_y)); let position = Vec2D::new(w as f64 / 2., 50. + h as f64 / 2.); let fov = 60.; let projection_plane_distance = ((WINDOW_WIDTH / 2) as f64 / (fov.to_radians() / 2.).tan()) as f64; let resolution = WINDOW_WIDTH as usize; Self { position, angle: std::f64::consts::PI, fov, wall_colors, map, keys: KeyStateHandler::new(), columns: Vec::with_capacity(resolution), resolution, projection_factor: projection_plane_distance * WALL_ACTUAL_HEIGHT, radian_per_column: fov.to_radians() / resolution as f64, column_width: WINDOW_WIDTH / resolution as u32, } } fn get_color(&self, index: usize) -> Color
pub fn mouse_motion(&mut self, dx: i32) { self.angle += MOUSE_SENSITIVITY * dx as f64; } fn calculate_collisions(&mut self) { let mut current_angle = self.angle - (self.fov.to_radians() / 2.); let end_angle = current_angle + (self.radian_per_column * self.resolution as f64); self.columns.clear(); for _ in 0..self.resolution { let mut ray = Vec2D::from_angle(current_angle); ray.translate(&self.position); let mut max_height = f64::NEG_INFINITY; let mut wall_color_index = 0; for wall in self.map.walls.iter() { if let Some(intersection_vector) = ray.intersects(wall) { let raw_distance = ray.dist(&intersection_vector); let delta = current_angle - self.angle; let corrected_distance = raw_distance * (delta.cos() as f64); let projected_height = self.projection_factor / corrected_distance; if projected_height > max_height { max_height = projected_height; wall_color_index = wall.color_index; } } } if max_height.is_infinite() { self.columns.push((0, 0)); } else { self.columns .push((wall_color_index, max_height.round() as u32)); } current_angle += self.radian_per_column; } } fn update_camera(&mut self) { let mut delta = Vec2D::Origin; let par = Vec2D::from_angle(self.angle as f64); let perp = Vec2D::from_angle(self.angle + (90f64).to_radians()); if self.keys.is_pressed(Keycode::W) { delta += par; } if self.keys.is_pressed(Keycode::S) { delta += -par; } if self.keys.is_pressed(Keycode::A) { delta += -perp; } if self.keys.is_pressed(Keycode::D) { delta += perp; } // Normalize delta so that the player doesn't move faster moving in a diagonal direction if !delta.is_origin() { delta = delta.normalize() * MOVE_SPEED; } self.position.add_x_y_raw(delta.x_y()); self.position.clamp(self.map.dims, PLAYER_WALL_PADDING); } pub fn update(&mut self) { self.update_camera(); self.calculate_collisions(); } pub fn draw_minimap( &self, canvas: &mut Canvas<Window>, dims: (f64, f64), ) -> Result<(), String> { let minimap_offset = dims.0.max(dims.1) / 4.; let minimap_base = Vec2D::new(minimap_offset, minimap_offset); // Background canvas.set_draw_color(Color::BLACK); canvas.fill_rect(Rect::new( minimap_offset as i32, minimap_offset as i32, dims.0 as u32, dims.1 as u32, ))?; canvas.set_draw_color(Color::WHITE); // Player position let position_mapped = self.position.remap(self.map.dims, dims) + minimap_base; canvas.fill_rect(Rect::from_center(position_mapped, 8, 8))?; // Player lines let ray_scale = dims.0.max(dims.1) / 2.; let half_fov = self.fov.to_radians() / 2.; let forward_end = position_mapped + (Vec2D::from_angle(self.angle) * ray_scale); let left_end = position_mapped + (Vec2D::from_angle(self.angle - half_fov) * ray_scale); let right_end = position_mapped + (Vec2D::from_angle(self.angle + half_fov) * ray_scale); canvas.draw_lines(&[ position_mapped.into(), forward_end.into(), position_mapped.into(), left_end.into(), position_mapped.into(), right_end.into(), ] as &[Point])?; // TODO: FOV lines // Walls for wall in self.map.walls.iter() { canvas.set_draw_color(self.get_color(wall.color_index)); let start = wall.a.remap(self.map.dims, dims) + minimap_base; let end = wall.b.remap(self.map.dims, dims) + minimap_base; canvas.draw_line(start, end)?; } // let mut current_angle = self.angle + (self.fov.to_radians() / 2.); // for _ in 0..self.resolution { // let mut ray = self.position; // ray.set_angle(current_angle); // ray += self.position; // let mut max_height = f64::NEG_INFINITY; // let mut collisions: Vec<(bool, Vec2D)> = vec![]; // let mut collision = Vec2D::Origin; // for wall in self.map.walls.iter() { // if let
Some(intersection_vector) = ray.intersects(wall) { // let raw_distance = ray.dist(&intersection_vector); // let delta = current_angle - self.angle; // let corrected_distance = raw_distance * (delta.cos() as f64); // let projected_height = self.projection_factor / corrected_distance; // if projected_height > max_height { // max_height = projected_height; // collisions = collisions.into_iter().fold(vec![], |mut acc, cur| { // acc.push((false, cur.1)); // acc // }); // collisions.push((true, collision)); // collision = intersection_vector; // } else { // collisions.push((false, intersection_vector)); // } // } // } // if !max_height.is_infinite() { // canvas.set_draw_color(Color::RED); // canvas.draw_rects( // collisions // .into_iter() // .map(|(_, v)| { // Rect::from_center( // Point::new( // remap(v.x(), 0., self.map.dims.0, 0., dims.0).floor() as i32, // remap(v.y(), 0., self.map.dims.1, 0., dims.1).floor() as i32, // ) + minimap_base, // 2, // 2, // ) // }) // .collect::<Vec<Rect>>() // .as_slice(), // ); // } // current_angle -= self.radian_per_column; // } Ok(()) } fn render_frame(&self, canvas: &mut Canvas<Window>) -> Result<(), String> { let column_width_signed = self.column_width as i32; // TODO: Draw background let mut current_index = usize::MAX; let mut current_color = Color::BLACK; for (idx, (color_index, height)) in self.columns.iter().copied().enumerate() { if color_index != current_index { current_color = self.get_color(color_index); current_index = color_index; } let dim_amt = remap(height as f64, 0, WINDOW_HEIGHT, 255, 0).floor() as u8; canvas.set_draw_color(current_color.dim(dim_amt)); canvas.fill_rect(Rect::from_center( Point::new( idx as i32 * column_width_signed + (column_width_signed / 2), WINDOW_HEIGHT as i32 / 2, ), self.column_width, height, ))?; } Ok(()) } pub fn draw(&self, canvas: &mut Canvas<Window>) -> Result<(), String> { self.render_frame(canvas)?; self.draw_minimap(canvas, (WINDOW_WIDTH as f64 / 5., WINDOW_WIDTH as f64 / 5.))?; Ok(()) } }
{ self.wall_colors.get(index).copied().unwrap_or(Color::WHITE) }
identifier_body
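For reference, the generic `remap` defined above is plain linear interpolation between two ranges. A quick standalone sanity check (assuming `remap`/`remap_minz` from the file are in scope; not part of the original source):

fn main() {
    // 5 sits halfway through [0, 10], so it lands halfway through [0, 100].
    let mid: f64 = remap(5.0_f64, 0, 10, 0, 100);
    assert!((mid - 50.0).abs() < f64::EPSILON);
    // remap_minz pins both minimums at zero: 2 in [0, 4] -> 0.5 in [0, 1].
    let scaled: f64 = remap_minz(2.0_f64, 4, 1);
    assert!((scaled - 0.5).abs() < f64::EPSILON);
}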
state.rs
use std::collections::HashMap; use num_traits::{AsPrimitive, Float}; use sdl2::{ keyboard::Keycode, pixels::Color, rect::{Point, Rect}, render::Canvas, video::Window, }; use crate::{ ext::ColorExt, key_state_handler::KeyStateHandler, map::Map, math::vector::Vec2D, WINDOW_HEIGHT, WINDOW_WIDTH, }; pub fn remap< T: 'static + Float + Copy, ON: AsPrimitive<T> + Copy, OX: AsPrimitive<T> + Copy, NN: AsPrimitive<T> + Copy, NX: AsPrimitive<T> + Copy, >( value: T, old_min: ON, old_max: OX, new_min: NN, new_max: NX, ) -> T { new_min.as_() + (new_max.as_() - new_min.as_()) * ((value - old_min.as_()) / (old_max.as_() - old_min.as_())) } pub fn remap_minz< T: 'static + Float + Copy + AsPrimitive<T>, OX: AsPrimitive<T> + Copy, NX: AsPrimitive<T> + Copy, >( value: T, old_max: OX, new_max: NX, ) -> T { remap(value, T::zero(), old_max, T::zero(), new_max) } const MOUSE_SENSITIVITY: f64 = 0.01; const MOVE_SPEED: f64 = 2.5; const PLAYER_WALL_PADDING: f64 = 10.; const WALL_ACTUAL_HEIGHT: f64 = 48.; pub struct State { pub(crate) position: Vec2D, pub(crate) angle: f64, pub(crate) fov: f64, pub(crate) wall_colors: Vec<Color>, pub(crate) map: Map, pub(crate) keys: KeyStateHandler, pub(crate) columns: Vec<(usize, u32)>, pub(crate) resolution: usize, pub(crate) projection_factor: f64, pub(crate) radian_per_column: f64, pub(crate) column_width: u32, } impl State { pub fn new() -> Self { let wall_colors = vec![ Color::RGB(128, 255, 0), Color::RGB(0, 128, 255), Color::RGB(255, 0, 128), Color::RGB(0, 255, 0), Color::RGB(0, 0, 255), Color::WHITE, ]; // let map = Map::default(); let map = Map::load("./assets/maps/many_walls.json").unwrap(); let (w, h) = map.dims; // let movement_vector = Line::new(origin, origin + geo::Point::new(delta_x, delta_y)); let position = Vec2D::new(w as f64 / 2., 50. 
+ h as f64 / 2.); let fov = 60.; let projection_plane_distance = ((WINDOW_WIDTH / 2) as f64 / (fov.to_radians() / 2.).tan()) as f64; let resolution = WINDOW_WIDTH as usize; Self { position, angle: std::f64::consts::PI, fov, wall_colors, map, keys: KeyStateHandler::new(), columns: Vec::with_capacity(resolution), resolution, projection_factor: projection_plane_distance * WALL_ACTUAL_HEIGHT, radian_per_column: fov.to_radians() / resolution as f64, column_width: WINDOW_WIDTH / resolution as u32, } } fn get_color(&self, index: usize) -> Color { self.wall_colors.get(index).copied().unwrap_or(Color::WHITE) } pub fn mouse_motion(&mut self, dx: i32) { self.angle += MOUSE_SENSITIVITY * dx as f64; } fn calculate_collisions(&mut self) { let mut current_angle = self.angle - (self.fov.to_radians() / 2.); let end_angle = current_angle + (self.radian_per_column * self.resolution as f64); self.columns.clear(); for _ in 0..self.resolution { let mut ray = Vec2D::from_angle(current_angle); ray.translate(&self.position); let mut max_height = f64::NEG_INFINITY; let mut wall_color_index = 0; for wall in self.map.walls.iter() { if let Some(intersection_vector) = ray.intersects(wall) { let raw_distance = ray.dist(&intersection_vector); let delta = current_angle - self.angle; let corrected_distance = raw_distance * (delta.cos() as f64); let projected_height = self.projection_factor / corrected_distance; if projected_height > max_height { max_height = projected_height; wall_color_index = wall.color_index; } } } if max_height.is_infinite() { self.columns.push((0, 0)); } else { self.columns .push((wall_color_index, max_height.round() as u32)); } current_angle += self.radian_per_column; } } fn update_camera(&mut self) { let mut delta = Vec2D::Origin; let par = Vec2D::from_angle(self.angle as f64); let perp = Vec2D::from_angle(self.angle + (90f64).to_radians()); if self.keys.is_pressed(Keycode::W) { delta += par; } if self.keys.is_pressed(Keycode::S) { delta += -par; } if self.keys.is_pressed(Keycode::A) { delta += -perp; } if self.keys.is_pressed(Keycode::D) { delta += perp; } // Normalize delta so that the player doesn't move faster moving in a diagonal direction if !delta.is_origin() { delta = delta.normalize() * MOVE_SPEED; }
pub fn update(&mut self) { self.update_camera(); self.calculate_collisions(); } pub fn draw_minimap( &self, canvas: &mut Canvas<Window>, dims: (f64, f64), ) -> Result<(), String> { let minimap_offset = dims.0.max(dims.1) / 4.; let minimap_base = Vec2D::new(minimap_offset, minimap_offset); // Background canvas.set_draw_color(Color::BLACK); canvas.fill_rect(Rect::new( minimap_offset as i32, minimap_offset as i32, dims.0 as u32, dims.1 as u32, ))?; canvas.set_draw_color(Color::WHITE); // Player position let position_mapped = self.position.remap(self.map.dims, dims) + minimap_base; canvas.fill_rect(Rect::from_center(position_mapped, 8, 8))?; // Player lines let ray_scale = dims.0.max(dims.1) / 2.; let half_fov = self.fov.to_radians() / 2.; let forward_end = position_mapped + (Vec2D::from_angle(self.angle) * ray_scale); let left_end = position_mapped + (Vec2D::from_angle(self.angle - half_fov) * ray_scale); let right_end = position_mapped + (Vec2D::from_angle(self.angle + half_fov) * ray_scale); canvas.draw_lines(&[ position_mapped.into(), forward_end.into(), position_mapped.into(), left_end.into(), position_mapped.into(), right_end.into(), ] as &[Point])?; // TODO: FOV lines // Walls for wall in self.map.walls.iter() { canvas.set_draw_color(self.get_color(wall.color_index)); let start = wall.a.remap(self.map.dims, dims) + minimap_base; let end = wall.b.remap(self.map.dims, dims) + minimap_base; canvas.draw_line(start, end)?; } // let mut current_angle = self.angle + (self.fov.to_radians() / 2.); // for _ in 0..self.resolution { // let mut ray = self.position; // ray.set_angle(current_angle); // ray += self.position; // let mut max_height = f64::NEG_INFINITY; // let mut collisions: Vec<(bool, Vec2D)> = vec![]; // let mut collision = Vec2D::Origin; // for wall in self.map.walls.iter() { // if let Some(intersection_vector) = ray.intersects(wall) { // let raw_distance = ray.dist(&intersection_vector); // let delta = current_angle - self.angle; // let corrected_distance = raw_distance * (delta.cos() as f64); // let projected_height = self.projection_factor / corrected_distance; // if projected_height > max_height { // max_height = projected_height; // collisions = collisions.into_iter().fold(vec![], |mut acc, cur| { // acc.push((false, cur.1)); // acc // }); // collisions.push((true, collision)); // collision = intersection_vector; // } else { // collisions.push((false, intersection_vector)); // } // } // } // if !max_height.is_infinite() { // canvas.set_draw_color(Color::RED); // canvas.draw_rects( // collisions // .into_iter() // .map(|(_, v)| { // Rect::from_center( // Point::new( // remap(v.x(), 0., self.map.dims.0, 0., dims.0).floor() as i32, // remap(v.y(), 0., self.map.dims.1, 0., dims.1).floor() as i32, // ) + minimap_base, // 2, // 2, // ) // }) // .collect::<Vec<Rect>>() // .as_slice(), // ); // } // current_angle -= self.radian_per_column; // } Ok(()) } fn render_frame(&self, canvas: &mut Canvas<Window>) -> Result<(), String> { let column_width_signed = self.column_width as i32; // TODO: Draw background let mut current_index = usize::MAX; let mut current_color = Color::BLACK; for (idx, (color_index, height)) in self.columns.iter().copied().enumerate() { if color_index != current_index { current_color = self.get_color(color_index); current_index = color_index; } let dim_amt = remap(height as f64, 0, WINDOW_HEIGHT, 255, 0).floor() as u8; canvas.set_draw_color(current_color.dim(dim_amt)); canvas.fill_rect(Rect::from_center( Point::new( idx as i32 * column_width_signed + (column_width_signed 
/ 2), WINDOW_HEIGHT as i32 / 2, ), self.column_width, height, ))?; } Ok(()) } pub fn draw(&self, canvas: &mut Canvas<Window>) -> Result<(), String> { self.render_frame(canvas)?; self.draw_minimap(canvas, (WINDOW_WIDTH as f64 / 5., WINDOW_WIDTH as f64 / 5.))?; Ok(()) } }
self.position.add_x_y_raw(delta.x_y()); self.position.clamp(self.map.dims, PLAYER_WALL_PADDING); }
random_line_split
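A note on the `corrected_distance` line in `calculate_collisions`: multiplying the raw hit distance by the cosine of the ray's offset from the view direction is the standard fisheye correction, projecting each distance onto the camera plane. A small worked example (illustrative values, not from the file):

fn main() {
    // A ray 30 degrees off the view direction with raw hit distance 100
    // projects to a perpendicular distance of 100 * cos(30°) ≈ 86.60.
    let raw_distance = 100.0_f64;
    let delta = 30.0_f64.to_radians();
    let corrected_distance = raw_distance * delta.cos();
    assert!((corrected_distance - 86.6025).abs() < 1e-3);
}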
state.rs
use std::collections::HashMap; use num_traits::{AsPrimitive, Float}; use sdl2::{ keyboard::Keycode, pixels::Color, rect::{Point, Rect}, render::Canvas, video::Window, }; use crate::{ ext::ColorExt, key_state_handler::KeyStateHandler, map::Map, math::vector::Vec2D, WINDOW_HEIGHT, WINDOW_WIDTH, }; pub fn remap< T: 'static + Float + Copy, ON: AsPrimitive<T> + Copy, OX: AsPrimitive<T> + Copy, NN: AsPrimitive<T> + Copy, NX: AsPrimitive<T> + Copy, >( value: T, old_min: ON, old_max: OX, new_min: NN, new_max: NX, ) -> T { new_min.as_() + (new_max.as_() - new_min.as_()) * ((value - old_min.as_()) / (old_max.as_() - old_min.as_())) } pub fn remap_minz< T: 'static + Float + Copy + AsPrimitive<T>, OX: AsPrimitive<T> + Copy, NX: AsPrimitive<T> + Copy, >( value: T, old_max: OX, new_max: NX, ) -> T { remap(value, T::zero(), old_max, T::zero(), new_max) } const MOUSE_SENSITIVITY: f64 = 0.01; const MOVE_SPEED: f64 = 2.5; const PLAYER_WALL_PADDING: f64 = 10.; const WALL_ACTUAL_HEIGHT: f64 = 48.; pub struct State { pub(crate) position: Vec2D, pub(crate) angle: f64, pub(crate) fov: f64, pub(crate) wall_colors: Vec<Color>, pub(crate) map: Map, pub(crate) keys: KeyStateHandler, pub(crate) columns: Vec<(usize, u32)>, pub(crate) resolution: usize, pub(crate) projection_factor: f64, pub(crate) radian_per_column: f64, pub(crate) column_width: u32, } impl State { pub fn new() -> Self { let wall_colors = vec![ Color::RGB(128, 255, 0), Color::RGB(0, 128, 255), Color::RGB(255, 0, 128), Color::RGB(0, 255, 0), Color::RGB(0, 0, 255), Color::WHITE, ]; // let map = Map::default(); let map = Map::load("./assets/maps/many_walls.json").unwrap(); let (w, h) = map.dims; // let movement_vector = Line::new(origin, origin + geo::Point::new(delta_x, delta_y)); let position = Vec2D::new(w as f64 / 2., 50. + h as f64 / 2.); let fov = 60.; let projection_plane_distance = ((WINDOW_WIDTH / 2) as f64 / (fov.to_radians() / 2.).tan()) as f64; let resolution = WINDOW_WIDTH as usize; Self { position, angle: std::f64::consts::PI, fov, wall_colors, map, keys: KeyStateHandler::new(), columns: Vec::with_capacity(resolution), resolution, projection_factor: projection_plane_distance * WALL_ACTUAL_HEIGHT, radian_per_column: fov.to_radians() / resolution as f64, column_width: WINDOW_WIDTH / resolution as u32, } } fn get_color(&self, index: usize) -> Color { self.wall_colors.get(index).copied().unwrap_or(Color::WHITE) } pub fn mouse_motion(&mut self, dx: i32) { self.angle += MOUSE_SENSITIVITY * dx as f64; } fn calculate_collisions(&mut self) { let mut current_angle = self.angle - (self.fov.to_radians() / 2.); let end_angle = current_angle + (self.radian_per_column * self.resolution as f64); self.columns.clear(); for _ in 0..self.resolution { let mut ray = Vec2D::from_angle(current_angle); ray.translate(&self.position); let mut max_height = f64::NEG_INFINITY; let mut wall_color_index = 0; for wall in self.map.walls.iter() { if let Some(intersection_vector) = ray.intersects(wall)
} if max_height.is_infinite() { self.columns.push((0, 0)); } else { self.columns .push((wall_color_index, max_height.round() as u32)); } current_angle += self.radian_per_column; } } fn update_camera(&mut self) { let mut delta = Vec2D::Origin; let par = Vec2D::from_angle(self.angle as f64); let perp = Vec2D::from_angle(self.angle + (90f64).to_radians()); if self.keys.is_pressed(Keycode::W) { delta += par; } if self.keys.is_pressed(Keycode::S) { delta += -par; } if self.keys.is_pressed(Keycode::A) { delta += -perp; } if self.keys.is_pressed(Keycode::D) { delta += perp; } // Normalize delta so that the player doesn't move faster moving in a diagonal direction if !delta.is_origin() { delta = delta.normalize() * MOVE_SPEED; } self.position.add_x_y_raw(delta.x_y()); self.position.clamp(self.map.dims, PLAYER_WALL_PADDING); } pub fn update(&mut self) { self.update_camera(); self.calculate_collisions(); } pub fn draw_minimap( &self, canvas: &mut Canvas<Window>, dims: (f64, f64), ) -> Result<(), String> { let minimap_offset = dims.0.max(dims.1) / 4.; let minimap_base = Vec2D::new(minimap_offset, minimap_offset); // Background canvas.set_draw_color(Color::BLACK); canvas.fill_rect(Rect::new( minimap_offset as i32, minimap_offset as i32, dims.0 as u32, dims.1 as u32, ))?; canvas.set_draw_color(Color::WHITE); // Player position let position_mapped = self.position.remap(self.map.dims, dims) + minimap_base; canvas.fill_rect(Rect::from_center(position_mapped, 8, 8))?; // Player lines let ray_scale = dims.0.max(dims.1) / 2.; let half_fov = self.fov.to_radians() / 2.; let forward_end = position_mapped + (Vec2D::from_angle(self.angle) * ray_scale); let left_end = position_mapped + (Vec2D::from_angle(self.angle - half_fov) * ray_scale); let right_end = position_mapped + (Vec2D::from_angle(self.angle + half_fov) * ray_scale); canvas.draw_lines(&[ position_mapped.into(), forward_end.into(), position_mapped.into(), left_end.into(), position_mapped.into(), right_end.into(), ] as &[Point])?; // TODO: FOV lines // Walls for wall in self.map.walls.iter() { canvas.set_draw_color(self.get_color(wall.color_index)); let start = wall.a.remap(self.map.dims, dims) + minimap_base; let end = wall.b.remap(self.map.dims, dims) + minimap_base; canvas.draw_line(start, end)?; } // let mut current_angle = self.angle + (self.fov.to_radians() / 2.); // for _ in 0..self.resolution { // let mut ray = self.position; // ray.set_angle(current_angle); // ray += self.position; // let mut max_height = f64::NEG_INFINITY; // let mut collisions: Vec<(bool, Vec2D)> = vec![]; // let mut collision = Vec2D::Origin; // for wall in self.map.walls.iter() { // if let Some(intersection_vector) = ray.intersects(wall) { // let raw_distance = ray.dist(&intersection_vector); // let delta = current_angle - self.angle; // let corrected_distance = raw_distance * (delta.cos() as f64); // let projected_height = self.projection_factor / corrected_distance; // if projected_height > max_height { // max_height = projected_height; // collisions = collisions.into_iter().fold(vec![], |mut acc, cur| { // acc.push((false, cur.1)); // acc // }); // collisions.push((true, collision)); // collision = intersection_vector; // } else { // collisions.push((false, intersection_vector)); // } // } // } // if !max_height.is_infinite() { // canvas.set_draw_color(Color::RED); // canvas.draw_rects( // collisions // .into_iter() // .map(|(_, v)| { // Rect::from_center( // Point::new( // remap(v.x(), 0., self.map.dims.0, 0., dims.0).floor() as i32, // remap(v.y(), 0., self.map.dims.1,
0., dims.1).floor() as i32, // ) + minimap_base, // 2, // 2, // ) // }) // .collect::<Vec<Rect>>() // .as_slice(), // ); // } // current_angle -= self.radian_per_column; // } Ok(()) } fn render_frame(&self, canvas: &mut Canvas<Window>) -> Result<(), String> { let column_width_signed = self.column_width as i32; // TODO: Draw background let mut current_index = usize::MAX; let mut current_color = Color::BLACK; for (idx, (color_index, height)) in self.columns.iter().copied().enumerate() { if color_index != current_index { current_color = self.get_color(color_index); current_index = color_index; } let dim_amt = remap(height as f64, 0, WINDOW_HEIGHT, 255, 0).floor() as u8; canvas.set_draw_color(current_color.dim(dim_amt)); canvas.fill_rect(Rect::from_center( Point::new( idx as i32 * column_width_signed + (column_width_signed / 2), WINDOW_HEIGHT as i32 / 2, ), self.column_width, height, ))?; } Ok(()) } pub fn draw(&self, canvas: &mut Canvas<Window>) -> Result<(), String> { self.render_frame(canvas)?; self.draw_minimap(canvas, (WINDOW_WIDTH as f64 / 5., WINDOW_WIDTH as f64 / 5.))?; Ok(()) } }
{ let raw_distance = ray.dist(&intersection_vector); let delta = current_angle - self.angle; let corrected_distance = raw_distance * (delta.cos() as f64); let projected_height = self.projection_factor / corrected_distance; if projected_height > max_height { max_height = projected_height; wall_color_index = wall.color_index; } }
conditional_block
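The `projected_height` computed in the conditional block above follows from similar triangles: `projection_factor` is the projection-plane distance times `WALL_ACTUAL_HEIGHT`, so the on-screen height scales inversely with the corrected distance. A worked instance (with assumed numbers, not taken from the file):

fn main() {
    // With a 640-pixel window and a 60° FOV the projection plane sits at
    // (640 / 2) / tan(30°) ≈ 554.26 px; a 48-unit wall 200 units away
    // then projects to 554.26 * 48 / 200 ≈ 133 px tall.
    let fov = 60.0_f64.to_radians();
    let plane_distance = (640.0_f64 / 2.0) / (fov / 2.0).tan();
    let projection_factor = plane_distance * 48.0;
    let projected_height = projection_factor / 200.0;
    assert!((projected_height - 133.02).abs() < 0.01);
}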
state.rs
use std::collections::HashMap; use num_traits::{AsPrimitive, Float}; use sdl2::{ keyboard::Keycode, pixels::Color, rect::{Point, Rect}, render::Canvas, video::Window, }; use crate::{ ext::ColorExt, key_state_handler::KeyStateHandler, map::Map, math::vector::Vec2D, WINDOW_HEIGHT, WINDOW_WIDTH, }; pub fn remap< T: 'static + Float + Copy, ON: AsPrimitive<T> + Copy, OX: AsPrimitive<T> + Copy, NN: AsPrimitive<T> + Copy, NX: AsPrimitive<T> + Copy, >( value: T, old_min: ON, old_max: OX, new_min: NN, new_max: NX, ) -> T { new_min.as_() + (new_max.as_() - new_min.as_()) * ((value - old_min.as_()) / (old_max.as_() - old_min.as_())) } pub fn remap_minz< T: 'static + Float + Copy + AsPrimitive<T>, OX: AsPrimitive<T> + Copy, NX: AsPrimitive<T> + Copy, >( value: T, old_max: OX, new_max: NX, ) -> T { remap(value, T::zero(), old_max, T::zero(), new_max) } const MOUSE_SENSITIVITY: f64 = 0.01; const MOVE_SPEED: f64 = 2.5; const PLAYER_WALL_PADDING: f64 = 10.; const WALL_ACTUAL_HEIGHT: f64 = 48.; pub struct State { pub(crate) position: Vec2D, pub(crate) angle: f64, pub(crate) fov: f64, pub(crate) wall_colors: Vec<Color>, pub(crate) map: Map, pub(crate) keys: KeyStateHandler, pub(crate) columns: Vec<(usize, u32)>, pub(crate) resolution: usize, pub(crate) projection_factor: f64, pub(crate) radian_per_column: f64, pub(crate) column_width: u32, } impl State { pub fn new() -> Self { let wall_colors = vec![ Color::RGB(128, 255, 0), Color::RGB(0, 128, 255), Color::RGB(255, 0, 128), Color::RGB(0, 255, 0), Color::RGB(0, 0, 255), Color::WHITE, ]; // let map = Map::default(); let map = Map::load("./assets/maps/many_walls.json").unwrap(); let (w, h) = map.dims; // let movement_vector = Line::new(origin, origin + geo::Point::new(delta_x, delta_y)); let position = Vec2D::new(w as f64 / 2., 50. 
+ h as f64 / 2.); let fov = 60.; let projection_plane_distance = ((WINDOW_WIDTH / 2) as f64 / (fov.to_radians() / 2.).tan()) as f64; let resolution = WINDOW_WIDTH as usize; Self { position, angle: std::f64::consts::PI, fov, wall_colors, map, keys: KeyStateHandler::new(), columns: Vec::with_capacity(resolution), resolution, projection_factor: projection_plane_distance * WALL_ACTUAL_HEIGHT, radian_per_column: fov.to_radians() / resolution as f64, column_width: WINDOW_WIDTH / resolution as u32, } } fn get_color(&self, index: usize) -> Color { self.wall_colors.get(index).copied().unwrap_or(Color::WHITE) } pub fn mouse_motion(&mut self, dx: i32) { self.angle += MOUSE_SENSITIVITY * dx as f64; } fn calculate_collisions(&mut self) { let mut current_angle = self.angle - (self.fov.to_radians() / 2.); let end_angle = current_angle + (self.radian_per_column * self.resolution as f64); self.columns.clear(); for _ in 0..self.resolution { let mut ray = Vec2D::from_angle(current_angle); ray.translate(&self.position); let mut max_height = f64::NEG_INFINITY; let mut wall_color_index = 0; for wall in self.map.walls.iter() { if let Some(intersection_vector) = ray.intersects(wall) { let raw_distance = ray.dist(&intersection_vector); let delta = current_angle - self.angle; let corrected_distance = raw_distance * (delta.cos() as f64); let projected_height = self.projection_factor / corrected_distance; if projected_height > max_height { max_height = projected_height; wall_color_index = wall.color_index; } } } if max_height.is_infinite() { self.columns.push((0, 0)); } else { self.columns .push((wall_color_index, max_height.round() as u32)); } current_angle += self.radian_per_column; } } fn update_camera(&mut self) { let mut delta = Vec2D::Origin; let par = Vec2D::from_angle(self.angle as f64); let perp = Vec2D::from_angle(self.angle + (90f64).to_radians()); if self.keys.is_pressed(Keycode::W) { delta += par; } if self.keys.is_pressed(Keycode::S) { delta += -par; } if self.keys.is_pressed(Keycode::A) { delta += -perp; } if self.keys.is_pressed(Keycode::D) { delta += perp; } // Normalize delta so that the player doesn't move faster moving in a diagonal direction if !delta.is_origin() { delta = delta.normalize() * MOVE_SPEED; } self.position.add_x_y_raw(delta.x_y()); self.position.clamp(self.map.dims, PLAYER_WALL_PADDING); } pub fn update(&mut self) { self.update_camera(); self.calculate_collisions(); } pub fn draw_minimap( &self, canvas: &mut Canvas<Window>, dims: (f64, f64), ) -> Result<(), String> { let minimap_offset = dims.0.max(dims.1) / 4.; let minimap_base = Vec2D::new(minimap_offset, minimap_offset); // Background canvas.set_draw_color(Color::BLACK); canvas.fill_rect(Rect::new( minimap_offset as i32, minimap_offset as i32, dims.0 as u32, dims.1 as u32, ))?; canvas.set_draw_color(Color::WHITE); // Player position let position_mapped = self.position.remap(self.map.dims, dims) + minimap_base; canvas.fill_rect(Rect::from_center(position_mapped, 8, 8))?; // Player lines let ray_scale = dims.0.max(dims.1) / 2.; let half_fov = self.fov.to_radians() / 2.; let forward_end = position_mapped + (Vec2D::from_angle(self.angle) * ray_scale); let left_end = position_mapped + (Vec2D::from_angle(self.angle - half_fov) * ray_scale); let right_end = position_mapped + (Vec2D::from_angle(self.angle + half_fov) * ray_scale); canvas.draw_lines(&[ position_mapped.into(), forward_end.into(), position_mapped.into(), left_end.into(), position_mapped.into(), right_end.into(), ] as &[Point])?; // TODO: FOV lines // Walls for wall in
self.map.walls.iter() { canvas.set_draw_color(self.get_color(wall.color_index)); let start = wall.a.remap(self.map.dims, dims) + minimap_base; let end = wall.b.remap(self.map.dims, dims) + minimap_base; canvas.draw_line(start, end)?; } // let mut current_angle = self.angle + (self.fov.to_radians() / 2.); // for _ in 0..self.resolution { // let mut ray = self.position; // ray.set_angle(current_angle); // ray += self.position; // let mut max_height = f64::NEG_INFINITY; // let mut collisions: Vec<(bool, Vec2D)> = vec![]; // let mut collision = Vec2D::Origin; // for wall in self.map.walls.iter() { // if let Some(intersection_vector) = ray.intersects(wall) { // let raw_distance = ray.dist(&intersection_vector); // let delta = current_angle - self.angle; // let corrected_distance = raw_distance * (delta.cos() as f64); // let projected_height = self.projection_factor / corrected_distance; // if projected_height > max_height { // max_height = projected_height; // collisions = collisions.into_iter().fold(vec![], |mut acc, cur| { // acc.push((false, cur.1)); // acc // }); // collisions.push((true, collision)); // collision = intersection_vector; // } else { // collisions.push((false, intersection_vector)); // } // } // } // if !max_height.is_infinite() { // canvas.set_draw_color(Color::RED); // canvas.draw_rects( // collisions // .into_iter() // .map(|(_, v)| { // Rect::from_center( // Point::new( // remap(v.x(), 0., self.map.dims.0, 0., dims.0).floor() as i32, // remap(v.y(), 0., self.map.dims.1, 0., dims.1).floor() as i32, // ) + minimap_base, // 2, // 2, // ) // }) // .collect::<Vec<Rect>>() // .as_slice(), // ); // } // current_angle -= self.radian_per_column; // } Ok(()) } fn render_frame(&self, canvas: &mut Canvas<Window>) -> Result<(), String> { let column_width_signed = self.column_width as i32; // TODO: Draw background let mut current_index = usize::MAX; let mut current_color = Color::BLACK; for (idx, (color_index, height)) in self.columns.iter().copied().enumerate() { if color_index != current_index { current_color = self.get_color(color_index); current_index = color_index; } let dim_amt = remap(height as f64, 0, WINDOW_HEIGHT, 255, 0).floor() as u8; canvas.set_draw_color(current_color.dim(dim_amt)); canvas.fill_rect(Rect::from_center( Point::new( idx as i32 * column_width_signed + (column_width_signed / 2), WINDOW_HEIGHT as i32 / 2, ), self.column_width, height, ))?; } Ok(()) } pub fn
(&self, canvas: &mut Canvas<Window>) -> Result<(), String> { self.render_frame(canvas)?; self.draw_minimap(canvas, (WINDOW_WIDTH as f64 / 5., WINDOW_WIDTH as f64 / 5.))?; Ok(()) } }
draw
identifier_name
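`render_frame` reuses `remap` for distance shading: a column's height is remapped from `[0, WINDOW_HEIGHT]` onto `[255, 0]`, so short (distant) columns receive a large dim amount and full-height (near) columns receive none. Sketched with assumed constants (`remap` from the file in scope):

fn main() {
    // Assuming WINDOW_HEIGHT = 480: a full-height (near) column gets no
    // dimming, while a 120-px (distant) column is dimmed by 191/255.
    let near = remap(480.0_f64, 0, 480, 255, 0).floor();
    let far = remap(120.0_f64, 0, 480, 255, 0).floor();
    assert_eq!(near, 0.0);
    assert_eq!(far, 191.0);
}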
opdb.rs
/* @author: xiao cai niao @datetime: 2019/11/6 */ use actix_web::{web}; use crate::webroute::route::{HostInfo, PostUserInfo, EditInfo, EditMainTain}; use crate::storage::rocks::{DbInfo, KeyValue, CfNameTypeCode, PrefixTypeCode}; use crate::ha::procotol::{DownNodeCheck, RecoveryInfo, ReplicationState, MysqlMonitorStatus}; use std::error::Error; use crate::ha::nodes_manager::{SlaveInfo}; use serde::{Serialize, Deserialize}; use crate::rand_string; use crate::ha::procotol::MysqlState; use crate::ha::sys_manager::MonitorSetting; use crate::webroute::new_route::ResponseMonitorStatic; use std::str::from_utf8; /// /// mysql node info, insert to rocksdb /// /// pub fn insert_mysql_host_info(data: web::Data<DbInfo>, info: &web::Json<HostInfo>) -> Result<(), Box<dyn Error>> { let check_unique = data.get(&info.host, &CfNameTypeCode::HaNodesInfo.get()); match check_unique { Ok(v) => { if v.value.len() > 0 { let a = format!("this key: ({}) already exists in the database",&info.host); return Err(a.into()); } } _ => {} } let v = HostInfoValue::new(info)?; v.save(&data)?; // initialize the node's monitoring configuration let monitor_info = MonitorSetting::new(&info.host); monitor_info.save(&data)?; Ok(()) } #[derive(Serialize, Deserialize, Debug)] pub struct HaChangeLog { pub key: String, // key format: host_timestamp, where host is the failed node pub cluster_name: String, pub old_master_info: DownNodeCheck, // info about the failed node pub new_master_binlog_info: SlaveInfo, // if the failed node could not append its binlog before the switchover, the binlog info read by the new master is saved here; it is used to decide whether to roll back when the failed node recovers pub recovery_info: RecoveryInfo, // new-master info needed to resume replication during recovery pub recovery_status: bool, // whether recovery has completed pub switch_status: bool, // switchover status } impl HaChangeLog { pub fn new() -> HaChangeLog { HaChangeLog{ key: "".to_string(), cluster_name: "".to_string(), old_master_info: DownNodeCheck { host: "".to_string(), dbport: 0 }, new_master_binlog_info: SlaveInfo { host: "".to_string(), dbport: 0, slave_info: ReplicationState { log_name: "".to_string(), read_log_pos: 0, exec_log_pos: 0 }, new_master: false }, recovery_info: RecoveryInfo { binlog: "".to_string(), position: 0, gtid: "".to_string(), masterhost: "".to_string(), masterport: 0, read_binlog: "".to_string(), read_position: 0 }, recovery_status: false, switch_status: false } } pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> { let key = format!("{}_{}",self.key.clone(), crate::timestamp()); let value = serde_json::to_string(self)?; let row = KeyValue{key, value}; db.put(&row, &CfNameTypeCode::HaChangeLog.get())?; return Ok(()); } pub fn update(&mut self, db: &web::Data<DbInfo>, row_key: String) -> Result<(), Box<dyn Error>> { let value = serde_json::to_string(self)?; let row = KeyValue{key: row_key, value}; db.put(&row, &CfNameTypeCode::HaChangeLog.get())?; return Ok(()); } } /// /// /// /// user info structure #[derive(Serialize, Deserialize, Clone, Debug)] pub struct UserInfo { pub user_name: String, pub password: String, pub hook_id: String, pub create_time: i64, pub update_time: i64 } impl UserInfo { pub fn new(info: &PostUserInfo) -> UserInfo { let create_time = crate::timestamp(); let update_time = crate::timestamp(); UserInfo{ user_name: info.user_name.clone(), password: info.password.clone(), hook_id: rand_string(), create_time, update_time } } } /// /// /// /// basic node info, keyed by host #[derive(Serialize, Deserialize, Debug, Clone)] pub struct HostInfoValue { pub host: String, // 127.0.0.1:3306 pub dbport: usize, // default 3306 pub rtype: String, // db or route pub cluster_name: String, // cluster name; route entries default to "default" pub online: bool, // whether the db is online: true/false pub insert_time: i64, pub update_time: i64, pub maintain: bool,
// whether in maintenance mode: true/false } impl HostInfoValue { pub fn new(info: &HostInfo) -> Result<HostInfoValue, Box<dyn Error>> { let h = HostInfoValue{ host: info.host.clone(), rtype: info.rtype.clone(), dbport: info.dbport.clone(), cluster_name: info.cluster_name.clone(), online: false, insert_time: crate::timestamp(), update_time: crate::timestamp(), maintain: false }; Ok(h) } /// /// write to the db pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> { let value = serde_json::to_string(&self)?; let row = KeyValue{key: self.host.clone(), value}; db.put(&row, &CfNameTypeCode::HaNodesInfo.get())?; Ok(()) } /// /// edit node info pub fn edit(&mut self, info: &web::Json<EditInfo>) { self.host = info.host.clone(); self.dbport = info.dbport.clone(); self.cluster_name = info.cluster_name.clone(); self.update_time = crate::timestamp(); } /// /// set the node's maintenance-mode state pub fn maintain(&mut self, info: &web::Json<EditMainTain>) { if info.maintain == "true".to_string() { self.maintain = false; }else { self.maintain = true; } self.update_time = crate::timestamp(); } /// /// fetch this node's state as stored in the db pub fn get_state(&self, db: &web::Data<DbInfo>) -> Result<MysqlState, Box<dyn Error>> { let kv = db.get(&self.host, &CfNameTypeCode::NodesState.get())?; if kv.value.len() > 0 { let state: MysqlState = serde_json::from_str(&kv.value)?; return Ok(state); }else { //let err = format!("this host: {} no state data", &self.host); //return Err(err.into()); let state = MysqlState::new(); return Ok(state); } } pub fn get_role(&self, db: &web::Data<DbInfo>) -> Result<String, Box<dyn Error>> { let state = self.get_state(db)?; Ok(state.role) } } /// /// /// /// /// list of clusters currently stored in the db #[derive(Serialize, Deserialize, Debug)] pub struct NodeClusterList{ pub cluster_name_list: Vec<String> } impl NodeClusterList{ pub fn new() -> NodeClusterList{ NodeClusterList { cluster_name_list: vec![] } } pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> { let result = db.iterator(&CfNameTypeCode::HaNodesInfo.get(), &String::from(""))?; for row in &result{ let value: HostInfoValue = serde_json::from_str(&row.value)?; if self.is_exists(&value.cluster_name){continue;} self.cluster_name_list.push(value.cluster_name.clone()); } Ok(()) } fn is_exists(&self, cluster_name: &String) -> bool { for cl in &self.cluster_name_list { if cl == cluster_name{ return true; } } return false; } } /// list of clusters present in the route info #[derive(Serialize, Deserialize, Debug)] pub struct RouteClusterList{ pub cluster_name_list: Vec<String> } impl RouteClusterList{ pub fn new() -> RouteClusterList { RouteClusterList{ cluster_name_list: vec![] } } pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{ let route_all = db.get_route_all()?; for route in &route_all { if !self.is_exists(&route.value.cluster_name){ self.cluster_name_list.push(route.value.cluster_name.clone()); } } Ok(()) } fn is_exists(&self, cluster_name: &String) -> bool { for cl in &self.cluster_name_list { if cl == cluster_name { return true; } } return false; } } /// /// /// /// /// node info #[derive(Deserialize, Serialize, Debug)] pub struct NodeInfo{ pub cluster_name: String, pub host: String, pub dbport: usize, pub online: bool, // whether online: true/false pub maintain: bool, // whether in maintenance mode: true/false pub role: String, // master/slave role pub master: String, pub sql_thread: bool, pub io_thread: bool, pub seconds_behind: usize, pub read_only: bool, pub version: String, pub executed_gtid_set: String, pub innodb_flush_log_at_trx_commit: usize, pub sync_binlog: usize, pub server_id: usize, pub event_scheduler:
String, pub sql_error: String } impl NodeInfo{ pub fn new(state: &MysqlState, node: &HostInfoValue) -> NodeInfo { let mut ni = NodeInfo{ cluster_name: node.cluster_name.clone(), host: node.host.clone(), dbport: node.dbport.clone(), online: node.online.clone(), maintain: node.maintain.clone(), role: state.role.clone(), master: state.master.clone(), sql_thread: state.sql_thread.clone(), io_thread: state.io_thread.clone(), seconds_behind: state.seconds_behind.clone(), read_only: state.read_only.clone(), version: state.version.clone(), executed_gtid_set: state.executed_gtid_set.clone(), innodb_flush_log_at_trx_commit: state.innodb_flush_log_at_trx_commit.clone(), sync_binlog: state.sync_binlog.clone(), server_id: state.server_id.clone(), event_scheduler: state.event_scheduler.clone(), sql_error: "".to_string() }; if state.last_io_error.len() > 0{ ni.sql_error = state.last_io_error.clone(); }else if state.last_sql_error.len() > 0 { ni.sql_error = state.last_sql_error.clone(); } return ni; } } /// /// /// /// /// node info for a single cluster #[derive(Deserialize, Serialize, Debug)] pub struct ClusterNodeInfo{ pub cluster_name: String, pub total: usize, pub nodes_info: Vec<NodeInfo> } impl ClusterNodeInfo{ pub fn new(cluster_name: &String) -> ClusterNodeInfo{ ClusterNodeInfo{ cluster_name: cluster_name.clone(), total: 0, nodes_info: vec![] } } pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{ let result = db.iterator(&CfNameTypeCode::HaNodesInfo.get(), &String::from(""))?; for row in &result{ let node: HostInfoValue = serde_json::from_str(&row.value)?; if &node.cluster_name == &self.cluster_name{ let state = node.get_state(db)?; let node_info = NodeInfo::new(&state, &node); self.total += 1; self.nodes_info.push(node_info); } } Ok(()) } /// /// aggregate monitoring info for all nodes, used on the home page /// /// iterate in reverse to fetch the latest record for each node; stop iterating once every node has its latest record pub fn static_monitor(&self, db: &web::Data<DbInfo>, rsm: &mut ResponseMonitorStatic) -> Result<(), Box<dyn Error>> { let cf_name = CfNameTypeCode::SystemData.get(); let mut tmp: Vec<String> = vec![]; // first check whether monitoring is enabled for node in &self.nodes_info{ if !self.check_monitor_setting(db, &node.host){ tmp.push(node.host.clone()); } } if let Some(cf) = db.db.cf_handle(&cf_name){ let mut iter = db.db.raw_iterator_cf(cf)?; iter.seek_to_last(); iter.prev(); 'all: while iter.valid() { if tmp.len() == self.nodes_info.len(){ break 'all; } if let Some(s) = iter.key(){ let key: String = from_utf8(&s.to_vec())?.parse()?; if key.starts_with(&PrefixTypeCode::NodeMonitorData.prefix()){ 'b: for n in &self.nodes_info{ if key.contains(n.host.as_str()){ for t in &tmp{ if t == &n.host{ break 'b; } } if let Some(v) = iter.value(){ let v: MysqlMonitorStatus = serde_json::from_slice(&v)?; rsm.update(&v); tmp.push(n.host.clone()); } break 'b; } } } } iter.prev(); } } Ok(()) } fn check_monitor_setting(&self, db: &web::Data<DbInfo>, host: &String) -> bool{ let a = db.prefix_get(&PrefixTypeCode::NodeMonitorSeting, host); match a { Ok(v) => { if v.value.len() > 0{ let value: MonitorSetting = serde_json::from_str(&v.value).unwrap(); return value.monitor.clone(); } } Err(e) => { info!("{}", e.to_string()); } } return false; } } /// /// impl MysqlState{ pub fn save(&self, db: &web::Data<DbInfo>, key: &String) -> Result<(), Box<dyn Error>> { let value = serde_json::to_string(&self)?; let a = KeyValue{key: key.clone(), value }; db.put(&a, &CfNameTyp
pub cluster_name: String, pub delay: usize } impl SlaveBehindSetting{ pub fn new(cluster_name: &String) -> SlaveBehindSetting { SlaveBehindSetting{ cluster_name: cluster_name.clone(), delay: 100 } } pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{ db.prefix_put(&PrefixTypeCode::SlaveDelaySeting, &self.cluster_name, &self)?; Ok(()) } }
eCode::NodesState.get())?; Ok(()) } } /// /// /// slave behind configuration struct #[derive(Serialize, Deserialize, Debug)] pub struct SlaveBehindSetting{
conditional_block
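The rows in this dump follow the schema file_name, prefix, suffix, middle, fim_type: the original source file is recovered as prefix + middle + suffix, and fim_type only records how the split point was chosen (conditional_block, identifier_name, random_line_split, identifier_body). A minimal sketch of that reassembly; the FimRow struct and reassemble helper are hypothetical, not part of any dataset tooling:

```rust
// Hypothetical row type mirroring the field order of this dump.
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

impl FimRow {
    // The original file is always prefix + middle + suffix; fim_type
    // does not affect reassembly.
    fn reassemble(&self) -> String {
        let mut out = String::with_capacity(
            self.prefix.len() + self.middle.len() + self.suffix.len(),
        );
        out.push_str(&self.prefix);
        out.push_str(&self.middle);
        out.push_str(&self.suffix);
        out
    }
}

fn main() {
    let row = FimRow {
        file_name: "opdb.rs".into(),
        prefix: "pub fn in".into(),
        suffix: "ata: web::Data<DbInfo>) {}".into(),
        middle: "sert_mysql_host_info(d".into(),
        fim_type: "identifier_name".into(),
    };
    assert!(row.reassemble().starts_with("pub fn insert_mysql_host_info"));
}
```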
opdb.rs
/* @author: xiao cai niao @datetime: 2019/11/6 */ use actix_web::{web}; use crate::webroute::route::{HostInfo, PostUserInfo, EditInfo, EditMainTain}; use crate::storage::rocks::{DbInfo, KeyValue, CfNameTypeCode, PrefixTypeCode}; use crate::ha::procotol::{DownNodeCheck, RecoveryInfo, ReplicationState, MysqlMonitorStatus}; use std::error::Error; use crate::ha::nodes_manager::{SlaveInfo}; use serde::{Serialize, Deserialize}; use crate::rand_string; use crate::ha::procotol::MysqlState; use crate::ha::sys_manager::MonitorSetting; use crate::webroute::new_route::ResponseMonitorStatic; use std::str::from_utf8; /// /// mysql node info, inserted into rocksdb /// /// pub fn in
ata: web::Data<DbInfo>, info: &web::Json<HostInfo>) -> Result<(), Box<dyn Error>> { let check_unique = data.get(&info.host, &CfNameTypeCode::HaNodesInfo.get()); match check_unique { Ok(v) => { if v.value.len() > 0 { let a = format!("this key: ({}) already exists in the database",&info.host); return Err(a.into()); } } _ => {} } let v = HostInfoValue::new(info)?; v.save(&data)?; //initialize the node monitoring config let monitor_info = MonitorSetting::new(&info.host); monitor_info.save(&data)?; Ok(()) } #[derive(Serialize, Deserialize, Debug)] pub struct HaChangeLog { pub key: String, //format: host_timestamp, where host is the downed node pub cluster_name: String, pub old_master_info: DownNodeCheck, //info of the downed node pub new_master_binlog_info: SlaveInfo, //if the downed node did not append its binlog before the switchover, the binlog info read from the new master is saved here; it is used to decide whether to roll back when the downed node recovers pub recovery_info: RecoveryInfo, //new-master info required to resume replication during crash recovery pub recovery_status: bool, //whether recovery is complete pub switch_status: bool, //switchover status } impl HaChangeLog { pub fn new() -> HaChangeLog { HaChangeLog{ key: "".to_string(), cluster_name: "".to_string(), old_master_info: DownNodeCheck { host: "".to_string(), dbport: 0 }, new_master_binlog_info: SlaveInfo { host: "".to_string(), dbport: 0, slave_info: ReplicationState { log_name: "".to_string(), read_log_pos: 0, exec_log_pos: 0 }, new_master: false }, recovery_info: RecoveryInfo { binlog: "".to_string(), position: 0, gtid: "".to_string(), masterhost: "".to_string(), masterport: 0, read_binlog: "".to_string(), read_position: 0 }, recovery_status: false, switch_status: false } } pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> { let key = format!("{}_{}",self.key.clone(), crate::timestamp()); let value = serde_json::to_string(self)?; let row = KeyValue{key, value}; db.put(&row, &CfNameTypeCode::HaChangeLog.get())?; return Ok(()); } pub fn update(&mut self, db: &web::Data<DbInfo>, row_key: String) -> Result<(), Box<dyn Error>> { let value = serde_json::to_string(self)?; let row = KeyValue{key: row_key, value}; db.put(&row, &CfNameTypeCode::HaChangeLog.get())?; return Ok(()); } } /// /// /// /// User info struct #[derive(Serialize, Deserialize, Clone, Debug)] pub struct UserInfo { pub user_name: String, pub password: String, pub hook_id: String, pub create_time: i64, pub update_time: i64 } impl UserInfo { pub fn new(info: &PostUserInfo) -> UserInfo { let create_time = crate::timestamp(); let update_time = crate::timestamp(); UserInfo{ user_name: info.user_name.clone(), password: info.password.clone(), hook_id: rand_string(), create_time, update_time } } } /// /// /// /// Basic node info, keyed by host #[derive(Serialize, Deserialize, Debug, Clone)] pub struct HostInfoValue { pub host: String, //127.0.0.1:3306 pub dbport: usize, //default 3306 pub rtype: String, //db or route pub cluster_name: String, //cluster name; the route type defaults to "default" pub online: bool, //whether the db is online: true/false pub insert_time: i64, pub update_time: i64, pub maintain: bool, //whether in maintenance mode: true/false } impl HostInfoValue { pub fn new(info: &HostInfo) -> Result<HostInfoValue, Box<dyn Error>> { let h = HostInfoValue{ host: info.host.clone(), rtype: info.rtype.clone(), dbport: info.dbport.clone(), cluster_name: info.cluster_name.clone(), online: false, insert_time: crate::timestamp(), update_time: crate::timestamp(), maintain: false }; Ok(h) } /// /// Write to the db pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> { let value = serde_json::to_string(&self)?; let row = KeyValue{key: self.host.clone(), value}; db.put(&row, &CfNameTypeCode::HaNodesInfo.get())?; Ok(()) } /// /// Edit the node info pub fn edit(&mut self, info: &web::Json<EditInfo>) { self.host = 
info.host.clone(); self.dbport = info.dbport.clone(); self.cluster_name = info.cluster_name.clone(); self.update_time = crate::timestamp(); } /// /// Set the node's maintenance-mode state pub fn maintain(&mut self, info: &web::Json<EditMainTain>) { if info.maintain == "true".to_string() { self.maintain = false; } else { self.maintain = true; } self.update_time = crate::timestamp(); } /// /// Get the state info saved in the db for the current node pub fn get_state(&self, db: &web::Data<DbInfo>) -> Result<MysqlState, Box<dyn Error>> { let kv = db.get(&self.host, &CfNameTypeCode::NodesState.get())?; if kv.value.len() > 0 { let state: MysqlState = serde_json::from_str(&kv.value)?; return Ok(state); } else { //let err = format!("this host: {} no state data", &self.host); //return Err(err.into()); let state = MysqlState::new(); return Ok(state); } } pub fn get_role(&self, db: &web::Data<DbInfo>) -> Result<String, Box<dyn Error>> { let state = self.get_state(db)?; Ok(state.role) } } /// /// /// /// /// Get the list of clusters currently in the db #[derive(Serialize, Deserialize, Debug)] pub struct NodeClusterList{ pub cluster_name_list: Vec<String> } impl NodeClusterList{ pub fn new() -> NodeClusterList{ NodeClusterList { cluster_name_list: vec![] } } pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> { let result = db.iterator(&CfNameTypeCode::HaNodesInfo.get(), &String::from(""))?; for row in &result{ let value: HostInfoValue = serde_json::from_str(&row.value)?; if self.is_exists(&value.cluster_name){continue;} self.cluster_name_list.push(value.cluster_name.clone()); } Ok(()) } fn is_exists(&self, cluster_name: &String) -> bool { for cl in &self.cluster_name_list { if cl == cluster_name{ return true; } } return false; } } /// Get the list of clusters present in the route info #[derive(Serialize, Deserialize, Debug)] pub struct RouteClusterList{ pub cluster_name_list: Vec<String> } impl RouteClusterList{ pub fn new() -> RouteClusterList { RouteClusterList{ cluster_name_list: vec![] } } pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{ let route_all = db.get_route_all()?; for route in &route_all { if !self.is_exists(&route.value.cluster_name){ self.cluster_name_list.push(route.value.cluster_name.clone()); } } Ok(()) } fn is_exists(&self, cluster_name: &String) -> bool { for cl in &self.cluster_name_list { if cl == cluster_name { return true; } } return false; } } /// /// /// /// /// Node information #[derive(Deserialize, Serialize, Debug)] pub struct NodeInfo{ pub cluster_name: String, pub host: String, pub dbport: usize, pub online: bool, //whether online: true/false pub maintain: bool, //whether in maintenance mode: true/false pub role: String, //master/slave role pub master: String, pub sql_thread: bool, pub io_thread: bool, pub seconds_behind: usize, pub read_only: bool, pub version: String, pub executed_gtid_set: String, pub innodb_flush_log_at_trx_commit: usize, pub sync_binlog: usize, pub server_id: usize, pub event_scheduler: String, pub sql_error: String } impl NodeInfo{ pub fn new(state: &MysqlState, node: &HostInfoValue) -> NodeInfo { let mut ni = NodeInfo{ cluster_name: node.cluster_name.clone(), host: node.host.clone(), dbport: node.dbport.clone(), online: node.online.clone(), maintain: node.maintain.clone(), role: state.role.clone(), master: state.master.clone(), sql_thread: state.sql_thread.clone(), io_thread: state.io_thread.clone(), seconds_behind: state.seconds_behind.clone(), read_only: state.read_only.clone(), version: state.version.clone(), executed_gtid_set: state.executed_gtid_set.clone(), innodb_flush_log_at_trx_commit: state.innodb_flush_log_at_trx_commit.clone(), sync_binlog: 
state.sync_binlog.clone(), server_id: state.server_id.clone(), event_scheduler: state.event_scheduler.clone(), sql_error: "".to_string() }; if state.last_io_error.len() > 0{ ni.sql_error = state.last_io_error.clone(); } else if state.last_sql_error.len() > 0 { ni.sql_error = state.last_sql_error.clone(); } return ni; } } /// /// /// /// /// Node information for each cluster #[derive(Deserialize, Serialize, Debug)] pub struct ClusterNodeInfo{ pub cluster_name: String, pub total: usize, pub nodes_info: Vec<NodeInfo> } impl ClusterNodeInfo{ pub fn new(cluster_name: &String) -> ClusterNodeInfo{ ClusterNodeInfo{ cluster_name: cluster_name.clone(), total: 0, nodes_info: vec![] } } pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{ let result = db.iterator(&CfNameTypeCode::HaNodesInfo.get(), &String::from(""))?; for row in &result{ let node: HostInfoValue = serde_json::from_str(&row.value)?; if &node.cluster_name == &self.cluster_name{ let state = node.get_state(db)?; let node_info = NodeInfo::new(&state, &node); self.total += 1; self.nodes_info.push(node_info); } } Ok(()) } /// /// Aggregate monitoring info for all nodes, used on the home page /// /// Iterate in reverse to fetch each node's latest record, and stop once every node's latest record has been fetched pub fn static_monitor(&self, db: &web::Data<DbInfo>, rsm: &mut ResponseMonitorStatic) -> Result<(), Box<dyn Error>> { let cf_name = CfNameTypeCode::SystemData.get(); let mut tmp: Vec<String> = vec![]; //first check whether monitoring is enabled for node in &self.nodes_info{ if !self.check_monitor_setting(db, &node.host){ tmp.push(node.host.clone()); } } if let Some(cf) = db.db.cf_handle(&cf_name){ let mut iter = db.db.raw_iterator_cf(cf)?; iter.seek_to_last(); iter.prev(); 'all: while iter.valid() { if tmp.len() == self.nodes_info.len(){ break 'all; } if let Some(s) = iter.key(){ let key: String = from_utf8(&s.to_vec())?.parse()?; if key.starts_with(&PrefixTypeCode::NodeMonitorData.prefix()){ 'b: for n in &self.nodes_info{ if key.contains(n.host.as_str()){ for t in &tmp{ if t == &n.host{ break 'b; } } if let Some(v) = iter.value(){ let v: MysqlMonitorStatus = serde_json::from_slice(&v)?; rsm.update(&v); tmp.push(n.host.clone()); } break 'b; } } } } // // iter.prev(); } } Ok(()) } fn check_monitor_setting(&self, db: &web::Data<DbInfo>, host: &String) -> bool{ let a = db.prefix_get(&PrefixTypeCode::NodeMonitorSeting, host); match a { Ok(v) => { if v.value.len() > 0{ let value: MonitorSetting = serde_json::from_str(&v.value).unwrap(); return value.monitor.clone(); } } Err(e) => { info!("{}", e.to_string()); } } return false; } } /// /// impl MysqlState{ pub fn save(&self, db: &web::Data<DbInfo>, key: &String) -> Result<(), Box<dyn Error>> { let value = serde_json::to_string(&self)?; let a = KeyValue{key: key.clone(), value }; db.put(&a, &CfNameTypeCode::NodesState.get())?; Ok(()) } } /// /// /// slave behind configuration struct #[derive(Serialize, Deserialize, Debug)] pub struct SlaveBehindSetting{ pub cluster_name: String, pub delay: usize } impl SlaveBehindSetting{ pub fn new(cluster_name: &String) -> SlaveBehindSetting { SlaveBehindSetting{ cluster_name: cluster_name.clone(), delay: 100 } } pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{ db.prefix_put(&PrefixTypeCode::SlaveDelaySeting, &self.cluster_name, &self)?; Ok(()) } }
sert_mysql_host_info(d
identifier_name
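The middle of the row above is the tail of the identifier insert_mysql_host_info, whose body (visible in the suffix) guards against duplicate keys before writing. A reduced sketch of that guard against an in-memory map; KvStore and insert_unique are hypothetical stand-ins for the DbInfo/RocksDB wrapper:

```rust
use std::collections::HashMap;
use std::error::Error;

// Hypothetical stand-in for the DbInfo/RocksDB wrapper used in the row.
struct KvStore { inner: HashMap<String, String> }

impl KvStore {
    // Same guard as insert_mysql_host_info: reject a write when the key
    // already holds a value.
    fn insert_unique(&mut self, key: &str, value: String) -> Result<(), Box<dyn Error>> {
        if self.inner.contains_key(key) {
            return Err(format!("this key: ({}) already exists in the database", key).into());
        }
        self.inner.insert(key.to_string(), value);
        Ok(())
    }
}

fn main() {
    let mut db = KvStore { inner: HashMap::new() };
    assert!(db.insert_unique("127.0.0.1:3306", "{}".into()).is_ok());
    assert!(db.insert_unique("127.0.0.1:3306", "{}".into()).is_err());
}
```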
opdb.rs
/* @author: xiao cai niao @datetime: 2019/11/6 */ use actix_web::{web}; use crate::webroute::route::{HostInfo, PostUserInfo, EditInfo, EditMainTain}; use crate::storage::rocks::{DbInfo, KeyValue, CfNameTypeCode, PrefixTypeCode}; use crate::ha::procotol::{DownNodeCheck, RecoveryInfo, ReplicationState, MysqlMonitorStatus}; use std::error::Error; use crate::ha::nodes_manager::{SlaveInfo}; use serde::{Serialize, Deserialize}; use crate::rand_string; use crate::ha::procotol::MysqlState; use crate::ha::sys_manager::MonitorSetting; use crate::webroute::new_route::ResponseMonitorStatic; use std::str::from_utf8; /// /// mysql node info, inserted into rocksdb /// /// pub fn insert_mysql_host_info(data: web::Data<DbInfo>, info: &web::Json<HostInfo>) -> Result<(), Box<dyn Error>> { let check_unique = data.get(&info.host, &CfNameTypeCode::HaNodesInfo.get()); match check_unique { Ok(v) => { if v.value.len() > 0 { let a = format!("this key: ({}) already exists in the database",&info.host); return Err(a.into()); } } _ => {} } let v = HostInfoValue::new(info)?; v.save(&data)?; //initialize the node monitoring config let monitor_info = MonitorSetting::new(&info.host); monitor_info.save(&data)?; Ok(()) } #[derive(Serialize, Deserialize, Debug)] pub struct HaChangeLog { pub key: String, //format: host_timestamp, where host is the downed node pub cluster_name: String, pub old_master_info: DownNodeCheck, //info of the downed node pub new_master_binlog_info: SlaveInfo, //if the downed node did not append its binlog before the switchover, the binlog info read from the new master is saved here; it is used to decide whether to roll back when the downed node recovers pub recovery_info: RecoveryInfo, //new-master info required to resume replication during crash recovery pub recovery_status: bool, //whether recovery is complete pub switch_status: bool, //switchover status } impl HaChangeLog { pub fn new() -> HaChangeLog { HaChangeLog{ key: "".to_string(), cluster_name: "".to_string(), old_master_info: DownNodeCheck { host: "".to_string(), dbport: 0 }, new_master_binlog_info: SlaveInfo { host: "".to_string(), dbport: 0, slave_info: ReplicationState { log_name: "".to_string(), read_log_pos: 0, exec_log_pos: 0 }, new_master: false }, recovery_info: RecoveryInfo { binlog: "".to_string(), position: 0, gtid: "".to_string(), masterhost: "".to_string(), masterport: 0, read_binlog: "".to_string(), read_position: 0 }, recovery_status: false, switch_status: false } } pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> { let key = format!("{}_{}",self.key.clone(), crate::timestamp()); let value = serde_json::to_string(self)?; let row = KeyValue{key, value}; db.put(&row, &CfNameTypeCode::HaChangeLog.get())?; return Ok(()); } pub fn update(&mut self, db: &web::Data<DbInfo>, row_key: String) -> Result<(), Box<dyn Error>> { let value = serde_json::to_string(self)?; let row = KeyValue{key: row_key, value}; db.put(&row, &CfNameTypeCode::HaChangeLog.get())?; return Ok(()); } } /// /// /// /// User info struct #[derive(Serialize, Deserialize, Clone, Debug)] pub struct UserInfo { pub user_name: String, pub password: String, pub hook_id: String, pub create_time: i64, pub update_time: i64 } impl UserInfo { pub fn new(info: &PostUserInfo) -> UserInfo { let create_time = crate::timestamp(); let update_time = crate::timestamp(); UserInfo{ user_name: info.user_name.clone(), password: info.password.clone(), hook_id: rand_string(), create_time, update_time } } } /// /// /// /// Basic node info, keyed by host #[derive(Serialize, Deserialize, Debug, Clone)] pub struct HostInfoValue { pub host: String, //127.0.0.1:3306 pub dbport: usize, //default 3306 pub rtype: String, //db or route pub cluster_name: String, //cluster name; the route type defaults to "default" pub online: bool, //whether the db is online: true/false pub insert_time: i64, pub update_time: i64, pub maintain: bool, 
//whether in maintenance mode: true/false } impl HostInfoValue { pub fn new(info: &HostInfo) -> Result<HostInfoValue, Box<dyn Error>> { let h = HostInfoValue{ host: info.host.clone(), rtype: info.rtype.clone(), dbport: info.dbport.clone(), cluster_name: info.cluster_name.clone(), online: false, insert_time: crate::timestamp(), update_time: crate::timestamp(), maintain: false }; Ok(h) } /// /// Write to the db pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> { let value = serde_json::to_string(&self)?; let row = KeyValue{key: self.host.clone(), value}; db.put(&row, &CfNameTypeCode::HaNodesInfo.get())?; Ok(()) } /// /// Edit the node info pub fn edit(&mut self, info: &web::Json<EditInfo>) { self.host = info.host.clone(); self.dbport = info.dbport.clone(); self.cluster_name = info.cluster_name.clone(); self.update_time = crate::timestamp(); } /// /// Set the node's maintenance-mode state pub fn maintain(&mut self, info: &web::Json<EditMainTain>) {
} self.update_time = crate::timestamp(); } /// /// Get the state info saved in the db for the current node pub fn get_state(&self, db: &web::Data<DbInfo>) -> Result<MysqlState, Box<dyn Error>> { let kv = db.get(&self.host, &CfNameTypeCode::NodesState.get())?; if kv.value.len() > 0 { let state: MysqlState = serde_json::from_str(&kv.value)?; return Ok(state); } else { //let err = format!("this host: {} no state data", &self.host); //return Err(err.into()); let state = MysqlState::new(); return Ok(state); } } pub fn get_role(&self, db: &web::Data<DbInfo>) -> Result<String, Box<dyn Error>> { let state = self.get_state(db)?; Ok(state.role) } } /// /// /// /// /// Get the list of clusters currently in the db #[derive(Serialize, Deserialize, Debug)] pub struct NodeClusterList{ pub cluster_name_list: Vec<String> } impl NodeClusterList{ pub fn new() -> NodeClusterList{ NodeClusterList { cluster_name_list: vec![] } } pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> { let result = db.iterator(&CfNameTypeCode::HaNodesInfo.get(), &String::from(""))?; for row in &result{ let value: HostInfoValue = serde_json::from_str(&row.value)?; if self.is_exists(&value.cluster_name){continue;} self.cluster_name_list.push(value.cluster_name.clone()); } Ok(()) } fn is_exists(&self, cluster_name: &String) -> bool { for cl in &self.cluster_name_list { if cl == cluster_name{ return true; } } return false; } } /// Get the list of clusters present in the route info #[derive(Serialize, Deserialize, Debug)] pub struct RouteClusterList{ pub cluster_name_list: Vec<String> } impl RouteClusterList{ pub fn new() -> RouteClusterList { RouteClusterList{ cluster_name_list: vec![] } } pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{ let route_all = db.get_route_all()?; for route in &route_all { if !self.is_exists(&route.value.cluster_name){ self.cluster_name_list.push(route.value.cluster_name.clone()); } } Ok(()) } fn is_exists(&self, cluster_name: &String) -> bool { for cl in &self.cluster_name_list { if cl == cluster_name { return true; } } return false; } } /// /// /// /// /// Node information #[derive(Deserialize, Serialize, Debug)] pub struct NodeInfo{ pub cluster_name: String, pub host: String, pub dbport: usize, pub online: bool, //whether online: true/false pub maintain: bool, //whether in maintenance mode: true/false pub role: String, //master/slave role pub master: String, pub sql_thread: bool, pub io_thread: bool, pub seconds_behind: usize, pub read_only: bool, pub version: String, pub executed_gtid_set: String, pub innodb_flush_log_at_trx_commit: usize, pub sync_binlog: usize, pub server_id: usize, pub event_scheduler: String, pub sql_error: String } impl NodeInfo{ pub fn new(state: &MysqlState, node: &HostInfoValue) -> NodeInfo { let mut ni = NodeInfo{ cluster_name: node.cluster_name.clone(), host: node.host.clone(), dbport: node.dbport.clone(), online: node.online.clone(), maintain: node.maintain.clone(), role: state.role.clone(), master: state.master.clone(), sql_thread: state.sql_thread.clone(), io_thread: state.io_thread.clone(), seconds_behind: state.seconds_behind.clone(), read_only: state.read_only.clone(), version: state.version.clone(), executed_gtid_set: state.executed_gtid_set.clone(), innodb_flush_log_at_trx_commit: state.innodb_flush_log_at_trx_commit.clone(), sync_binlog: state.sync_binlog.clone(), server_id: state.server_id.clone(), event_scheduler: state.event_scheduler.clone(), sql_error: "".to_string() }; if state.last_io_error.len() > 0{ ni.sql_error = state.last_io_error.clone(); } else if state.last_sql_error.len() > 0 { ni.sql_error = state.last_sql_error.clone(); } return ni; } } /// 
/// /// /// /// Node information for each cluster #[derive(Deserialize, Serialize, Debug)] pub struct ClusterNodeInfo{ pub cluster_name: String, pub total: usize, pub nodes_info: Vec<NodeInfo> } impl ClusterNodeInfo{ pub fn new(cluster_name: &String) -> ClusterNodeInfo{ ClusterNodeInfo{ cluster_name: cluster_name.clone(), total: 0, nodes_info: vec![] } } pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{ let result = db.iterator(&CfNameTypeCode::HaNodesInfo.get(), &String::from(""))?; for row in &result{ let node: HostInfoValue = serde_json::from_str(&row.value)?; if &node.cluster_name == &self.cluster_name{ let state = node.get_state(db)?; let node_info = NodeInfo::new(&state, &node); self.total += 1; self.nodes_info.push(node_info); } } Ok(()) } /// /// Aggregate monitoring info for all nodes, used on the home page /// /// Iterate in reverse to fetch each node's latest record, and stop once every node's latest record has been fetched pub fn static_monitor(&self, db: &web::Data<DbInfo>, rsm: &mut ResponseMonitorStatic) -> Result<(), Box<dyn Error>> { let cf_name = CfNameTypeCode::SystemData.get(); let mut tmp: Vec<String> = vec![]; //first check whether monitoring is enabled for node in &self.nodes_info{ if !self.check_monitor_setting(db, &node.host){ tmp.push(node.host.clone()); } } if let Some(cf) = db.db.cf_handle(&cf_name){ let mut iter = db.db.raw_iterator_cf(cf)?; iter.seek_to_last(); iter.prev(); 'all: while iter.valid() { if tmp.len() == self.nodes_info.len(){ break 'all; } if let Some(s) = iter.key(){ let key: String = from_utf8(&s.to_vec())?.parse()?; if key.starts_with(&PrefixTypeCode::NodeMonitorData.prefix()){ 'b: for n in &self.nodes_info{ if key.contains(n.host.as_str()){ for t in &tmp{ if t == &n.host{ break 'b; } } if let Some(v) = iter.value(){ let v: MysqlMonitorStatus = serde_json::from_slice(&v)?; rsm.update(&v); tmp.push(n.host.clone()); } break 'b; } } } } // // iter.prev(); } } Ok(()) } fn check_monitor_setting(&self, db: &web::Data<DbInfo>, host: &String) -> bool{ let a = db.prefix_get(&PrefixTypeCode::NodeMonitorSeting, host); match a { Ok(v) => { if v.value.len() > 0{ let value: MonitorSetting = serde_json::from_str(&v.value).unwrap(); return value.monitor.clone(); } } Err(e) => { info!("{}", e.to_string()); } } return false; } } /// /// impl MysqlState{ pub fn save(&self, db: &web::Data<DbInfo>, key: &String) -> Result<(), Box<dyn Error>> { let value = serde_json::to_string(&self)?; let a = KeyValue{key: key.clone(), value }; db.put(&a, &CfNameTypeCode::NodesState.get())?; Ok(()) } } /// /// /// slave behind configuration struct #[derive(Serialize, Deserialize, Debug)] pub struct SlaveBehindSetting{ pub cluster_name: String, pub delay: usize } impl SlaveBehindSetting{ pub fn new(cluster_name: &String) -> SlaveBehindSetting { SlaveBehindSetting{ cluster_name: cluster_name.clone(), delay: 100 } } pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{ db.prefix_put(&PrefixTypeCode::SlaveDelaySeting, &self.cluster_name, &self)?; Ok(()) } }
if info.maintain == "true".to_string() { self.maintain = false; } else { self.maintain = true;
random_line_split
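The middle of the row above is the body of the maintain setter: the posted string is inverted, so sending "true" turns maintenance mode off and anything else turns it on. A condensed sketch of that behavior; EditMainTain here is a minimal stand-in for the real request type:

```rust
// Minimal stand-in for the web::Json<EditMainTain> payload in the row.
struct EditMainTain { maintain: String }

// Posting "true" turns maintenance *off*; any other value turns it on,
// mirroring the inverted comparison in the row's middle.
fn next_maintain_state(info: &EditMainTain) -> bool {
    info.maintain != "true"
}

fn main() {
    assert_eq!(next_maintain_state(&EditMainTain { maintain: "true".into() }), false);
    assert_eq!(next_maintain_state(&EditMainTain { maintain: "false".into() }), true);
}
```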
opdb.rs
/* @author: xiao cai niao @datetime: 2019/11/6 */ use actix_web::{web}; use crate::webroute::route::{HostInfo, PostUserInfo, EditInfo, EditMainTain}; use crate::storage::rocks::{DbInfo, KeyValue, CfNameTypeCode, PrefixTypeCode}; use crate::ha::procotol::{DownNodeCheck, RecoveryInfo, ReplicationState, MysqlMonitorStatus}; use std::error::Error; use crate::ha::nodes_manager::{SlaveInfo}; use serde::{Serialize, Deserialize}; use crate::rand_string; use crate::ha::procotol::MysqlState; use crate::ha::sys_manager::MonitorSetting; use crate::webroute::new_route::ResponseMonitorStatic; use std::str::from_utf8; /// /// mysql node info, inserted into rocksdb /// /// pub fn insert_mysql_host_info(data: web::Data<DbInfo>, info: &web::Json<HostInfo>) -> Result<(), Box<dyn Error>> { let check_unique = data.get(&info.host, &CfNameTypeCode::HaNodesInfo.get()); match check_unique { Ok(v) => { if v.value.len() > 0 { let a = format!("this key: ({}) already exists in the database",&info.host); return Err(a.into()); } } _ => {} } let v = HostInfoValue::new(info)?; v.save(&data)?; //initialize the node monitoring config let monitor_info = MonitorSetting::new(&info.host); monitor_info.save(&data)?; Ok(()) } #[derive(Serialize, Deserialize, Debug)] pub struct HaChangeLog { pub key: String, //format: host_timestamp, where host is the downed node pub cluster_name: String, pub old_master_info: DownNodeCheck, //info of the downed node pub new_master_binlog_info: SlaveInfo, //if the downed node did not append its binlog before the switchover, the binlog info read from the new master is saved here; it is used to decide whether to roll back when the downed node recovers pub recovery_info: RecoveryInfo, //new-master info required to resume replication during crash recovery pub recovery_status: bool, //whether recovery is complete pub switch_status: bool, //switchover status } impl HaChangeLog { pub fn new() -> HaChangeLog { HaChangeLog{ key: "".to_string(), cluster_name: "".to_string(), old_master_info: DownNodeCheck { host: "".to_string(), dbport: 0 }, new_master_binlog_info: SlaveInfo { host: "".to_string(), dbport: 0, slave_info: ReplicationState { log_name: "".to_string(), read_log_pos: 0, exec_log_pos: 0 }, new_master: false }, recovery_info: RecoveryInfo { binlog: "".to_string(), position: 0, gtid: "".to_string(), masterhost: "".to_string(), masterport: 0, read_binlog: "".to_string(), read_position: 0 }, recovery_status: false, switch_status: false } } pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> { let key = format!("{}_{}",self.key.clone(), crate::timestamp()); let value = serde_json::to_string(self)?; let row = KeyValue{key, value}; db.put(&row, &CfNameTypeCode::HaChangeLog.get())?; return Ok(()); } pub fn update(&mut self, db: &web::Data<DbInfo>, row_key: String) -> Result<(), Box<dyn Error>> { let value = serde_json::to_string(self)?; let row = KeyValue{key: row_key, value}; db.put(&row, &CfNameTypeCode::HaChangeLog.get())?; return Ok(()); } } /// /// /// /// User info struct #[derive(Serialize, Deserialize, Clone, Debug)] pub struct UserInfo { pub user_name: String, pub password: String, pub hook_id: String, pub create_time: i64, pub update_time: i64 } impl UserInfo { pub fn new(info: &PostUserInfo) -> UserInfo { let create_time = crate::timestamp(); let update_time = crate::timestamp(); UserInfo{ user_name: info.user_name.clone(), password: inf
ult 3306 pub rtype: String, //db or route pub cluster_name: String, //cluster name; the route type defaults to "default" pub online: bool, //whether the db is online: true/false pub insert_time: i64, pub update_time: i64, pub maintain: bool, //whether in maintenance mode: true/false } impl HostInfoValue { pub fn new(info: &HostInfo) -> Result<HostInfoValue, Box<dyn Error>> { let h = HostInfoValue{ host: info.host.clone(), rtype: info.rtype.clone(), dbport: info.dbport.clone(), cluster_name: info.cluster_name.clone(), online: false, insert_time: crate::timestamp(), update_time: crate::timestamp(), maintain: false }; Ok(h) } /// /// Write to the db pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> { let value = serde_json::to_string(&self)?; let row = KeyValue{key: self.host.clone(), value}; db.put(&row, &CfNameTypeCode::HaNodesInfo.get())?; Ok(()) } /// /// Edit the node info pub fn edit(&mut self, info: &web::Json<EditInfo>) { self.host = info.host.clone(); self.dbport = info.dbport.clone(); self.cluster_name = info.cluster_name.clone(); self.update_time = crate::timestamp(); } /// /// Set the node's maintenance-mode state pub fn maintain(&mut self, info: &web::Json<EditMainTain>) { if info.maintain == "true".to_string() { self.maintain = false; } else { self.maintain = true; } self.update_time = crate::timestamp(); } /// /// Get the state info saved in the db for the current node pub fn get_state(&self, db: &web::Data<DbInfo>) -> Result<MysqlState, Box<dyn Error>> { let kv = db.get(&self.host, &CfNameTypeCode::NodesState.get())?; if kv.value.len() > 0 { let state: MysqlState = serde_json::from_str(&kv.value)?; return Ok(state); } else { //let err = format!("this host: {} no state data", &self.host); //return Err(err.into()); let state = MysqlState::new(); return Ok(state); } } pub fn get_role(&self, db: &web::Data<DbInfo>) -> Result<String, Box<dyn Error>> { let state = self.get_state(db)?; Ok(state.role) } } /// /// /// /// /// Get the list of clusters currently in the db #[derive(Serialize, Deserialize, Debug)] pub struct NodeClusterList{ pub cluster_name_list: Vec<String> } impl NodeClusterList{ pub fn new() -> NodeClusterList{ NodeClusterList { cluster_name_list: vec![] } } pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>> { let result = db.iterator(&CfNameTypeCode::HaNodesInfo.get(), &String::from(""))?; for row in &result{ let value: HostInfoValue = serde_json::from_str(&row.value)?; if self.is_exists(&value.cluster_name){continue;} self.cluster_name_list.push(value.cluster_name.clone()); } Ok(()) } fn is_exists(&self, cluster_name: &String) -> bool { for cl in &self.cluster_name_list { if cl == cluster_name{ return true; } } return false; } } /// Get the list of clusters present in the route info #[derive(Serialize, Deserialize, Debug)] pub struct RouteClusterList{ pub cluster_name_list: Vec<String> } impl RouteClusterList{ pub fn new() -> RouteClusterList { RouteClusterList{ cluster_name_list: vec![] } } pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{ let route_all = db.get_route_all()?; for route in &route_all { if !self.is_exists(&route.value.cluster_name){ self.cluster_name_list.push(route.value.cluster_name.clone()); } } Ok(()) } fn is_exists(&self, cluster_name: &String) -> bool { for cl in &self.cluster_name_list { if cl == cluster_name { return true; } } return false; } } /// /// /// /// /// Node information #[derive(Deserialize, Serialize, Debug)] pub struct NodeInfo{ pub cluster_name: String, pub host: String, pub dbport: usize, pub online: bool, //whether online: true/false pub maintain: bool, //whether in maintenance mode: true/false pub role: String, //master/slave role pub master: String, pub sql_thread: bool, pub io_thread: bool, pub seconds_behind: 
usize, pub read_only: bool, pub version: String, pub executed_gtid_set: String, pub innodb_flush_log_at_trx_commit: usize, pub sync_binlog: usize, pub server_id: usize, pub event_scheduler: String, pub sql_error: String } impl NodeInfo{ pub fn new(state: &MysqlState, node: &HostInfoValue) -> NodeInfo { let mut ni = NodeInfo{ cluster_name: node.cluster_name.clone(), host: node.host.clone(), dbport: node.dbport.clone(), online: node.online.clone(), maintain: node.maintain.clone(), role: state.role.clone(), master: state.master.clone(), sql_thread: state.sql_thread.clone(), io_thread: state.io_thread.clone(), seconds_behind: state.seconds_behind.clone(), read_only: state.read_only.clone(), version: state.version.clone(), executed_gtid_set: state.executed_gtid_set.clone(), innodb_flush_log_at_trx_commit: state.innodb_flush_log_at_trx_commit.clone(), sync_binlog: state.sync_binlog.clone(), server_id: state.server_id.clone(), event_scheduler: state.event_scheduler.clone(), sql_error: "".to_string() }; if state.last_io_error.len() > 0{ ni.sql_error = state.last_io_error.clone(); } else if state.last_sql_error.len() > 0 { ni.sql_error = state.last_sql_error.clone(); } return ni; } } /// /// /// /// /// Node information for each cluster #[derive(Deserialize, Serialize, Debug)] pub struct ClusterNodeInfo{ pub cluster_name: String, pub total: usize, pub nodes_info: Vec<NodeInfo> } impl ClusterNodeInfo{ pub fn new(cluster_name: &String) -> ClusterNodeInfo{ ClusterNodeInfo{ cluster_name: cluster_name.clone(), total: 0, nodes_info: vec![] } } pub fn init(&mut self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{ let result = db.iterator(&CfNameTypeCode::HaNodesInfo.get(), &String::from(""))?; for row in &result{ let node: HostInfoValue = serde_json::from_str(&row.value)?; if &node.cluster_name == &self.cluster_name{ let state = node.get_state(db)?; let node_info = NodeInfo::new(&state, &node); self.total += 1; self.nodes_info.push(node_info); } } Ok(()) } /// /// Aggregate monitoring info for all nodes, used on the home page /// /// Iterate in reverse to fetch each node's latest record, and stop once every node's latest record has been fetched pub fn static_monitor(&self, db: &web::Data<DbInfo>, rsm: &mut ResponseMonitorStatic) -> Result<(), Box<dyn Error>> { let cf_name = CfNameTypeCode::SystemData.get(); let mut tmp: Vec<String> = vec![]; //first check whether monitoring is enabled for node in &self.nodes_info{ if !self.check_monitor_setting(db, &node.host){ tmp.push(node.host.clone()); } } if let Some(cf) = db.db.cf_handle(&cf_name){ let mut iter = db.db.raw_iterator_cf(cf)?; iter.seek_to_last(); iter.prev(); 'all: while iter.valid() { if tmp.len() == self.nodes_info.len(){ break 'all; } if let Some(s) = iter.key(){ let key: String = from_utf8(&s.to_vec())?.parse()?; if key.starts_with(&PrefixTypeCode::NodeMonitorData.prefix()){ 'b: for n in &self.nodes_info{ if key.contains(n.host.as_str()){ for t in &tmp{ if t == &n.host{ break 'b; } } if let Some(v) = iter.value(){ let v: MysqlMonitorStatus = serde_json::from_slice(&v)?; rsm.update(&v); tmp.push(n.host.clone()); } break 'b; } } } } // // iter.prev(); } } Ok(()) } fn check_monitor_setting(&self, db: &web::Data<DbInfo>, host: &String) -> bool{ let a = db.prefix_get(&PrefixTypeCode::NodeMonitorSeting, host); match a { Ok(v) => { if v.value.len() > 0{ let value: MonitorSetting = serde_json::from_str(&v.value).unwrap(); return value.monitor.clone(); } } Err(e) => { info!("{}", e.to_string()); } } return false; } } /// /// impl MysqlState{ pub fn save(&self, db: &web::Data<DbInfo>, key: &String) -> Result<(), Box<dyn Error>> { let value = serde_json::to_string(&self)?; let a = KeyValue{key: key.clone(), value }; 
db.put(&a, &CfNameTypeCode::NodesState.get())?; Ok(()) } } /// /// /// slave behind configuration struct #[derive(Serialize, Deserialize, Debug)] pub struct SlaveBehindSetting{ pub cluster_name: String, pub delay: usize } impl SlaveBehindSetting{ pub fn new(cluster_name: &String) -> SlaveBehindSetting { SlaveBehindSetting{ cluster_name: cluster_name.clone(), delay: 100 } } pub fn save(&self, db: &web::Data<DbInfo>) -> Result<(), Box<dyn Error>>{ db.prefix_put(&PrefixTypeCode::SlaveDelaySeting, &self.cluster_name, &self)?; Ok(()) } }
o.password.clone(), hook_id: rand_string(), create_time, update_time } } } /// /// /// /// Basic node info, keyed by host #[derive(Serialize, Deserialize, Debug, Clone)] pub struct HostInfoValue { pub host: String, //127.0.0.1:3306 pub dbport: usize, //defa
identifier_body
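The save methods throughout these opdb.rs rows share one persistence pattern: serialize the struct to JSON with serde_json, wrap it in a KeyValue, and put it under a column-family name. A reduced sketch of the pattern; the generic save helper and the Vec sink standing in for db.put are hypothetical:

```rust
use serde::Serialize;
use std::error::Error;

#[derive(Serialize)]
struct KeyValue { key: String, value: String }

// Stand-in for db.put(&row, cf): collects (cf, row) pairs instead of
// writing to RocksDB.
fn save<T: Serialize>(
    key: &str,
    row: &T,
    sink: &mut Vec<(String, KeyValue)>,
    cf: &str,
) -> Result<(), Box<dyn Error>> {
    let value = serde_json::to_string(row)?; // struct -> JSON string
    sink.push((cf.to_string(), KeyValue { key: key.to_string(), value }));
    Ok(())
}

fn main() -> Result<(), Box<dyn Error>> {
    let mut sink = Vec::new();
    save("127.0.0.1:3306", &vec!["demo"], &mut sink, "nodes_state")?;
    assert_eq!(sink.len(), 1);
    Ok(())
}
```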
conn.rs
// Copyright 2016 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Provides a connection wrapper that handles the lower level tasks in sending //! or receiving data from the TCP socket, as well as dealing with timeouts. use std::iter; use std::ops::Deref; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use futures; use futures::{Future, Stream}; use futures::stream; use futures::sync::mpsc::{Sender, UnboundedReceiver, UnboundedSender}; use tokio_core::net::TcpStream; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::io::{read_exact, write_all}; use tokio_timer::{Timer, TimerError}; use core::core::hash::Hash; use core::ser; use msg::*; use types::Error; use rate_limit::*; use util::LOGGER; /// Handler to provide to the connection, will be called back anytime a message /// is received. The provided sender can be used to immediately send back /// another message. pub trait Handler: Sync + Send { /// Handle function to implement to process incoming messages. A sender to /// reply immediately as well as the message header and its unparsed body /// are provided. fn handle( &self, sender: UnboundedSender<Vec<u8>>, header: MsgHeader, body: Vec<u8>, ) -> Result<Option<Hash>, ser::Error>; } impl<F> Handler for F where F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>) -> Result<Option<Hash>, ser::Error>, F: Sync + Send, { fn handle( &self, sender: UnboundedSender<Vec<u8>>, header: MsgHeader, body: Vec<u8>, ) -> Result<Option<Hash>, ser::Error> { self(sender, header, body) } } /// A higher level connection wrapping the TcpStream. Maintains the amount of /// data transmitted and deals with the low-level task of sending and /// receiving data, parsing message headers and timeouts. #[allow(dead_code)] pub struct Connection { // Channel to push bytes to the remote peer outbound_chan: UnboundedSender<Vec<u8>>, // Close the connection with the remote peer close_chan: Sender<()>, // Bytes we've sent. sent_bytes: Arc<Mutex<u64>>, // Bytes we've received. received_bytes: Arc<Mutex<u64>>, // Counter for read errors. error_count: Mutex<u64>, } impl Connection { /// Starts listening on the provided connection and wraps it. Does not hang /// the current thread, instead just returns a future and the Connection /// itself. 
pub fn listen<F>( conn: TcpStream, handler: F, ) -> (Connection, Box<Future<Item = (), Error = Error>>) where F: Handler +'static, { let (reader, writer) = conn.split(); // Set Max Read to 12 Mb/s let reader = ThrottledReader::new(reader, 12_000_000); // Set Max Write to 12 Mb/s let writer = ThrottledWriter::new(writer, 12_000_000); // prepare the channel that will transmit data to the connection writer let (tx, rx) = futures::sync::mpsc::unbounded(); // same for closing the connection let (close_tx, close_rx) = futures::sync::mpsc::channel(1); let close_conn = close_rx .for_each(|_| Ok(())) .map_err(|_| Error::ConnectionClose); let me = Connection { outbound_chan: tx.clone(), close_chan: close_tx, sent_bytes: Arc::new(Mutex::new(0)), received_bytes: Arc::new(Mutex::new(0)), error_count: Mutex::new(0), }; // setup the reading future, getting messages from the peer and processing them let read_msg = me.read_msg(tx, reader, handler).map(|_| ()); // setting the writing future, getting messages from our system and sending // them out let write_msg = me.write_msg(rx, writer).map(|_| ()); // select between our different futures and return them let fut = Box::new( close_conn .select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e)) .map(|_| ()) .map_err(|(e, _)| e), ); (me, fut) } /// Prepares the future that gets message data produced by our system and /// sends it to the peer connection fn write_msg<W>( &self, rx: UnboundedReceiver<Vec<u8>>, writer: W, ) -> Box<Future<Item = W, Error = Error>> where W: AsyncWrite +'static, { let sent_bytes = self.sent_bytes.clone(); let send_data = rx .map_err(|_| Error::ConnectionClose) .map(move |data| { // add the count of bytes sent let mut sent_bytes = sent_bytes.lock().unwrap(); *sent_bytes += data.len() as u64; data }) // write the data and make sure the future returns the right types .fold(writer, |writer, data| { write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, _)| writer) }); Box::new(send_data) } /// Prepares the future reading from the peer connection, parsing each /// message and forwarding them appropriately based on their type fn read_msg<F, R>( &self, sender: UnboundedSender<Vec<u8>>, reader: R, handler: F, ) -> Box<Future<Item = R, Error = Error>> where F: Handler +'static, R: AsyncRead +'static, { // infinite iterator stream so we repeat the message reading logic until the // peer is stopped let iter = stream::iter_ok(iter::repeat(()).map(Ok::<(), Error>)); // setup the reading future, getting messages from the peer and processing them let recv_bytes = self.received_bytes.clone(); let handler = Arc::new(handler); let read_msg = iter.fold(reader, move |reader, _| { let recv_bytes = recv_bytes.clone(); let handler = handler.clone(); let sender_inner = sender.clone(); // first read the message header read_exact(reader, vec![0u8; HEADER_LEN as usize]) .from_err() .and_then(move |(reader, buf)| { let header = try!(ser::deserialize::<MsgHeader>(&mut &buf[..])); Ok((reader, header)) }) .and_then(move |(reader, header)| { // now that we have a size, proceed with the body read_exact(reader, vec![0u8; header.msg_len as usize]) .map(|(reader, buf)| (reader, header, buf)) .from_err() }) .and_then(move |(reader, header, buf)| { // add the count of bytes received let mut recv_bytes = recv_bytes.lock().unwrap(); *recv_bytes += header.serialized_len() + header.msg_len; // and handle the different message types let msg_type = header.msg_type; if let Err(e) = handler.handle(sender_inner.clone(), header, buf) { debug!(LOGGER, 
"Invalid {:?} message: {}", msg_type, e); return Err(Error::Serialization(e)); } Ok(reader) }) }); Box::new(read_msg) } /// Utility function to send any Writeable. Handles adding the header and /// serialization. pub fn
<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> { let mut body_data = vec![]; try!(ser::serialize(&mut body_data, body)); let mut data = vec![]; try!(ser::serialize( &mut data, &MsgHeader::new(t, body_data.len() as u64), )); data.append(&mut body_data); self.outbound_chan .unbounded_send(data) .map_err(|_| Error::ConnectionClose) } /// Bytes sent to and received from the remote peer. pub fn transmitted_bytes(&self) -> (u64, u64) { let sent = *self.sent_bytes.lock().unwrap(); let recv = *self.received_bytes.lock().unwrap(); (sent, recv) } } /// Connection wrapper that handles a request/response oriented interaction with /// a timeout. pub struct TimeoutConnection { underlying: Connection, expected_responses: Arc<Mutex<Vec<(Type, Option<Hash>, Instant)>>>, } impl TimeoutConnection { /// Same as Connection pub fn listen<F>( conn: TcpStream, handler: F, ) -> (TimeoutConnection, Box<Future<Item = (), Error = Error>>) where F: Handler + 'static, { let expects = Arc::new(Mutex::new(vec![])); // Decorates the handler to remove the "subscription" from the expected // responses. We got our replies, so no timeout should occur. let exp = expects.clone(); let (conn, fut) = Connection::listen(conn, move |sender, header: MsgHeader, data| { let msg_type = header.msg_type; let recv_h = try!(handler.handle(sender, header, data)); let mut expects = exp.lock().unwrap(); let filtered = expects .iter() .filter(|&&(typ, h, _): &&(Type, Option<Hash>, Instant)| { msg_type != typ || h.is_some() && recv_h != h }) .map(|&x| x) .collect::<Vec<_>>(); *expects = filtered; Ok(recv_h) }); // Registers a timer with the event loop to regularly check for timeouts. let exp = expects.clone(); let timer = Timer::default() .interval(Duration::new(2, 0)) .fold((), move |_, _| { let exp = exp.lock().unwrap(); for &(ty, h, t) in exp.deref() { if Instant::now() - t > Duration::new(5, 0) { trace!(LOGGER, "Too long: {:?} {:?}", ty, h); return Err(TimerError::TooLong); } } Ok(()) }) .from_err(); let me = TimeoutConnection { underlying: conn, expected_responses: expects, }; ( me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1)), ) } /// Sends a request and registers a timer on the provided message type and /// optionally the hash of the sent data. pub fn send_request<W: ser::Writeable>( &self, t: Type, rt: Type, body: &W, expect_h: Option<Hash>, ) -> Result<(), Error> { let _sent = try!(self.underlying.send_msg(t, body)); let mut expects = self.expected_responses.lock().unwrap(); expects.push((rt, expect_h, Instant::now())); Ok(()) } /// Same as Connection pub fn send_msg<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> { self.underlying.send_msg(t, body) } /// Same as Connection pub fn transmitted_bytes(&self) -> (u64, u64) { self.underlying.transmitted_bytes() } }
send_msg
identifier_name
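The middle of the row above is the identifier send_msg, whose body (in the suffix) frames every outgoing message as a serialized header carrying the body length, followed by the body itself. A generic sketch of that length-prefixed framing with an assumed 1-byte type tag and little-endian u64 length; the real code uses grin's ser::serialize and MsgHeader instead:

```rust
// Length-prefixed framing: type tag, body length, then the body.
fn frame(msg_type: u8, body: &[u8]) -> Vec<u8> {
    let mut data = Vec::with_capacity(1 + 8 + body.len());
    data.push(msg_type);                                        // message type
    data.extend_from_slice(&(body.len() as u64).to_le_bytes()); // body length
    data.extend_from_slice(body);                               // payload
    data
}

fn main() {
    let msg = frame(1, b"ping");
    assert_eq!(msg.len(), 1 + 8 + 4);
}
```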
conn.rs
// Copyright 2016 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Provides a connection wrapper that handles the lower level tasks in sending //! or receiving data from the TCP socket, as well as dealing with timeouts. use std::iter; use std::ops::Deref; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use futures; use futures::{Future, Stream}; use futures::stream; use futures::sync::mpsc::{Sender, UnboundedReceiver, UnboundedSender}; use tokio_core::net::TcpStream; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::io::{read_exact, write_all}; use tokio_timer::{Timer, TimerError}; use core::core::hash::Hash; use core::ser; use msg::*; use types::Error; use rate_limit::*; use util::LOGGER; /// Handler to provide to the connection, will be called back anytime a message /// is received. The provided sender can be used to immediately send back /// another message. pub trait Handler: Sync + Send { /// Handle function to implement to process incoming messages. A sender to /// reply immediately as well as the message header and its unparsed body /// are provided. fn handle( &self, sender: UnboundedSender<Vec<u8>>, header: MsgHeader, body: Vec<u8>, ) -> Result<Option<Hash>, ser::Error>; } impl<F> Handler for F where F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>) -> Result<Option<Hash>, ser::Error>, F: Sync + Send, { fn handle( &self, sender: UnboundedSender<Vec<u8>>, header: MsgHeader, body: Vec<u8>, ) -> Result<Option<Hash>, ser::Error> { self(sender, header, body) } } /// A higher level connection wrapping the TcpStream. Maintains the amount of /// data transmitted and deals with the low-level task of sending and /// receiving data, parsing message headers and timeouts. #[allow(dead_code)] pub struct Connection { // Channel to push bytes to the remote peer outbound_chan: UnboundedSender<Vec<u8>>, // Close the connection with the remote peer close_chan: Sender<()>, // Bytes we've sent. sent_bytes: Arc<Mutex<u64>>, // Bytes we've received. received_bytes: Arc<Mutex<u64>>, // Counter for read errors. error_count: Mutex<u64>, } impl Connection { /// Starts listening on the provided connection and wraps it. Does not hang /// the current thread, instead just returns a future and the Connection /// itself. 
pub fn listen<F>( conn: TcpStream, handler: F, ) -> (Connection, Box<Future<Item = (), Error = Error>>) where F: Handler +'static, { let (reader, writer) = conn.split(); // Set Max Read to 12 Mb/s let reader = ThrottledReader::new(reader, 12_000_000); // Set Max Write to 12 Mb/s let writer = ThrottledWriter::new(writer, 12_000_000); // prepare the channel that will transmit data to the connection writer let (tx, rx) = futures::sync::mpsc::unbounded(); // same for closing the connection let (close_tx, close_rx) = futures::sync::mpsc::channel(1); let close_conn = close_rx .for_each(|_| Ok(())) .map_err(|_| Error::ConnectionClose); let me = Connection { outbound_chan: tx.clone(), close_chan: close_tx, sent_bytes: Arc::new(Mutex::new(0)), received_bytes: Arc::new(Mutex::new(0)), error_count: Mutex::new(0), }; // setup the reading future, getting messages from the peer and processing them let read_msg = me.read_msg(tx, reader, handler).map(|_| ()); // setting the writing future, getting messages from our system and sending // them out let write_msg = me.write_msg(rx, writer).map(|_| ()); // select between our different futures and return them let fut = Box::new( close_conn .select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e)) .map(|_| ()) .map_err(|(e, _)| e), ); (me, fut) } /// Prepares the future that gets message data produced by our system and /// sends it to the peer connection fn write_msg<W>( &self, rx: UnboundedReceiver<Vec<u8>>, writer: W, ) -> Box<Future<Item = W, Error = Error>> where W: AsyncWrite +'static, { let sent_bytes = self.sent_bytes.clone(); let send_data = rx .map_err(|_| Error::ConnectionClose) .map(move |data| { // add the count of bytes sent let mut sent_bytes = sent_bytes.lock().unwrap(); *sent_bytes += data.len() as u64; data }) // write the data and make sure the future returns the right types .fold(writer, |writer, data| { write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, _)| writer) }); Box::new(send_data) } /// Prepares the future reading from the peer connection, parsing each /// message and forwarding them appropriately based on their type fn read_msg<F, R>( &self, sender: UnboundedSender<Vec<u8>>, reader: R, handler: F, ) -> Box<Future<Item = R, Error = Error>> where F: Handler +'static, R: AsyncRead +'static, { // infinite iterator stream so we repeat the message reading logic until the // peer is stopped let iter = stream::iter_ok(iter::repeat(()).map(Ok::<(), Error>)); // setup the reading future, getting messages from the peer and processing them let recv_bytes = self.received_bytes.clone(); let handler = Arc::new(handler); let read_msg = iter.fold(reader, move |reader, _| { let recv_bytes = recv_bytes.clone(); let handler = handler.clone(); let sender_inner = sender.clone(); // first read the message header read_exact(reader, vec![0u8; HEADER_LEN as usize]) .from_err() .and_then(move |(reader, buf)| { let header = try!(ser::deserialize::<MsgHeader>(&mut &buf[..])); Ok((reader, header)) }) .and_then(move |(reader, header)| { // now that we have a size, proceed with the body read_exact(reader, vec![0u8; header.msg_len as usize]) .map(|(reader, buf)| (reader, header, buf)) .from_err() }) .and_then(move |(reader, header, buf)| { // add the count of bytes received let mut recv_bytes = recv_bytes.lock().unwrap(); *recv_bytes += header.serialized_len() + header.msg_len; // and handle the different message types let msg_type = header.msg_type; if let Err(e) = handler.handle(sender_inner.clone(), header, buf) { debug!(LOGGER, 
"Invalid {:?} message: {}", msg_type, e); return Err(Error::Serialization(e)); } Ok(reader) }) }); Box::new(read_msg) } /// Utility function to send any Writeable. Handles adding the header and /// serialization. pub fn send_msg<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> { let mut body_data = vec![]; try!(ser::serialize(&mut body_data, body)); let mut data = vec![]; try!(ser::serialize( &mut data, &MsgHeader::new(t, body_data.len() as u64), )); data.append(&mut body_data); self.outbound_chan .unbounded_send(data) .map_err(|_| Error::ConnectionClose) } /// Bytes sent and received by this peer to the remote peer. pub fn transmitted_bytes(&self) -> (u64, u64) { let sent = *self.sent_bytes.lock().unwrap(); let recv = *self.received_bytes.lock().unwrap(); (sent, recv) } } /// Connection wrapper that handles a request/response oriented interaction with /// a timeout. pub struct TimeoutConnection { underlying: Connection, expected_responses: Arc<Mutex<Vec<(Type, Option<Hash>, Instant)>>>, } impl TimeoutConnection { /// Same as Connection pub fn listen<F>( conn: TcpStream, handler: F, ) -> (TimeoutConnection, Box<Future<Item = (), Error = Error>>) where F: Handler +'static, { let expects = Arc::new(Mutex::new(vec![]));
let (conn, fut) = Connection::listen(conn, move |sender, header: MsgHeader, data| { let msg_type = header.msg_type; let recv_h = try!(handler.handle(sender, header, data)); let mut expects = exp.lock().unwrap(); let filtered = expects .iter() .filter(|&&(typ, h, _): &&(Type, Option<Hash>, Instant)| { msg_type != typ || h.is_some() && recv_h != h }) .map(|&x| x) .collect::<Vec<_>>(); *expects = filtered; Ok(recv_h) }); // Registers a timer with the event loop to regularly check for timeouts. let exp = expects.clone(); let timer = Timer::default() .interval(Duration::new(2, 0)) .fold((), move |_, _| { let exp = exp.lock().unwrap(); for &(ty, h, t) in exp.deref() { if Instant::now() - t > Duration::new(5, 0) { trace!(LOGGER, "Too long: {:?} {:?}", ty, h); return Err(TimerError::TooLong); } } Ok(()) }) .from_err(); let me = TimeoutConnection { underlying: conn, expected_responses: expects, }; ( me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1)), ) } /// Sends a request and registers a timer on the provided message type and /// optionally the hash of the sent data. pub fn send_request<W: ser::Writeable>( &self, t: Type, rt: Type, body: &W, expect_h: Option<Hash>, ) -> Result<(), Error> { let _sent = try!(self.underlying.send_msg(t, body)); let mut expects = self.expected_responses.lock().unwrap(); expects.push((rt, expect_h, Instant::now())); Ok(()) } /// Same as Connection pub fn send_msg<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> { self.underlying.send_msg(t, body) } /// Same as Connection pub fn transmitted_bytes(&self) -> (u64, u64) { self.underlying.transmitted_bytes() } }
// Decorates the handler to remove the "subscription" from the expected // responses. We got our replies, so no timeout should occur. let exp = expects.clone();
random_line_split
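The row above splits TimeoutConnection::listen, where each sent request registers (expected type, optional hash, send time); a matching reply drops the entry, and a 2-second timer errors out once any entry is older than five seconds. A simplified sketch of that bookkeeping with plain types standing in for Type and Hash:

```rust
use std::time::{Duration, Instant};

// (expected message type, optional expected hash, time the request was sent);
// u8 and u64 stand in for the real Type and Hash.
type Entry = (u8, Option<u64>, Instant);

// Keep only the entries that do NOT match the reply -- the same predicate
// as the filter in the row above.
fn on_reply(expects: &mut Vec<Entry>, msg_type: u8, recv_h: Option<u64>) {
    expects.retain(|&(typ, h, _)| msg_type != typ || (h.is_some() && recv_h != h));
}

// The periodic sweep: anything older than five seconds counts as timed out.
fn timed_out(expects: &[Entry]) -> bool {
    expects.iter().any(|&(_, _, t)| Instant::now() - t > Duration::new(5, 0))
}

fn main() {
    let mut expects: Vec<Entry> = vec![(1, None, Instant::now())];
    on_reply(&mut expects, 1, None);
    assert!(expects.is_empty() && !timed_out(&expects));
}
```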
conn.rs
// Copyright 2016 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Provides a connection wrapper that handles the lower level tasks in sending //! or receiving data from the TCP socket, as well as dealing with timeouts. use std::iter; use std::ops::Deref; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use futures; use futures::{Future, Stream}; use futures::stream; use futures::sync::mpsc::{Sender, UnboundedReceiver, UnboundedSender}; use tokio_core::net::TcpStream; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::io::{read_exact, write_all}; use tokio_timer::{Timer, TimerError}; use core::core::hash::Hash; use core::ser; use msg::*; use types::Error; use rate_limit::*; use util::LOGGER; /// Handler to provide to the connection, will be called back anytime a message /// is received. The provided sender can be used to immediately send back /// another message. pub trait Handler: Sync + Send { /// Handle function to implement to process incoming messages. A sender to /// reply immediately as well as the message header and its unparsed body /// are provided. fn handle( &self, sender: UnboundedSender<Vec<u8>>, header: MsgHeader, body: Vec<u8>, ) -> Result<Option<Hash>, ser::Error>; } impl<F> Handler for F where F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>) -> Result<Option<Hash>, ser::Error>, F: Sync + Send, { fn handle( &self, sender: UnboundedSender<Vec<u8>>, header: MsgHeader, body: Vec<u8>, ) -> Result<Option<Hash>, ser::Error>
} /// A higher level connection wrapping the TcpStream. Maintains the amount of /// data transmitted and deals with the low-level task of sending and /// receiving data, parsing message headers and timeouts. #[allow(dead_code)] pub struct Connection { // Channel to push bytes to the remote peer outbound_chan: UnboundedSender<Vec<u8>>, // Close the connection with the remote peer close_chan: Sender<()>, // Bytes we've sent. sent_bytes: Arc<Mutex<u64>>, // Bytes we've received. received_bytes: Arc<Mutex<u64>>, // Counter for read errors. error_count: Mutex<u64>, } impl Connection { /// Start listening on the provided connection and wraps it. Does not hang /// the current thread, instead just returns a future and the Connection /// itself. pub fn listen<F>( conn: TcpStream, handler: F, ) -> (Connection, Box<Future<Item = (), Error = Error>>) where F: Handler +'static, { let (reader, writer) = conn.split(); // Set Max Read to 12 Mb/s let reader = ThrottledReader::new(reader, 12_000_000); // Set Max Write to 12 Mb/s let writer = ThrottledWriter::new(writer, 12_000_000); // prepare the channel that will transmit data to the connection writer let (tx, rx) = futures::sync::mpsc::unbounded(); // same for closing the connection let (close_tx, close_rx) = futures::sync::mpsc::channel(1); let close_conn = close_rx .for_each(|_| Ok(())) .map_err(|_| Error::ConnectionClose); let me = Connection { outbound_chan: tx.clone(), close_chan: close_tx, sent_bytes: Arc::new(Mutex::new(0)), received_bytes: Arc::new(Mutex::new(0)), error_count: Mutex::new(0), }; // setup the reading future, getting messages from the peer and processing them let read_msg = me.read_msg(tx, reader, handler).map(|_| ()); // setting the writing future, getting messages from our system and sending // them out let write_msg = me.write_msg(rx, writer).map(|_| ()); // select between our different futures and return them let fut = Box::new( close_conn .select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e)) .map(|_| ()) .map_err(|(e, _)| e), ); (me, fut) } /// Prepares the future that gets message data produced by our system and /// sends it to the peer connection fn write_msg<W>( &self, rx: UnboundedReceiver<Vec<u8>>, writer: W, ) -> Box<Future<Item = W, Error = Error>> where W: AsyncWrite +'static, { let sent_bytes = self.sent_bytes.clone(); let send_data = rx .map_err(|_| Error::ConnectionClose) .map(move |data| { // add the count of bytes sent let mut sent_bytes = sent_bytes.lock().unwrap(); *sent_bytes += data.len() as u64; data }) // write the data and make sure the future returns the right types .fold(writer, |writer, data| { write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, _)| writer) }); Box::new(send_data) } /// Prepares the future reading from the peer connection, parsing each /// message and forwarding them appropriately based on their type fn read_msg<F, R>( &self, sender: UnboundedSender<Vec<u8>>, reader: R, handler: F, ) -> Box<Future<Item = R, Error = Error>> where F: Handler +'static, R: AsyncRead +'static, { // infinite iterator stream so we repeat the message reading logic until the // peer is stopped let iter = stream::iter_ok(iter::repeat(()).map(Ok::<(), Error>)); // setup the reading future, getting messages from the peer and processing them let recv_bytes = self.received_bytes.clone(); let handler = Arc::new(handler); let read_msg = iter.fold(reader, move |reader, _| { let recv_bytes = recv_bytes.clone(); let handler = handler.clone(); let sender_inner = sender.clone(); // first read 
the message header read_exact(reader, vec![0u8; HEADER_LEN as usize]) .from_err() .and_then(move |(reader, buf)| { let header = try!(ser::deserialize::<MsgHeader>(&mut &buf[..])); Ok((reader, header)) }) .and_then(move |(reader, header)| { // now that we have a size, proceed with the body read_exact(reader, vec![0u8; header.msg_len as usize]) .map(|(reader, buf)| (reader, header, buf)) .from_err() }) .and_then(move |(reader, header, buf)| { // add the count of bytes received let mut recv_bytes = recv_bytes.lock().unwrap(); *recv_bytes += header.serialized_len() + header.msg_len; // and handle the different message types let msg_type = header.msg_type; if let Err(e) = handler.handle(sender_inner.clone(), header, buf) { debug!(LOGGER, "Invalid {:?} message: {}", msg_type, e); return Err(Error::Serialization(e)); } Ok(reader) }) }); Box::new(read_msg) } /// Utility function to send any Writeable. Handles adding the header and /// serialization. pub fn send_msg<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> { let mut body_data = vec![]; try!(ser::serialize(&mut body_data, body)); let mut data = vec![]; try!(ser::serialize( &mut data, &MsgHeader::new(t, body_data.len() as u64), )); data.append(&mut body_data); self.outbound_chan .unbounded_send(data) .map_err(|_| Error::ConnectionClose) } /// Bytes sent and received by this peer to the remote peer. pub fn transmitted_bytes(&self) -> (u64, u64) { let sent = *self.sent_bytes.lock().unwrap(); let recv = *self.received_bytes.lock().unwrap(); (sent, recv) } } /// Connection wrapper that handles a request/response oriented interaction with /// a timeout. pub struct TimeoutConnection { underlying: Connection, expected_responses: Arc<Mutex<Vec<(Type, Option<Hash>, Instant)>>>, } impl TimeoutConnection { /// Same as Connection pub fn listen<F>( conn: TcpStream, handler: F, ) -> (TimeoutConnection, Box<Future<Item = (), Error = Error>>) where F: Handler +'static, { let expects = Arc::new(Mutex::new(vec![])); // Decorates the handler to remove the "subscription" from the expected // responses. We got our replies, so no timeout should occur. let exp = expects.clone(); let (conn, fut) = Connection::listen(conn, move |sender, header: MsgHeader, data| { let msg_type = header.msg_type; let recv_h = try!(handler.handle(sender, header, data)); let mut expects = exp.lock().unwrap(); let filtered = expects .iter() .filter(|&&(typ, h, _): &&(Type, Option<Hash>, Instant)| { msg_type!= typ || h.is_some() && recv_h!= h }) .map(|&x| x) .collect::<Vec<_>>(); *expects = filtered; Ok(recv_h) }); // Registers a timer with the event loop to regularly check for timeouts. let exp = expects.clone(); let timer = Timer::default() .interval(Duration::new(2, 0)) .fold((), move |_, _| { let exp = exp.lock().unwrap(); for &(ty, h, t) in exp.deref() { if Instant::now() - t > Duration::new(5, 0) { trace!(LOGGER, "Too long: {:?} {:?}", ty, h); return Err(TimerError::TooLong); } } Ok(()) }) .from_err(); let me = TimeoutConnection { underlying: conn, expected_responses: expects, }; ( me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1)), ) } /// Sends a request and registers a timer on the provided message type and /// optionally the hash of the sent data. 
pub fn send_request<W: ser::Writeable>( &self, t: Type, rt: Type, body: &W, expect_h: Option<Hash>, ) -> Result<(), Error> { let _sent = try!(self.underlying.send_msg(t, body)); let mut expects = self.expected_responses.lock().unwrap(); expects.push((rt, expect_h, Instant::now())); Ok(()) } /// Same as Connection pub fn send_msg<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> { self.underlying.send_msg(t, body) } /// Same as Connection pub fn transmitted_bytes(&self) -> (u64, u64) { self.underlying.transmitted_bytes() } }
{ self(sender, header, body) }
identifier_body
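// A minimal sketch of the header-plus-body framing that `Connection::send_msg`
// in the conn.rs sample above performs via `ser::serialize` and
// `MsgHeader::new(t, body_data.len() as u64)`. The concrete byte layout used
// here (2-byte magic, 1-byte type tag, 8-byte big-endian length) is an
// illustrative assumption, not the crate's actual `MsgHeader` wire format.
fn frame_message(msg_type: u8, body: &[u8]) -> Vec<u8> {
    let mut data = Vec::with_capacity(11 + body.len());
    data.extend_from_slice(&[0x1e, 0xc5]); // hypothetical magic prefix
    data.push(msg_type); // message type tag
    data.extend_from_slice(&(body.len() as u64).to_be_bytes()); // body length
    data.extend_from_slice(body); // serialized body appended last
    data
}
// The reader side mirrors this: `read_msg` reads exactly HEADER_LEN bytes,
// deserializes the header, then reads exactly `msg_len` more bytes for the body.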
conn.rs
// Copyright 2016 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Provides a connection wrapper that handles the lower level tasks in sending //! or receiving data from the TCP socket, as well as dealing with timeouts. use std::iter; use std::ops::Deref; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use futures; use futures::{Future, Stream}; use futures::stream; use futures::sync::mpsc::{Sender, UnboundedReceiver, UnboundedSender}; use tokio_core::net::TcpStream; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::io::{read_exact, write_all}; use tokio_timer::{Timer, TimerError}; use core::core::hash::Hash; use core::ser; use msg::*; use types::Error; use rate_limit::*; use util::LOGGER; /// Handler to provide to the connection, will be called back anytime a message /// is received. The provided sender can be use to immediately send back /// another message. pub trait Handler: Sync + Send { /// Handle function to implement to process incoming messages. A sender to /// reply immediately as well as the message header and its unparsed body /// are provided. fn handle( &self, sender: UnboundedSender<Vec<u8>>, header: MsgHeader, body: Vec<u8>, ) -> Result<Option<Hash>, ser::Error>; } impl<F> Handler for F where F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>) -> Result<Option<Hash>, ser::Error>, F: Sync + Send, { fn handle( &self, sender: UnboundedSender<Vec<u8>>, header: MsgHeader, body: Vec<u8>, ) -> Result<Option<Hash>, ser::Error> { self(sender, header, body) } } /// A higher level connection wrapping the TcpStream. Maintains the amount of /// data transmitted and deals with the low-level task of sending and /// receiving data, parsing message headers and timeouts. #[allow(dead_code)] pub struct Connection { // Channel to push bytes to the remote peer outbound_chan: UnboundedSender<Vec<u8>>, // Close the connection with the remote peer close_chan: Sender<()>, // Bytes we've sent. sent_bytes: Arc<Mutex<u64>>, // Bytes we've received. received_bytes: Arc<Mutex<u64>>, // Counter for read errors. error_count: Mutex<u64>, } impl Connection { /// Start listening on the provided connection and wraps it. Does not hang /// the current thread, instead just returns a future and the Connection /// itself. 
pub fn listen<F>( conn: TcpStream, handler: F, ) -> (Connection, Box<Future<Item = (), Error = Error>>) where F: Handler +'static, { let (reader, writer) = conn.split(); // Set Max Read to 12 Mb/s let reader = ThrottledReader::new(reader, 12_000_000); // Set Max Write to 12 Mb/s let writer = ThrottledWriter::new(writer, 12_000_000); // prepare the channel that will transmit data to the connection writer let (tx, rx) = futures::sync::mpsc::unbounded(); // same for closing the connection let (close_tx, close_rx) = futures::sync::mpsc::channel(1); let close_conn = close_rx .for_each(|_| Ok(())) .map_err(|_| Error::ConnectionClose); let me = Connection { outbound_chan: tx.clone(), close_chan: close_tx, sent_bytes: Arc::new(Mutex::new(0)), received_bytes: Arc::new(Mutex::new(0)), error_count: Mutex::new(0), }; // setup the reading future, getting messages from the peer and processing them let read_msg = me.read_msg(tx, reader, handler).map(|_| ()); // setting the writing future, getting messages from our system and sending // them out let write_msg = me.write_msg(rx, writer).map(|_| ()); // select between our different futures and return them let fut = Box::new( close_conn .select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e)) .map(|_| ()) .map_err(|(e, _)| e), ); (me, fut) } /// Prepares the future that gets message data produced by our system and /// sends it to the peer connection fn write_msg<W>( &self, rx: UnboundedReceiver<Vec<u8>>, writer: W, ) -> Box<Future<Item = W, Error = Error>> where W: AsyncWrite +'static, { let sent_bytes = self.sent_bytes.clone(); let send_data = rx .map_err(|_| Error::ConnectionClose) .map(move |data| { // add the count of bytes sent let mut sent_bytes = sent_bytes.lock().unwrap(); *sent_bytes += data.len() as u64; data }) // write the data and make sure the future returns the right types .fold(writer, |writer, data| { write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, _)| writer) }); Box::new(send_data) } /// Prepares the future reading from the peer connection, parsing each /// message and forwarding them appropriately based on their type fn read_msg<F, R>( &self, sender: UnboundedSender<Vec<u8>>, reader: R, handler: F, ) -> Box<Future<Item = R, Error = Error>> where F: Handler +'static, R: AsyncRead +'static, { // infinite iterator stream so we repeat the message reading logic until the // peer is stopped let iter = stream::iter_ok(iter::repeat(()).map(Ok::<(), Error>)); // setup the reading future, getting messages from the peer and processing them let recv_bytes = self.received_bytes.clone(); let handler = Arc::new(handler); let read_msg = iter.fold(reader, move |reader, _| { let recv_bytes = recv_bytes.clone(); let handler = handler.clone(); let sender_inner = sender.clone(); // first read the message header read_exact(reader, vec![0u8; HEADER_LEN as usize]) .from_err() .and_then(move |(reader, buf)| { let header = try!(ser::deserialize::<MsgHeader>(&mut &buf[..])); Ok((reader, header)) }) .and_then(move |(reader, header)| { // now that we have a size, proceed with the body read_exact(reader, vec![0u8; header.msg_len as usize]) .map(|(reader, buf)| (reader, header, buf)) .from_err() }) .and_then(move |(reader, header, buf)| { // add the count of bytes received let mut recv_bytes = recv_bytes.lock().unwrap(); *recv_bytes += header.serialized_len() + header.msg_len; // and handle the different message types let msg_type = header.msg_type; if let Err(e) = handler.handle(sender_inner.clone(), header, buf)
Ok(reader) }) }); Box::new(read_msg) } /// Utility function to send any Writeable. Handles adding the header and /// serialization. pub fn send_msg<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> { let mut body_data = vec![]; try!(ser::serialize(&mut body_data, body)); let mut data = vec![]; try!(ser::serialize( &mut data, &MsgHeader::new(t, body_data.len() as u64), )); data.append(&mut body_data); self.outbound_chan .unbounded_send(data) .map_err(|_| Error::ConnectionClose) } /// Bytes sent and received by this peer to the remote peer. pub fn transmitted_bytes(&self) -> (u64, u64) { let sent = *self.sent_bytes.lock().unwrap(); let recv = *self.received_bytes.lock().unwrap(); (sent, recv) } } /// Connection wrapper that handles a request/response oriented interaction with /// a timeout. pub struct TimeoutConnection { underlying: Connection, expected_responses: Arc<Mutex<Vec<(Type, Option<Hash>, Instant)>>>, } impl TimeoutConnection { /// Same as Connection pub fn listen<F>( conn: TcpStream, handler: F, ) -> (TimeoutConnection, Box<Future<Item = (), Error = Error>>) where F: Handler +'static, { let expects = Arc::new(Mutex::new(vec![])); // Decorates the handler to remove the "subscription" from the expected // responses. We got our replies, so no timeout should occur. let exp = expects.clone(); let (conn, fut) = Connection::listen(conn, move |sender, header: MsgHeader, data| { let msg_type = header.msg_type; let recv_h = try!(handler.handle(sender, header, data)); let mut expects = exp.lock().unwrap(); let filtered = expects .iter() .filter(|&&(typ, h, _): &&(Type, Option<Hash>, Instant)| { msg_type!= typ || h.is_some() && recv_h!= h }) .map(|&x| x) .collect::<Vec<_>>(); *expects = filtered; Ok(recv_h) }); // Registers a timer with the event loop to regularly check for timeouts. let exp = expects.clone(); let timer = Timer::default() .interval(Duration::new(2, 0)) .fold((), move |_, _| { let exp = exp.lock().unwrap(); for &(ty, h, t) in exp.deref() { if Instant::now() - t > Duration::new(5, 0) { trace!(LOGGER, "Too long: {:?} {:?}", ty, h); return Err(TimerError::TooLong); } } Ok(()) }) .from_err(); let me = TimeoutConnection { underlying: conn, expected_responses: expects, }; ( me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1)), ) } /// Sends a request and registers a timer on the provided message type and /// optionally the hash of the sent data. pub fn send_request<W: ser::Writeable>( &self, t: Type, rt: Type, body: &W, expect_h: Option<(Hash)>, ) -> Result<(), Error> { let _sent = try!(self.underlying.send_msg(t, body)); let mut expects = self.expected_responses.lock().unwrap(); expects.push((rt, expect_h, Instant::now())); Ok(()) } /// Same as Connection pub fn send_msg<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> { self.underlying.send_msg(t, body) } /// Same as Connection pub fn transmitted_bytes(&self) -> (u64, u64) { self.underlying.transmitted_bytes() } }
{ debug!(LOGGER, "Invalid {:?} message: {}", msg_type, e); return Err(Error::Serialization(e)); }
conditional_block
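// Sketch of the timeout bookkeeping behind `TimeoutConnection` above, with
// placeholder `MsgType`/`MsgHash` aliases standing in for the crate's `Type`
// and `Hash` types (an assumption made only to keep the snippet self-contained).
use std::time::{Duration, Instant};

type MsgType = u8;
type MsgHash = u64;

struct Expectations(Vec<(MsgType, Option<MsgHash>, Instant)>);

impl Expectations {
    // `send_request` registers the reply it now expects.
    fn expect(&mut self, reply_type: MsgType, hash: Option<MsgHash>) {
        self.0.push((reply_type, hash, Instant::now()));
    }
    // The decorated handler drops every expectation the reply satisfies;
    // this mirrors the `filter` predicate in `TimeoutConnection::listen`.
    fn satisfy(&mut self, reply_type: MsgType, recv_hash: Option<MsgHash>) {
        self.0
            .retain(|&(t, h, _)| t != reply_type || (h.is_some() && recv_hash != h));
    }
    // The 2-second timer trips if any expectation is older than 5 seconds.
    fn timed_out(&self) -> bool {
        self.0.iter().any(|&(_, _, at)| at.elapsed() > Duration::new(5, 0))
    }
}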
client.rs
use std::thread; use std::net::SocketAddr; use std::net::ToSocketAddrs; use std::sync::mpsc; use std::sync::Arc; use futures; use futures::Future; use futures::stream::Stream; use tokio_core; use tokio_core::reactor; use solicit::http::HttpScheme; use solicit::http::HttpError; use solicit::http::Header; use solicit::http::StaticHeader; use method::MethodDescriptor; use error::*; use result::*; use futures_misc::*; use futures_grpc::*; use grpc::*; use http_client::*; use solicit_misc::*; use assert_types::*; trait GrpcResponseHandlerTrait : Send +'static + HttpClientResponseHandler { } struct GrpcResponseHandlerTyped<Req : Send +'static, Resp : Send +'static> { method: Arc<MethodDescriptor<Req, Resp>>, complete: tokio_core::channel::Sender<ResultOrEof<Resp, GrpcError>>, remaining_response: Vec<u8>, } impl<Req : Send +'static, Resp : Send +'static> GrpcResponseHandlerTrait for GrpcResponseHandlerTyped<Req, Resp> { } impl<Req : Send +'static, Resp : Send +'static> HttpClientResponseHandler for GrpcResponseHandlerTyped<Req, Resp> { fn headers(&mut self, headers: Vec<StaticHeader>) -> bool { println!("client: received headers"); if slice_get_header(&headers, ":status")!= Some("200") { if let Some(message) = slice_get_header(&headers, HEADER_GRPC_MESSAGE) { self.complete.send(ResultOrEof::Error(GrpcError::GrpcMessage(GrpcMessageError { grpc_message: message.to_owned() }))).unwrap(); } else { self.complete.send(ResultOrEof::Error(GrpcError::Other("not 200"))).unwrap(); } false } else { true } } fn data_frame(&mut self, chunk: Vec<u8>) -> bool { self.remaining_response.extend(&chunk); loop { let len = match parse_grpc_frame(&self.remaining_response) { Err(e) => { self.complete.send(ResultOrEof::Error(e)).unwrap(); return false; } Ok(None) => break, Ok(Some((message, len))) => { let resp = self.method.resp_marshaller.read(&message); self.complete.send(From::from(resp)).ok(); len } }; self.remaining_response.drain(..len); } true } fn trailers(&mut self, headers: Vec<StaticHeader>) -> bool { let _status_200 = slice_get_header(&headers, ":status") == Some("200"); let grpc_status_0 = slice_get_header(&headers, HEADER_GRPC_STATUS) == Some("0"); if /* status_200 && */ grpc_status_0 { true } else { if let Some(message) = slice_get_header(&headers, HEADER_GRPC_MESSAGE) { self.complete.send(ResultOrEof::Error(GrpcError::GrpcMessage(GrpcMessageError { grpc_message: message.to_owned() }))).unwrap(); } else { self.complete.send(ResultOrEof::Error(GrpcError::Other("not xxx"))).unwrap(); } false } } fn end(&mut self) { self.complete.send(ResultOrEof::Eof).unwrap(); } } struct GrpcResponseHandler { tr: Box<GrpcResponseHandlerTrait>, } impl HttpClientResponseHandler for GrpcResponseHandler { fn headers(&mut self, headers: Vec<StaticHeader>) -> bool { self.tr.headers(headers)
fn data_frame(&mut self, chunk: Vec<u8>) -> bool { self.tr.data_frame(chunk) } fn trailers(&mut self, headers: Vec<StaticHeader>) -> bool { self.tr.trailers(headers) } fn end(&mut self) { self.tr.end() } } // Data sent from event loop to GrpcClient struct LoopToClient { // used only once to send shutdown signal shutdown_tx: tokio_core::channel::Sender<()>, loop_handle: reactor::Remote, http_conn: Arc<HttpClientConnectionAsync<GrpcResponseHandler>>, } fn _assert_loop_to_client() { assert_send::<reactor::Remote>(); assert_send::<HttpClientConnectionAsync<GrpcResponseHandler>>(); assert_send::<HttpClientConnectionAsync<GrpcResponseHandler>>(); assert_sync::<HttpClientConnectionAsync<GrpcResponseHandler>>(); assert_send::<Arc<HttpClientConnectionAsync<GrpcResponseHandler>>>(); assert_send::<tokio_core::channel::Sender<()>>(); assert_send::<LoopToClient>(); } /// gRPC client implementation. /// Used by generated code. pub struct GrpcClient { loop_to_client: LoopToClient, thread_join_handle: Option<thread::JoinHandle<()>>, host: String, http_scheme: HttpScheme, } impl GrpcClient { /// Create a client connected to specified host and port. pub fn new(host: &str, port: u16) -> GrpcResult<GrpcClient> { // TODO: sync // TODO: try connect to all addrs let socket_addr = try!((host, port).to_socket_addrs()).next().unwrap(); // We need some data back from event loop. // This channel is used to exchange that data let (get_from_loop_tx, get_from_loop_rx) = mpsc::channel(); // Start event loop. let join_handle = thread::spawn(move || { run_client_event_loop(socket_addr, get_from_loop_tx); }); // Get back call channel and shutdown channel. let loop_to_client = try!(get_from_loop_rx.recv() .map_err(|_| GrpcError::Other("get response from loop"))); Ok(GrpcClient { loop_to_client: loop_to_client, thread_join_handle: Some(join_handle), host: host.to_owned(), http_scheme: HttpScheme::Http, }) } pub fn new_resp_channel<Resp : Send +'static>(&self) -> futures::Oneshot<(tokio_core::channel::Sender<ResultOrEof<Resp, GrpcError>>, GrpcStreamSend<Resp>)> { let (one_sender, one_receiver) = futures::oneshot(); self.loop_to_client.loop_handle.spawn(move |handle| { let (sender, receiver) = tokio_core::channel::channel(&handle).unwrap(); let receiver: GrpcStreamSend<ResultOrEof<Resp, GrpcError>> = Box::new(receiver.map_err(GrpcError::from)); let receiver: GrpcStreamSend<Resp> = Box::new(stream_with_eof_and_error(receiver)); one_sender.complete((sender, receiver)); futures::finished(()) }); one_receiver } pub fn call_impl<Req : Send +'static, Resp : Send +'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcStreamSend<Resp> { let host = self.host.clone(); let http_scheme = self.http_scheme.clone(); let http_conn = self.loop_to_client.http_conn.clone(); // A channel to send response back to caller let future = self.new_resp_channel().map_err(GrpcError::from).and_then(move |(complete, receiver)| { let headers = vec![ Header::new(":method", "POST"), Header::new(":path", method.name.clone()), Header::new(":authority", host.clone()), Header::new(":scheme", http_scheme.as_bytes()), ]; let request_frames = { let method = method.clone(); req .and_then(move |req| { let grpc_frame = try!(method.req_marshaller.write(&req)); Ok(write_grpc_frame_to_vec(&grpc_frame)) }) .map_err(|e| HttpError::Other(Box::new(e))) }; let start_request = http_conn.start_request( headers, Box::new(request_frames), GrpcResponseHandler { tr: Box::new(GrpcResponseHandlerTyped { method: method.clone(), complete: complete, 
remaining_response: Vec::new(), }), } ).map_err(GrpcError::from); let receiver: GrpcStreamSend<Resp> = receiver; start_request.map(move |()| receiver) }); let s: GrpcStreamSend<Resp> = future_flatten_to_stream(future); s } pub fn call_unary<Req : Send + 'static, Resp : Send + 'static>(&self, req: Req, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcFutureSend<Resp> { stream_single_send(self.call_impl(Box::new(stream_once_send(req)), method)) } pub fn call_server_streaming<Req : Send + 'static, Resp : Send + 'static>(&self, req: Req, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcStreamSend<Resp> { self.call_impl(stream_once_send(req).boxed(), method) } pub fn call_client_streaming<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcFutureSend<Resp> { stream_single_send(self.call_impl(req, method)) } pub fn call_bidi<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcStreamSend<Resp> { self.call_impl(req, method) } } // We shut down the client in the destructor. impl Drop for GrpcClient { fn drop(&mut self) { // ignore the error because the event loop may already be dead self.loop_to_client.shutdown_tx.send(()).ok(); // do not ignore errors because we own the event loop thread self.thread_join_handle.take().expect("handle.take") .join().expect("join thread"); } } // Event loop entry point fn run_client_event_loop( socket_addr: SocketAddr, send_to_back: mpsc::Sender<LoopToClient>) { // Create an event loop. let mut lp = reactor::Core::new().unwrap(); // Create a channel to receive shutdown signal. let (shutdown_tx, shutdown_rx) = tokio_core::channel::channel(&lp.handle()).unwrap(); let (http_conn, http_conn_future) = HttpClientConnectionAsync::new(lp.handle(), &socket_addr); let http_conn_future: GrpcFuture<_> = Box::new(http_conn_future.map_err(GrpcError::from)); // Send channels back to GrpcClient send_to_back .send(LoopToClient { shutdown_tx: shutdown_tx, loop_handle: lp.remote(), http_conn: Arc::new(http_conn), }) .expect("send back"); let shutdown = shutdown_rx.into_future().map_err(|(e, _)| GrpcError::from(e)).and_then(move |_| { // Must complete with an error, // so `join` with this future cancels the other future. futures::failed::<(), _>(GrpcError::Other("shutdown")) }); // Wait for either completion of the connection (i.e. an error) // or the shutdown signal. let done = http_conn_future.join(shutdown); // TODO: do not ignore error lp.run(done).ok(); }
}
random_line_split
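// Sketch of the standard gRPC length-prefixed message framing that
// `write_grpc_frame_to_vec` and `parse_grpc_frame` in the client.rs sample
// above presumably implement: one compression-flag byte, a 4-byte big-endian
// payload length, then the payload. Treat this as background, not the
// crate's exact code.
fn write_grpc_frame(payload: &[u8]) -> Vec<u8> {
    let mut frame = Vec::with_capacity(5 + payload.len());
    frame.push(0); // 0 = uncompressed
    frame.extend_from_slice(&(payload.len() as u32).to_be_bytes());
    frame.extend_from_slice(payload);
    frame
}

// Returns None while the buffer holds only a partial frame; `data_frame`
// above loops for exactly this reason, draining one complete frame at a time.
fn parse_frame(buf: &[u8]) -> Option<(&[u8], usize)> {
    if buf.len() < 5 {
        return None;
    }
    let len = u32::from_be_bytes([buf[1], buf[2], buf[3], buf[4]]) as usize;
    if buf.len() < 5 + len {
        return None;
    }
    Some((&buf[5..5 + len], 5 + len))
}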
client.rs
use std::thread; use std::net::SocketAddr; use std::net::ToSocketAddrs; use std::sync::mpsc; use std::sync::Arc; use futures; use futures::Future; use futures::stream::Stream; use tokio_core; use tokio_core::reactor; use solicit::http::HttpScheme; use solicit::http::HttpError; use solicit::http::Header; use solicit::http::StaticHeader; use method::MethodDescriptor; use error::*; use result::*; use futures_misc::*; use futures_grpc::*; use grpc::*; use http_client::*; use solicit_misc::*; use assert_types::*; trait GrpcResponseHandlerTrait : Send +'static + HttpClientResponseHandler { } struct GrpcResponseHandlerTyped<Req : Send +'static, Resp : Send +'static> { method: Arc<MethodDescriptor<Req, Resp>>, complete: tokio_core::channel::Sender<ResultOrEof<Resp, GrpcError>>, remaining_response: Vec<u8>, } impl<Req : Send +'static, Resp : Send +'static> GrpcResponseHandlerTrait for GrpcResponseHandlerTyped<Req, Resp> { } impl<Req : Send +'static, Resp : Send +'static> HttpClientResponseHandler for GrpcResponseHandlerTyped<Req, Resp> { fn headers(&mut self, headers: Vec<StaticHeader>) -> bool { println!("client: received headers"); if slice_get_header(&headers, ":status")!= Some("200") { if let Some(message) = slice_get_header(&headers, HEADER_GRPC_MESSAGE) { self.complete.send(ResultOrEof::Error(GrpcError::GrpcMessage(GrpcMessageError { grpc_message: message.to_owned() }))).unwrap(); } else { self.complete.send(ResultOrEof::Error(GrpcError::Other("not 200"))).unwrap(); } false } else { true } } fn data_frame(&mut self, chunk: Vec<u8>) -> bool { self.remaining_response.extend(&chunk); loop { let len = match parse_grpc_frame(&self.remaining_response) { Err(e) => { self.complete.send(ResultOrEof::Error(e)).unwrap(); return false; } Ok(None) => break, Ok(Some((message, len))) => { let resp = self.method.resp_marshaller.read(&message); self.complete.send(From::from(resp)).ok(); len } }; self.remaining_response.drain(..len); } true } fn trailers(&mut self, headers: Vec<StaticHeader>) -> bool { let _status_200 = slice_get_header(&headers, ":status") == Some("200"); let grpc_status_0 = slice_get_header(&headers, HEADER_GRPC_STATUS) == Some("0"); if /* status_200 && */ grpc_status_0 { true } else { if let Some(message) = slice_get_header(&headers, HEADER_GRPC_MESSAGE) { self.complete.send(ResultOrEof::Error(GrpcError::GrpcMessage(GrpcMessageError { grpc_message: message.to_owned() }))).unwrap(); } else { self.complete.send(ResultOrEof::Error(GrpcError::Other("not xxx"))).unwrap(); } false } } fn end(&mut self) { self.complete.send(ResultOrEof::Eof).unwrap(); } } struct GrpcResponseHandler { tr: Box<GrpcResponseHandlerTrait>, } impl HttpClientResponseHandler for GrpcResponseHandler { fn headers(&mut self, headers: Vec<StaticHeader>) -> bool { self.tr.headers(headers) } fn data_frame(&mut self, chunk: Vec<u8>) -> bool { self.tr.data_frame(chunk) } fn trailers(&mut self, headers: Vec<StaticHeader>) -> bool { self.tr.trailers(headers) } fn end(&mut self) { self.tr.end() } } // Data sent from event loop to GrpcClient struct LoopToClient { // used only once to send shutdown signal shutdown_tx: tokio_core::channel::Sender<()>, loop_handle: reactor::Remote, http_conn: Arc<HttpClientConnectionAsync<GrpcResponseHandler>>, } fn _assert_loop_to_client() { assert_send::<reactor::Remote>(); assert_send::<HttpClientConnectionAsync<GrpcResponseHandler>>(); assert_send::<HttpClientConnectionAsync<GrpcResponseHandler>>(); assert_sync::<HttpClientConnectionAsync<GrpcResponseHandler>>(); 
assert_send::<Arc<HttpClientConnectionAsync<GrpcResponseHandler>>>(); assert_send::<tokio_core::channel::Sender<()>>(); assert_send::<LoopToClient>(); } /// gRPC client implementation. /// Used by generated code. pub struct GrpcClient { loop_to_client: LoopToClient, thread_join_handle: Option<thread::JoinHandle<()>>, host: String, http_scheme: HttpScheme, } impl GrpcClient { /// Create a client connected to specified host and port. pub fn new(host: &str, port: u16) -> GrpcResult<GrpcClient> { // TODO: sync // TODO: try connect to all addrs let socket_addr = try!((host, port).to_socket_addrs()).next().unwrap(); // We need some data back from event loop. // This channel is used to exchange that data let (get_from_loop_tx, get_from_loop_rx) = mpsc::channel(); // Start event loop. let join_handle = thread::spawn(move || { run_client_event_loop(socket_addr, get_from_loop_tx); }); // Get back call channel and shutdown channel. let loop_to_client = try!(get_from_loop_rx.recv() .map_err(|_| GrpcError::Other("get response from loop"))); Ok(GrpcClient { loop_to_client: loop_to_client, thread_join_handle: Some(join_handle), host: host.to_owned(), http_scheme: HttpScheme::Http, }) } pub fn new_resp_channel<Resp : Send +'static>(&self) -> futures::Oneshot<(tokio_core::channel::Sender<ResultOrEof<Resp, GrpcError>>, GrpcStreamSend<Resp>)> { let (one_sender, one_receiver) = futures::oneshot(); self.loop_to_client.loop_handle.spawn(move |handle| { let (sender, receiver) = tokio_core::channel::channel(&handle).unwrap(); let receiver: GrpcStreamSend<ResultOrEof<Resp, GrpcError>> = Box::new(receiver.map_err(GrpcError::from)); let receiver: GrpcStreamSend<Resp> = Box::new(stream_with_eof_and_error(receiver)); one_sender.complete((sender, receiver)); futures::finished(()) }); one_receiver } pub fn call_impl<Req : Send +'static, Resp : Send +'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcStreamSend<Resp> { let host = self.host.clone(); let http_scheme = self.http_scheme.clone(); let http_conn = self.loop_to_client.http_conn.clone(); // A channel to send response back to caller let future = self.new_resp_channel().map_err(GrpcError::from).and_then(move |(complete, receiver)| { let headers = vec![ Header::new(":method", "POST"), Header::new(":path", method.name.clone()), Header::new(":authority", host.clone()), Header::new(":scheme", http_scheme.as_bytes()), ]; let request_frames = { let method = method.clone(); req .and_then(move |req| { let grpc_frame = try!(method.req_marshaller.write(&req)); Ok(write_grpc_frame_to_vec(&grpc_frame)) }) .map_err(|e| HttpError::Other(Box::new(e))) }; let start_request = http_conn.start_request( headers, Box::new(request_frames), GrpcResponseHandler { tr: Box::new(GrpcResponseHandlerTyped { method: method.clone(), complete: complete, remaining_response: Vec::new(), }), } ).map_err(GrpcError::from); let receiver: GrpcStreamSend<Resp> = receiver; start_request.map(move |()| receiver) }); let s: GrpcStreamSend<Resp> = future_flatten_to_stream(future); s } pub fn call_unary<Req : Send +'static, Resp : Send +'static>(&self, req: Req, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcFutureSend<Resp> { stream_single_send(self.call_impl(Box::new(stream_once_send(req)), method)) } pub fn call_server_streaming<Req : Send +'static, Resp : Send +'static>(&self, req: Req, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcStreamSend<Resp> { self.call_impl(stream_once_send(req).boxed(), method) } pub fn
<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcFutureSend<Resp> { stream_single_send(self.call_impl(req, method)) } pub fn call_bidi<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcStreamSend<Resp> { self.call_impl(req, method) } } // We shut down the client in the destructor. impl Drop for GrpcClient { fn drop(&mut self) { // ignore the error because the event loop may already be dead self.loop_to_client.shutdown_tx.send(()).ok(); // do not ignore errors because we own the event loop thread self.thread_join_handle.take().expect("handle.take") .join().expect("join thread"); } } // Event loop entry point fn run_client_event_loop( socket_addr: SocketAddr, send_to_back: mpsc::Sender<LoopToClient>) { // Create an event loop. let mut lp = reactor::Core::new().unwrap(); // Create a channel to receive shutdown signal. let (shutdown_tx, shutdown_rx) = tokio_core::channel::channel(&lp.handle()).unwrap(); let (http_conn, http_conn_future) = HttpClientConnectionAsync::new(lp.handle(), &socket_addr); let http_conn_future: GrpcFuture<_> = Box::new(http_conn_future.map_err(GrpcError::from)); // Send channels back to GrpcClient send_to_back .send(LoopToClient { shutdown_tx: shutdown_tx, loop_handle: lp.remote(), http_conn: Arc::new(http_conn), }) .expect("send back"); let shutdown = shutdown_rx.into_future().map_err(|(e, _)| GrpcError::from(e)).and_then(move |_| { // Must complete with an error, // so `join` with this future cancels the other future. futures::failed::<(), _>(GrpcError::Other("shutdown")) }); // Wait for either completion of the connection (i.e. an error) // or the shutdown signal. let done = http_conn_future.join(shutdown); // TODO: do not ignore error lp.run(done).ok(); }
call_client_streaming
identifier_name
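// Sketch of the startup handshake in `GrpcClient::new` above: spawn the
// event-loop thread, then block on a channel until the loop has built its
// handles and sent them back. `LoopHandles` is a placeholder standing in for
// the real `LoopToClient` struct.
use std::sync::mpsc;
use std::thread;

struct LoopHandles; // placeholder for the real shutdown/remote/connection handles

fn start_event_loop() -> Result<(LoopHandles, thread::JoinHandle<()>), mpsc::RecvError> {
    let (tx, rx) = mpsc::channel();
    let join = thread::spawn(move || {
        // ... build the reactor core, connection, and shutdown channel here ...
        tx.send(LoopHandles).expect("send back");
        // ... then run the loop until completion or shutdown ...
    });
    let handles = rx.recv()?; // blocks until the loop thread reports in
    Ok((handles, join))
}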
client.rs
use std::thread; use std::net::SocketAddr; use std::net::ToSocketAddrs; use std::sync::mpsc; use std::sync::Arc; use futures; use futures::Future; use futures::stream::Stream; use tokio_core; use tokio_core::reactor; use solicit::http::HttpScheme; use solicit::http::HttpError; use solicit::http::Header; use solicit::http::StaticHeader; use method::MethodDescriptor; use error::*; use result::*; use futures_misc::*; use futures_grpc::*; use grpc::*; use http_client::*; use solicit_misc::*; use assert_types::*; trait GrpcResponseHandlerTrait : Send +'static + HttpClientResponseHandler { } struct GrpcResponseHandlerTyped<Req : Send +'static, Resp : Send +'static> { method: Arc<MethodDescriptor<Req, Resp>>, complete: tokio_core::channel::Sender<ResultOrEof<Resp, GrpcError>>, remaining_response: Vec<u8>, } impl<Req : Send +'static, Resp : Send +'static> GrpcResponseHandlerTrait for GrpcResponseHandlerTyped<Req, Resp> { } impl<Req : Send +'static, Resp : Send +'static> HttpClientResponseHandler for GrpcResponseHandlerTyped<Req, Resp> { fn headers(&mut self, headers: Vec<StaticHeader>) -> bool { println!("client: received headers"); if slice_get_header(&headers, ":status")!= Some("200") { if let Some(message) = slice_get_header(&headers, HEADER_GRPC_MESSAGE) { self.complete.send(ResultOrEof::Error(GrpcError::GrpcMessage(GrpcMessageError { grpc_message: message.to_owned() }))).unwrap(); } else { self.complete.send(ResultOrEof::Error(GrpcError::Other("not 200"))).unwrap(); } false } else { true } } fn data_frame(&mut self, chunk: Vec<u8>) -> bool { self.remaining_response.extend(&chunk); loop { let len = match parse_grpc_frame(&self.remaining_response) { Err(e) => { self.complete.send(ResultOrEof::Error(e)).unwrap(); return false; } Ok(None) => break, Ok(Some((message, len))) => { let resp = self.method.resp_marshaller.read(&message); self.complete.send(From::from(resp)).ok(); len } }; self.remaining_response.drain(..len); } true } fn trailers(&mut self, headers: Vec<StaticHeader>) -> bool { let _status_200 = slice_get_header(&headers, ":status") == Some("200"); let grpc_status_0 = slice_get_header(&headers, HEADER_GRPC_STATUS) == Some("0"); if /* status_200 && */ grpc_status_0 { true } else { if let Some(message) = slice_get_header(&headers, HEADER_GRPC_MESSAGE) { self.complete.send(ResultOrEof::Error(GrpcError::GrpcMessage(GrpcMessageError { grpc_message: message.to_owned() }))).unwrap(); } else { self.complete.send(ResultOrEof::Error(GrpcError::Other("not xxx"))).unwrap(); } false } } fn end(&mut self) { self.complete.send(ResultOrEof::Eof).unwrap(); } } struct GrpcResponseHandler { tr: Box<GrpcResponseHandlerTrait>, } impl HttpClientResponseHandler for GrpcResponseHandler { fn headers(&mut self, headers: Vec<StaticHeader>) -> bool { self.tr.headers(headers) } fn data_frame(&mut self, chunk: Vec<u8>) -> bool { self.tr.data_frame(chunk) } fn trailers(&mut self, headers: Vec<StaticHeader>) -> bool { self.tr.trailers(headers) } fn end(&mut self) { self.tr.end() } } // Data sent from event loop to GrpcClient struct LoopToClient { // used only once to send shutdown signal shutdown_tx: tokio_core::channel::Sender<()>, loop_handle: reactor::Remote, http_conn: Arc<HttpClientConnectionAsync<GrpcResponseHandler>>, } fn _assert_loop_to_client() { assert_send::<reactor::Remote>(); assert_send::<HttpClientConnectionAsync<GrpcResponseHandler>>(); assert_send::<HttpClientConnectionAsync<GrpcResponseHandler>>(); assert_sync::<HttpClientConnectionAsync<GrpcResponseHandler>>(); 
assert_send::<Arc<HttpClientConnectionAsync<GrpcResponseHandler>>>(); assert_send::<tokio_core::channel::Sender<()>>(); assert_send::<LoopToClient>(); } /// gRPC client implementation. /// Used by generated code. pub struct GrpcClient { loop_to_client: LoopToClient, thread_join_handle: Option<thread::JoinHandle<()>>, host: String, http_scheme: HttpScheme, } impl GrpcClient { /// Create a client connected to specified host and port. pub fn new(host: &str, port: u16) -> GrpcResult<GrpcClient> { // TODO: sync // TODO: try connect to all addrs let socket_addr = try!((host, port).to_socket_addrs()).next().unwrap(); // We need some data back from event loop. // This channel is used to exchange that data let (get_from_loop_tx, get_from_loop_rx) = mpsc::channel(); // Start event loop. let join_handle = thread::spawn(move || { run_client_event_loop(socket_addr, get_from_loop_tx); }); // Get back call channel and shutdown channel. let loop_to_client = try!(get_from_loop_rx.recv() .map_err(|_| GrpcError::Other("get response from loop"))); Ok(GrpcClient { loop_to_client: loop_to_client, thread_join_handle: Some(join_handle), host: host.to_owned(), http_scheme: HttpScheme::Http, }) } pub fn new_resp_channel<Resp : Send +'static>(&self) -> futures::Oneshot<(tokio_core::channel::Sender<ResultOrEof<Resp, GrpcError>>, GrpcStreamSend<Resp>)> { let (one_sender, one_receiver) = futures::oneshot(); self.loop_to_client.loop_handle.spawn(move |handle| { let (sender, receiver) = tokio_core::channel::channel(&handle).unwrap(); let receiver: GrpcStreamSend<ResultOrEof<Resp, GrpcError>> = Box::new(receiver.map_err(GrpcError::from)); let receiver: GrpcStreamSend<Resp> = Box::new(stream_with_eof_and_error(receiver)); one_sender.complete((sender, receiver)); futures::finished(()) }); one_receiver } pub fn call_impl<Req : Send +'static, Resp : Send +'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcStreamSend<Resp> { let host = self.host.clone(); let http_scheme = self.http_scheme.clone(); let http_conn = self.loop_to_client.http_conn.clone(); // A channel to send response back to caller let future = self.new_resp_channel().map_err(GrpcError::from).and_then(move |(complete, receiver)| { let headers = vec![ Header::new(":method", "POST"), Header::new(":path", method.name.clone()), Header::new(":authority", host.clone()), Header::new(":scheme", http_scheme.as_bytes()), ]; let request_frames = { let method = method.clone(); req .and_then(move |req| { let grpc_frame = try!(method.req_marshaller.write(&req)); Ok(write_grpc_frame_to_vec(&grpc_frame)) }) .map_err(|e| HttpError::Other(Box::new(e))) }; let start_request = http_conn.start_request( headers, Box::new(request_frames), GrpcResponseHandler { tr: Box::new(GrpcResponseHandlerTyped { method: method.clone(), complete: complete, remaining_response: Vec::new(), }), } ).map_err(GrpcError::from); let receiver: GrpcStreamSend<Resp> = receiver; start_request.map(move |()| receiver) }); let s: GrpcStreamSend<Resp> = future_flatten_to_stream(future); s } pub fn call_unary<Req : Send +'static, Resp : Send +'static>(&self, req: Req, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcFutureSend<Resp> { stream_single_send(self.call_impl(Box::new(stream_once_send(req)), method)) } pub fn call_server_streaming<Req : Send +'static, Resp : Send +'static>(&self, req: Req, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcStreamSend<Resp> { self.call_impl(stream_once_send(req).boxed(), method) } pub fn call_client_streaming<Req : Send +'static, Resp : 
Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcFutureSend<Resp> { stream_single_send(self.call_impl(req, method)) } pub fn call_bidi<Req : Send + 'static, Resp : Send + 'static>(&self, req: GrpcStreamSend<Req>, method: Arc<MethodDescriptor<Req, Resp>>) -> GrpcStreamSend<Resp> { self.call_impl(req, method) } } // We shut down the client in the destructor. impl Drop for GrpcClient { fn drop(&mut self)
} // Event loop entry point fn run_client_event_loop( socket_addr: SocketAddr, send_to_back: mpsc::Sender<LoopToClient>) { // Create an event loop. let mut lp = reactor::Core::new().unwrap(); // Create a channel to receive shutdown signal. let (shutdown_tx, shutdown_rx) = tokio_core::channel::channel(&lp.handle()).unwrap(); let (http_conn, http_conn_future) = HttpClientConnectionAsync::new(lp.handle(), &socket_addr); let http_conn_future: GrpcFuture<_> = Box::new(http_conn_future.map_err(GrpcError::from)); // Send channels back to GrpcClient send_to_back .send(LoopToClient { shutdown_tx: shutdown_tx, loop_handle: lp.remote(), http_conn: Arc::new(http_conn), }) .expect("send back"); let shutdown = shutdown_rx.into_future().map_err(|(e, _)| GrpcError::from(e)).and_then(move |_| { // Must complete with an error, // so `join` with this future cancels the other future. futures::failed::<(), _>(GrpcError::Other("shutdown")) }); // Wait for either completion of the connection (i.e. an error) // or the shutdown signal. let done = http_conn_future.join(shutdown); // TODO: do not ignore error lp.run(done).ok(); }
{ // ignore the error because the event loop may already be dead self.loop_to_client.shutdown_tx.send(()).ok(); // do not ignore errors because we own the event loop thread self.thread_join_handle.take().expect("handle.take") .join().expect("join thread"); }
identifier_body
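// Sketch of the `ResultOrEof` protocol that the response channel above
// carries. The `Error` and `Eof` variants appear in the handler code; the
// item variant name (`Item`) and this iterator-based consumer are assumptions
// standing in for the futures-0.1 `stream_with_eof_and_error` adapter.
enum ResultOrEof<T, E> {
    Item(T),
    Error(E),
    Eof,
}

fn collect_until_eof<T, E>(items: Vec<ResultOrEof<T, E>>) -> Result<Vec<T>, E> {
    let mut out = Vec::new();
    for item in items {
        match item {
            ResultOrEof::Item(v) => out.push(v),    // ordinary stream item
            ResultOrEof::Error(e) => return Err(e), // an error aborts the stream
            ResultOrEof::Eof => break,              // explicit end-of-stream marker
        }
    }
    Ok(out)
}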
fixed.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. use crate::array::PrimitiveArray; use crate::null_sentinel; use arrow_array::builder::BufferBuilder; use arrow_array::{ArrowPrimitiveType, BooleanArray, FixedSizeBinaryArray}; use arrow_buffer::{bit_util, i256, ArrowNativeType, Buffer, MutableBuffer}; use arrow_data::{ArrayData, ArrayDataBuilder}; use arrow_schema::{DataType, SortOptions}; use half::f16; pub trait FromSlice { fn from_slice(slice: &[u8], invert: bool) -> Self; } impl<const N: usize> FromSlice for [u8; N] { #[inline] fn from_slice(slice: &[u8], invert: bool) -> Self { let mut t: Self = slice.try_into().unwrap(); if invert { t.iter_mut().for_each(|o| *o =!*o); } t } } /// Encodes a value of a particular fixed width type into bytes according to the rules /// described on [`super::RowConverter`] pub trait FixedLengthEncoding: Copy { const ENCODED_LEN: usize = 1 + std::mem::size_of::<Self::Encoded>(); type Encoded: Sized + Copy + FromSlice + AsRef<[u8]> + AsMut<[u8]>; fn encode(self) -> Self::Encoded; fn decode(encoded: Self::Encoded) -> Self; } impl FixedLengthEncoding for bool { type Encoded = [u8; 1]; fn encode(self) -> [u8; 1] { [self as u8] } fn decode(encoded: Self::Encoded) -> Self { encoded[0]!= 0 } } macro_rules! encode_signed { ($n:expr, $t:ty) => { impl FixedLengthEncoding for $t { type Encoded = [u8; $n]; fn encode(self) -> [u8; $n] { let mut b = self.to_be_bytes(); // Toggle top "sign" bit to ensure consistent sort order b[0] ^= 0x80; b } fn decode(mut encoded: Self::Encoded) -> Self { // Toggle top "sign" bit encoded[0] ^= 0x80; Self::from_be_bytes(encoded) } } }; } encode_signed!(1, i8); encode_signed!(2, i16); encode_signed!(4, i32); encode_signed!(8, i64); encode_signed!(16, i128); encode_signed!(32, i256); macro_rules! 
encode_unsigned { ($n:expr, $t:ty) => { impl FixedLengthEncoding for $t { type Encoded = [u8; $n]; fn encode(self) -> [u8; $n] { self.to_be_bytes() } fn decode(encoded: Self::Encoded) -> Self { Self::from_be_bytes(encoded) } } }; } encode_unsigned!(1, u8); encode_unsigned!(2, u16); encode_unsigned!(4, u32); encode_unsigned!(8, u64); impl FixedLengthEncoding for f16 { type Encoded = [u8; 2]; fn encode(self) -> [u8; 2] { // https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260 let s = self.to_bits() as i16; let val = s ^ (((s >> 15) as u16) >> 1) as i16; val.encode() } fn decode(encoded: Self::Encoded) -> Self { let bits = i16::decode(encoded); let val = bits ^ (((bits >> 15) as u16) >> 1) as i16; Self::from_bits(val as u16) } } impl FixedLengthEncoding for f32 { type Encoded = [u8; 4]; fn encode(self) -> [u8; 4] { // https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260 let s = self.to_bits() as i32; let val = s ^ (((s >> 31) as u32) >> 1) as i32; val.encode() } fn decode(encoded: Self::Encoded) -> Self { let bits = i32::decode(encoded); let val = bits ^ (((bits >> 31) as u32) >> 1) as i32; Self::from_bits(val as u32) } } impl FixedLengthEncoding for f64 { type Encoded = [u8; 8]; fn encode(self) -> [u8; 8] { // https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260 let s = self.to_bits() as i64; let val = s ^ (((s >> 63) as u64) >> 1) as i64; val.encode() } fn decode(encoded: Self::Encoded) -> Self { let bits = i64::decode(encoded); let val = bits ^ (((bits >> 63) as u64) >> 1) as i64; Self::from_bits(val as u64) } } /// Returns the total encoded length (including null byte) for a value of type `T::Native` pub const fn
<T>(_col: &PrimitiveArray<T>) -> usize where T: ArrowPrimitiveType, T::Native: FixedLengthEncoding, { T::Native::ENCODED_LEN } /// Fixed width types are encoded as /// /// - 1 byte `0` if null or `1` if valid /// - bytes of [`FixedLengthEncoding`] pub fn encode<T: FixedLengthEncoding, I: IntoIterator<Item = Option<T>>>( data: &mut [u8], offsets: &mut [usize], i: I, opts: SortOptions, ) { for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(i) { let end_offset = *offset + T::ENCODED_LEN; if let Some(val) = maybe_val { let to_write = &mut data[*offset..end_offset]; to_write[0] = 1; let mut encoded = val.encode(); if opts.descending { // Flip bits to reverse order encoded.as_mut().iter_mut().for_each(|v| *v =!*v) } to_write[1..].copy_from_slice(encoded.as_ref()) } else { data[*offset] = null_sentinel(opts); } *offset = end_offset; } } pub fn encode_fixed_size_binary( data: &mut [u8], offsets: &mut [usize], array: &FixedSizeBinaryArray, opts: SortOptions, ) { let len = array.value_length() as usize; for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(array.iter()) { let end_offset = *offset + len + 1; if let Some(val) = maybe_val { let to_write = &mut data[*offset..end_offset]; to_write[0] = 1; to_write[1..].copy_from_slice(&val[..len]); if opts.descending { // Flip bits to reverse order to_write[1..1 + len].iter_mut().for_each(|v| *v =!*v) } } else { data[*offset] = null_sentinel(opts); } *offset = end_offset; } } /// Splits `len` bytes from `src` #[inline] fn split_off<'a>(src: &mut &'a [u8], len: usize) -> &'a [u8] { let v = &src[..len]; *src = &src[len..]; v } /// Decodes a `BooleanArray` from rows pub fn decode_bool(rows: &mut [&[u8]], options: SortOptions) -> BooleanArray { let true_val = match options.descending { true =>!1, false => 1, }; let len = rows.len(); let mut null_count = 0; let mut nulls = MutableBuffer::new(bit_util::ceil(len, 64) * 8); let mut values = MutableBuffer::new(bit_util::ceil(len, 64) * 8); let chunks = len / 64; let remainder = len % 64; for chunk in 0..chunks { let mut null_packed = 0; let mut values_packed = 0; for bit_idx in 0..64 { let i = split_off(&mut rows[bit_idx + chunk * 64], 2); let (null, value) = (i[0] == 1, i[1] == true_val); null_count +=!null as usize; null_packed |= (null as u64) << bit_idx; values_packed |= (value as u64) << bit_idx; } nulls.push(null_packed); values.push(values_packed); } if remainder!= 0 { let mut null_packed = 0; let mut values_packed = 0; for bit_idx in 0..remainder { let i = split_off(&mut rows[bit_idx + chunks * 64], 2); let (null, value) = (i[0] == 1, i[1] == true_val); null_count +=!null as usize; null_packed |= (null as u64) << bit_idx; values_packed |= (value as u64) << bit_idx; } nulls.push(null_packed); values.push(values_packed); } let builder = ArrayDataBuilder::new(DataType::Boolean) .len(rows.len()) .null_count(null_count) .add_buffer(values.into()) .null_bit_buffer(Some(nulls.into())); // SAFETY: // Buffers are the correct length unsafe { BooleanArray::from(builder.build_unchecked()) } } /// Decodes a single byte from each row, interpreting `0x01` as a valid value /// and all other values as a null /// /// Returns the null count and null buffer pub fn decode_nulls(rows: &[&[u8]]) -> (usize, Buffer) { let mut null_count = 0; let buffer = MutableBuffer::collect_bool(rows.len(), |idx| { let valid = rows[idx][0] == 1; null_count +=!valid as usize; valid }) .into(); (null_count, buffer) } /// Decodes a `ArrayData` from rows based on the provided `FixedLengthEncoding` `T` /// /// # Safety /// /// 
`data_type` must be appropriate native type for `T` unsafe fn decode_fixed<T: FixedLengthEncoding + ArrowNativeType>( rows: &mut [&[u8]], data_type: DataType, options: SortOptions, ) -> ArrayData { let len = rows.len(); let mut values = BufferBuilder::<T>::new(len); let (null_count, nulls) = decode_nulls(rows); for row in rows { let i = split_off(row, T::ENCODED_LEN); let value = T::Encoded::from_slice(&i[1..], options.descending); values.append(T::decode(value)); } let builder = ArrayDataBuilder::new(data_type) .len(len) .null_count(null_count) .add_buffer(values.finish()) .null_bit_buffer(Some(nulls)); // SAFETY: Buffers correct length builder.build_unchecked() } /// Decodes a `PrimitiveArray` from rows pub fn decode_primitive<T: ArrowPrimitiveType>( rows: &mut [&[u8]], data_type: DataType, options: SortOptions, ) -> PrimitiveArray<T> where T::Native: FixedLengthEncoding, { assert!(PrimitiveArray::<T>::is_compatible(&data_type)); // SAFETY: // Validated data type above unsafe { decode_fixed::<T::Native>(rows, data_type, options).into() } } /// Decodes a `FixedLengthBinary` from rows pub fn decode_fixed_size_binary( rows: &mut [&[u8]], size: i32, options: SortOptions, ) -> FixedSizeBinaryArray { let len = rows.len(); let mut values = MutableBuffer::new(size as usize * rows.len()); let (null_count, nulls) = decode_nulls(rows); let encoded_len = size as usize + 1; for row in rows { let i = split_off(row, encoded_len); values.extend_from_slice(&i[1..]); } if options.descending { for v in values.as_slice_mut() { *v =!*v; } } let builder = ArrayDataBuilder::new(DataType::FixedSizeBinary(size)) .len(len) .null_count(null_count) .add_buffer(values.into()) .null_bit_buffer(Some(nulls)); // SAFETY: Buffers correct length unsafe { builder.build_unchecked().into() } }
encoded_len
identifier_name
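// Worked example of the sign-flip trick in `encode_signed!` above: XOR-ing
// the top bit of the big-endian bytes makes unsigned, byte-wise comparison
// agree with signed integer order (a minimal sketch, i32 only).
fn encode_i32(v: i32) -> [u8; 4] {
    let mut b = v.to_be_bytes();
    b[0] ^= 0x80; // toggle the "sign" bit
    b
}

fn main() {
    let vals = [i32::MIN, -1, 0, 1, i32::MAX];
    let encoded: Vec<[u8; 4]> = vals.iter().map(|&v| encode_i32(v)).collect();
    // byte-wise order of the encodings matches numeric order of the values
    assert!(encoded.windows(2).all(|w| w[0] < w[1]));
}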
fixed.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. use crate::array::PrimitiveArray; use crate::null_sentinel; use arrow_array::builder::BufferBuilder; use arrow_array::{ArrowPrimitiveType, BooleanArray, FixedSizeBinaryArray}; use arrow_buffer::{bit_util, i256, ArrowNativeType, Buffer, MutableBuffer}; use arrow_data::{ArrayData, ArrayDataBuilder}; use arrow_schema::{DataType, SortOptions}; use half::f16; pub trait FromSlice { fn from_slice(slice: &[u8], invert: bool) -> Self; } impl<const N: usize> FromSlice for [u8; N] { #[inline] fn from_slice(slice: &[u8], invert: bool) -> Self { let mut t: Self = slice.try_into().unwrap(); if invert { t.iter_mut().for_each(|o| *o =!*o); } t } } /// Encodes a value of a particular fixed width type into bytes according to the rules /// described on [`super::RowConverter`] pub trait FixedLengthEncoding: Copy { const ENCODED_LEN: usize = 1 + std::mem::size_of::<Self::Encoded>(); type Encoded: Sized + Copy + FromSlice + AsRef<[u8]> + AsMut<[u8]>; fn encode(self) -> Self::Encoded; fn decode(encoded: Self::Encoded) -> Self; } impl FixedLengthEncoding for bool { type Encoded = [u8; 1]; fn encode(self) -> [u8; 1] { [self as u8] } fn decode(encoded: Self::Encoded) -> Self { encoded[0]!= 0 } } macro_rules! encode_signed { ($n:expr, $t:ty) => { impl FixedLengthEncoding for $t { type Encoded = [u8; $n]; fn encode(self) -> [u8; $n] { let mut b = self.to_be_bytes(); // Toggle top "sign" bit to ensure consistent sort order b[0] ^= 0x80; b } fn decode(mut encoded: Self::Encoded) -> Self { // Toggle top "sign" bit encoded[0] ^= 0x80; Self::from_be_bytes(encoded) } } }; } encode_signed!(1, i8); encode_signed!(2, i16); encode_signed!(4, i32); encode_signed!(8, i64); encode_signed!(16, i128); encode_signed!(32, i256); macro_rules! 
encode_unsigned { ($n:expr, $t:ty) => { impl FixedLengthEncoding for $t { type Encoded = [u8; $n]; fn encode(self) -> [u8; $n] { self.to_be_bytes() } fn decode(encoded: Self::Encoded) -> Self { Self::from_be_bytes(encoded) } } }; } encode_unsigned!(1, u8); encode_unsigned!(2, u16); encode_unsigned!(4, u32); encode_unsigned!(8, u64); impl FixedLengthEncoding for f16 { type Encoded = [u8; 2]; fn encode(self) -> [u8; 2] { // https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260 let s = self.to_bits() as i16; let val = s ^ (((s >> 15) as u16) >> 1) as i16; val.encode() } fn decode(encoded: Self::Encoded) -> Self { let bits = i16::decode(encoded); let val = bits ^ (((bits >> 15) as u16) >> 1) as i16; Self::from_bits(val as u16) } } impl FixedLengthEncoding for f32 { type Encoded = [u8; 4]; fn encode(self) -> [u8; 4] { // https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260 let s = self.to_bits() as i32; let val = s ^ (((s >> 31) as u32) >> 1) as i32; val.encode() } fn decode(encoded: Self::Encoded) -> Self { let bits = i32::decode(encoded); let val = bits ^ (((bits >> 31) as u32) >> 1) as i32; Self::from_bits(val as u32) } }
fn encode(self) -> [u8; 8] { // https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260 let s = self.to_bits() as i64; let val = s ^ (((s >> 63) as u64) >> 1) as i64; val.encode() } fn decode(encoded: Self::Encoded) -> Self { let bits = i64::decode(encoded); let val = bits ^ (((bits >> 63) as u64) >> 1) as i64; Self::from_bits(val as u64) } } /// Returns the total encoded length (including null byte) for a value of type `T::Native` pub const fn encoded_len<T>(_col: &PrimitiveArray<T>) -> usize where T: ArrowPrimitiveType, T::Native: FixedLengthEncoding, { T::Native::ENCODED_LEN } /// Fixed width types are encoded as /// /// - 1 byte `0` if null or `1` if valid /// - bytes of [`FixedLengthEncoding`] pub fn encode<T: FixedLengthEncoding, I: IntoIterator<Item = Option<T>>>( data: &mut [u8], offsets: &mut [usize], i: I, opts: SortOptions, ) { for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(i) { let end_offset = *offset + T::ENCODED_LEN; if let Some(val) = maybe_val { let to_write = &mut data[*offset..end_offset]; to_write[0] = 1; let mut encoded = val.encode(); if opts.descending { // Flip bits to reverse order encoded.as_mut().iter_mut().for_each(|v| *v =!*v) } to_write[1..].copy_from_slice(encoded.as_ref()) } else { data[*offset] = null_sentinel(opts); } *offset = end_offset; } } pub fn encode_fixed_size_binary( data: &mut [u8], offsets: &mut [usize], array: &FixedSizeBinaryArray, opts: SortOptions, ) { let len = array.value_length() as usize; for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(array.iter()) { let end_offset = *offset + len + 1; if let Some(val) = maybe_val { let to_write = &mut data[*offset..end_offset]; to_write[0] = 1; to_write[1..].copy_from_slice(&val[..len]); if opts.descending { // Flip bits to reverse order to_write[1..1 + len].iter_mut().for_each(|v| *v =!*v) } } else { data[*offset] = null_sentinel(opts); } *offset = end_offset; } } /// Splits `len` bytes from `src` #[inline] fn split_off<'a>(src: &mut &'a [u8], len: usize) -> &'a [u8] { let v = &src[..len]; *src = &src[len..]; v } /// Decodes a `BooleanArray` from rows pub fn decode_bool(rows: &mut [&[u8]], options: SortOptions) -> BooleanArray { let true_val = match options.descending { true =>!1, false => 1, }; let len = rows.len(); let mut null_count = 0; let mut nulls = MutableBuffer::new(bit_util::ceil(len, 64) * 8); let mut values = MutableBuffer::new(bit_util::ceil(len, 64) * 8); let chunks = len / 64; let remainder = len % 64; for chunk in 0..chunks { let mut null_packed = 0; let mut values_packed = 0; for bit_idx in 0..64 { let i = split_off(&mut rows[bit_idx + chunk * 64], 2); let (null, value) = (i[0] == 1, i[1] == true_val); null_count +=!null as usize; null_packed |= (null as u64) << bit_idx; values_packed |= (value as u64) << bit_idx; } nulls.push(null_packed); values.push(values_packed); } if remainder!= 0 { let mut null_packed = 0; let mut values_packed = 0; for bit_idx in 0..remainder { let i = split_off(&mut rows[bit_idx + chunks * 64], 2); let (null, value) = (i[0] == 1, i[1] == true_val); null_count +=!null as usize; null_packed |= (null as u64) << bit_idx; values_packed |= (value as u64) << bit_idx; } nulls.push(null_packed); values.push(values_packed); } let builder = ArrayDataBuilder::new(DataType::Boolean) .len(rows.len()) .null_count(null_count) .add_buffer(values.into()) .null_bit_buffer(Some(nulls.into())); // SAFETY: // Buffers are the correct length unsafe { BooleanArray::from(builder.build_unchecked()) } } 
/// Decodes a single byte from each row, interpreting `0x01` as a valid value /// and all other values as a null /// /// Returns the null count and null buffer pub fn decode_nulls(rows: &[&[u8]]) -> (usize, Buffer) { let mut null_count = 0; let buffer = MutableBuffer::collect_bool(rows.len(), |idx| { let valid = rows[idx][0] == 1; null_count += !valid as usize; valid }) .into(); (null_count, buffer) } /// Decodes an `ArrayData` from rows based on the provided `FixedLengthEncoding` `T` /// /// # Safety /// /// `data_type` must be the appropriate native type for `T` unsafe fn decode_fixed<T: FixedLengthEncoding + ArrowNativeType>( rows: &mut [&[u8]], data_type: DataType, options: SortOptions, ) -> ArrayData { let len = rows.len(); let mut values = BufferBuilder::<T>::new(len); let (null_count, nulls) = decode_nulls(rows); for row in rows { let i = split_off(row, T::ENCODED_LEN); let value = T::Encoded::from_slice(&i[1..], options.descending); values.append(T::decode(value)); } let builder = ArrayDataBuilder::new(data_type) .len(len) .null_count(null_count) .add_buffer(values.finish()) .null_bit_buffer(Some(nulls)); // SAFETY: Buffers correct length builder.build_unchecked() } /// Decodes a `PrimitiveArray` from rows pub fn decode_primitive<T: ArrowPrimitiveType>( rows: &mut [&[u8]], data_type: DataType, options: SortOptions, ) -> PrimitiveArray<T> where T::Native: FixedLengthEncoding, { assert!(PrimitiveArray::<T>::is_compatible(&data_type)); // SAFETY: // Validated data type above unsafe { decode_fixed::<T::Native>(rows, data_type, options).into() } } /// Decodes a `FixedLengthBinary` from rows pub fn decode_fixed_size_binary( rows: &mut [&[u8]], size: i32, options: SortOptions, ) -> FixedSizeBinaryArray { let len = rows.len(); let mut values = MutableBuffer::new(size as usize * rows.len()); let (null_count, nulls) = decode_nulls(rows); let encoded_len = size as usize + 1; for row in rows { let i = split_off(row, encoded_len); values.extend_from_slice(&i[1..]); } if options.descending { for v in values.as_slice_mut() { *v = !*v; } } let builder = ArrayDataBuilder::new(DataType::FixedSizeBinary(size)) .len(len) .null_count(null_count) .add_buffer(values.into()) .null_bit_buffer(Some(nulls)); // SAFETY: Buffers correct length unsafe { builder.build_unchecked().into() } }
impl FixedLengthEncoding for f64 { type Encoded = [u8; 8];
random_line_split
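The `f64` impl above reuses the signed-integer encoding after an order-fixing transform: XORing a negative float's magnitude bits with all-ones makes larger-magnitude negatives compare smaller, which matches numeric order, while positive floats are left alone. Below is a minimal standalone sketch, not the arrow-row API itself; `encode_f64` is a hypothetical free function that inlines the `i64::encode` step from `encode_signed!`:

```rust
// Sketch of the f64 total-order transform from `FixedLengthEncoding for f64`,
// combined with the big-endian sign-bit toggle from `encode_signed!`.
fn encode_f64(v: f64) -> [u8; 8] {
    let s = v.to_bits() as i64;
    // If the sign bit is set, `s >> 63` is all ones; shifting the unsigned copy
    // right by one yields a mask that flips every bit except the sign, reversing
    // the order of negative values. For positive values the mask is zero.
    let val = s ^ (((s >> 63) as u64) >> 1) as i64;
    // Inlined `i64::encode`: big-endian bytes with the top "sign" bit toggled,
    // so negative values sort before positive ones byte-wise.
    let mut b = val.to_be_bytes();
    b[0] ^= 0x80;
    b
}

fn main() {
    let mut xs = [3.5_f64, -0.0, 0.0, -7.25, 1.0, -1.0];
    // Sorting by the encoded bytes yields ascending numeric order,
    // with -0.0 ordered before 0.0.
    xs.sort_by_key(|v| encode_f64(*v));
    println!("{:?}", xs); // [-7.25, -1.0, -0.0, 0.0, 1.0, 3.5]
}
```

This is the same trick used by `f64::total_cmp`, per the rust-lang permalink in the source.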
fixed.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. use crate::array::PrimitiveArray; use crate::null_sentinel; use arrow_array::builder::BufferBuilder; use arrow_array::{ArrowPrimitiveType, BooleanArray, FixedSizeBinaryArray}; use arrow_buffer::{bit_util, i256, ArrowNativeType, Buffer, MutableBuffer}; use arrow_data::{ArrayData, ArrayDataBuilder}; use arrow_schema::{DataType, SortOptions}; use half::f16; pub trait FromSlice { fn from_slice(slice: &[u8], invert: bool) -> Self; } impl<const N: usize> FromSlice for [u8; N] { #[inline] fn from_slice(slice: &[u8], invert: bool) -> Self { let mut t: Self = slice.try_into().unwrap(); if invert { t.iter_mut().for_each(|o| *o = !*o); } t } } /// Encodes a value of a particular fixed width type into bytes according to the rules /// described on [`super::RowConverter`] pub trait FixedLengthEncoding: Copy { const ENCODED_LEN: usize = 1 + std::mem::size_of::<Self::Encoded>(); type Encoded: Sized + Copy + FromSlice + AsRef<[u8]> + AsMut<[u8]>; fn encode(self) -> Self::Encoded; fn decode(encoded: Self::Encoded) -> Self; } impl FixedLengthEncoding for bool { type Encoded = [u8; 1]; fn encode(self) -> [u8; 1] { [self as u8] } fn decode(encoded: Self::Encoded) -> Self { encoded[0] != 0 } } macro_rules! encode_signed { ($n:expr, $t:ty) => { impl FixedLengthEncoding for $t { type Encoded = [u8; $n]; fn encode(self) -> [u8; $n] { let mut b = self.to_be_bytes(); // Toggle top "sign" bit to ensure consistent sort order b[0] ^= 0x80; b } fn decode(mut encoded: Self::Encoded) -> Self { // Toggle top "sign" bit encoded[0] ^= 0x80; Self::from_be_bytes(encoded) } } }; } encode_signed!(1, i8); encode_signed!(2, i16); encode_signed!(4, i32); encode_signed!(8, i64); encode_signed!(16, i128); encode_signed!(32, i256); macro_rules!
encode_unsigned { ($n:expr, $t:ty) => { impl FixedLengthEncoding for $t { type Encoded = [u8; $n]; fn encode(self) -> [u8; $n] { self.to_be_bytes() } fn decode(encoded: Self::Encoded) -> Self { Self::from_be_bytes(encoded) } } }; } encode_unsigned!(1, u8); encode_unsigned!(2, u16); encode_unsigned!(4, u32); encode_unsigned!(8, u64); impl FixedLengthEncoding for f16 { type Encoded = [u8; 2]; fn encode(self) -> [u8; 2] { // https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260 let s = self.to_bits() as i16; let val = s ^ (((s >> 15) as u16) >> 1) as i16; val.encode() } fn decode(encoded: Self::Encoded) -> Self { let bits = i16::decode(encoded); let val = bits ^ (((bits >> 15) as u16) >> 1) as i16; Self::from_bits(val as u16) } } impl FixedLengthEncoding for f32 { type Encoded = [u8; 4]; fn encode(self) -> [u8; 4] { // https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260 let s = self.to_bits() as i32; let val = s ^ (((s >> 31) as u32) >> 1) as i32; val.encode() } fn decode(encoded: Self::Encoded) -> Self { let bits = i32::decode(encoded); let val = bits ^ (((bits >> 31) as u32) >> 1) as i32; Self::from_bits(val as u32) } } impl FixedLengthEncoding for f64 { type Encoded = [u8; 8]; fn encode(self) -> [u8; 8] { // https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260 let s = self.to_bits() as i64; let val = s ^ (((s >> 63) as u64) >> 1) as i64; val.encode() } fn decode(encoded: Self::Encoded) -> Self { let bits = i64::decode(encoded); let val = bits ^ (((bits >> 63) as u64) >> 1) as i64; Self::from_bits(val as u64) } } /// Returns the total encoded length (including null byte) for a value of type `T::Native` pub const fn encoded_len<T>(_col: &PrimitiveArray<T>) -> usize where T: ArrowPrimitiveType, T::Native: FixedLengthEncoding, { T::Native::ENCODED_LEN } /// Fixed width types are encoded as /// /// - 1 byte `0` if null or `1` if valid /// - bytes of [`FixedLengthEncoding`] pub fn encode<T: FixedLengthEncoding, I: IntoIterator<Item = Option<T>>>( data: &mut [u8], offsets: &mut [usize], i: I, opts: SortOptions, ) { for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(i) { let end_offset = *offset + T::ENCODED_LEN; if let Some(val) = maybe_val { let to_write = &mut data[*offset..end_offset]; to_write[0] = 1; let mut encoded = val.encode(); if opts.descending
to_write[1..].copy_from_slice(encoded.as_ref()) } else { data[*offset] = null_sentinel(opts); } *offset = end_offset; } } pub fn encode_fixed_size_binary( data: &mut [u8], offsets: &mut [usize], array: &FixedSizeBinaryArray, opts: SortOptions, ) { let len = array.value_length() as usize; for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(array.iter()) { let end_offset = *offset + len + 1; if let Some(val) = maybe_val { let to_write = &mut data[*offset..end_offset]; to_write[0] = 1; to_write[1..].copy_from_slice(&val[..len]); if opts.descending { // Flip bits to reverse order to_write[1..1 + len].iter_mut().for_each(|v| *v = !*v) } } else { data[*offset] = null_sentinel(opts); } *offset = end_offset; } } /// Splits `len` bytes from `src` #[inline] fn split_off<'a>(src: &mut &'a [u8], len: usize) -> &'a [u8] { let v = &src[..len]; *src = &src[len..]; v } /// Decodes a `BooleanArray` from rows pub fn decode_bool(rows: &mut [&[u8]], options: SortOptions) -> BooleanArray { let true_val = match options.descending { true => !1, false => 1, }; let len = rows.len(); let mut null_count = 0; let mut nulls = MutableBuffer::new(bit_util::ceil(len, 64) * 8); let mut values = MutableBuffer::new(bit_util::ceil(len, 64) * 8); let chunks = len / 64; let remainder = len % 64; for chunk in 0..chunks { let mut null_packed = 0; let mut values_packed = 0; for bit_idx in 0..64 { let i = split_off(&mut rows[bit_idx + chunk * 64], 2); let (null, value) = (i[0] == 1, i[1] == true_val); null_count += !null as usize; null_packed |= (null as u64) << bit_idx; values_packed |= (value as u64) << bit_idx; } nulls.push(null_packed); values.push(values_packed); } if remainder != 0 { let mut null_packed = 0; let mut values_packed = 0; for bit_idx in 0..remainder { let i = split_off(&mut rows[bit_idx + chunks * 64], 2); let (null, value) = (i[0] == 1, i[1] == true_val); null_count += !null as usize; null_packed |= (null as u64) << bit_idx; values_packed |= (value as u64) << bit_idx; } nulls.push(null_packed); values.push(values_packed); } let builder = ArrayDataBuilder::new(DataType::Boolean) .len(rows.len()) .null_count(null_count) .add_buffer(values.into()) .null_bit_buffer(Some(nulls.into())); // SAFETY: // Buffers are the correct length unsafe { BooleanArray::from(builder.build_unchecked()) } } /// Decodes a single byte from each row, interpreting `0x01` as a valid value /// and all other values as a null /// /// Returns the null count and null buffer pub fn decode_nulls(rows: &[&[u8]]) -> (usize, Buffer) { let mut null_count = 0; let buffer = MutableBuffer::collect_bool(rows.len(), |idx| { let valid = rows[idx][0] == 1; null_count += !valid as usize; valid }) .into(); (null_count, buffer) } /// Decodes an `ArrayData` from rows based on the provided `FixedLengthEncoding` `T` /// /// # Safety /// /// `data_type` must be the appropriate native type for `T` unsafe fn decode_fixed<T: FixedLengthEncoding + ArrowNativeType>( rows: &mut [&[u8]], data_type: DataType, options: SortOptions, ) -> ArrayData { let len = rows.len(); let mut values = BufferBuilder::<T>::new(len); let (null_count, nulls) = decode_nulls(rows); for row in rows { let i = split_off(row, T::ENCODED_LEN); let value = T::Encoded::from_slice(&i[1..], options.descending); values.append(T::decode(value)); } let builder = ArrayDataBuilder::new(data_type) .len(len) .null_count(null_count) .add_buffer(values.finish()) .null_bit_buffer(Some(nulls)); // SAFETY: Buffers correct length builder.build_unchecked() } /// Decodes a `PrimitiveArray` from rows pub fn
decode_primitive<T: ArrowPrimitiveType>( rows: &mut [&[u8]], data_type: DataType, options: SortOptions, ) -> PrimitiveArray<T> where T::Native: FixedLengthEncoding, { assert!(PrimitiveArray::<T>::is_compatible(&data_type)); // SAFETY: // Validated data type above unsafe { decode_fixed::<T::Native>(rows, data_type, options).into() } } /// Decodes a `FixedLengthBinary` from rows pub fn decode_fixed_size_binary( rows: &mut [&[u8]], size: i32, options: SortOptions, ) -> FixedSizeBinaryArray { let len = rows.len(); let mut values = MutableBuffer::new(size as usize * rows.len()); let (null_count, nulls) = decode_nulls(rows); let encoded_len = size as usize + 1; for row in rows { let i = split_off(row, encoded_len); values.extend_from_slice(&i[1..]); } if options.descending { for v in values.as_slice_mut() { *v = !*v; } } let builder = ArrayDataBuilder::new(DataType::FixedSizeBinary(size)) .len(len) .null_count(null_count) .add_buffer(values.into()) .null_bit_buffer(Some(nulls)); // SAFETY: Buffers correct length unsafe { builder.build_unchecked().into() } }
{ // Flip bits to reverse order encoded.as_mut().iter_mut().for_each(|v| *v = !*v) }
conditional_block
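The `conditional_block` middle above is the descending-order branch of `encode`: because the ascending encoding is big-endian and order-preserving, inverting every payload byte exactly reverses the lexicographic order. A standalone sketch under that assumption; `encode_u32` is a hypothetical stand-in for the generic path, which additionally writes a validity byte and uses `null_sentinel` for nulls:

```rust
// Flipping all bytes of a big-endian encoding reverses its sort order.
fn encode_u32(v: u32, descending: bool) -> [u8; 4] {
    let mut b = v.to_be_bytes(); // ascending, order-preserving encoding
    if descending {
        // Flip bits to reverse order, as in the conditional block above
        b.iter_mut().for_each(|x| *x = !*x);
    }
    b
}

fn main() {
    let mut vals = [7_u32, 1, 42, 19];
    vals.sort_by_key(|v| encode_u32(*v, true));
    println!("{:?}", vals); // [42, 19, 7, 1]: byte order is now descending
}
```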
fixed.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. use crate::array::PrimitiveArray; use crate::null_sentinel; use arrow_array::builder::BufferBuilder; use arrow_array::{ArrowPrimitiveType, BooleanArray, FixedSizeBinaryArray}; use arrow_buffer::{bit_util, i256, ArrowNativeType, Buffer, MutableBuffer}; use arrow_data::{ArrayData, ArrayDataBuilder}; use arrow_schema::{DataType, SortOptions}; use half::f16; pub trait FromSlice { fn from_slice(slice: &[u8], invert: bool) -> Self; } impl<const N: usize> FromSlice for [u8; N] { #[inline] fn from_slice(slice: &[u8], invert: bool) -> Self { let mut t: Self = slice.try_into().unwrap(); if invert { t.iter_mut().for_each(|o| *o = !*o); } t } } /// Encodes a value of a particular fixed width type into bytes according to the rules /// described on [`super::RowConverter`] pub trait FixedLengthEncoding: Copy { const ENCODED_LEN: usize = 1 + std::mem::size_of::<Self::Encoded>(); type Encoded: Sized + Copy + FromSlice + AsRef<[u8]> + AsMut<[u8]>; fn encode(self) -> Self::Encoded; fn decode(encoded: Self::Encoded) -> Self; } impl FixedLengthEncoding for bool { type Encoded = [u8; 1]; fn encode(self) -> [u8; 1]
fn decode(encoded: Self::Encoded) -> Self { encoded[0] != 0 } } macro_rules! encode_signed { ($n:expr, $t:ty) => { impl FixedLengthEncoding for $t { type Encoded = [u8; $n]; fn encode(self) -> [u8; $n] { let mut b = self.to_be_bytes(); // Toggle top "sign" bit to ensure consistent sort order b[0] ^= 0x80; b } fn decode(mut encoded: Self::Encoded) -> Self { // Toggle top "sign" bit encoded[0] ^= 0x80; Self::from_be_bytes(encoded) } } }; } encode_signed!(1, i8); encode_signed!(2, i16); encode_signed!(4, i32); encode_signed!(8, i64); encode_signed!(16, i128); encode_signed!(32, i256); macro_rules! encode_unsigned { ($n:expr, $t:ty) => { impl FixedLengthEncoding for $t { type Encoded = [u8; $n]; fn encode(self) -> [u8; $n] { self.to_be_bytes() } fn decode(encoded: Self::Encoded) -> Self { Self::from_be_bytes(encoded) } } }; } encode_unsigned!(1, u8); encode_unsigned!(2, u16); encode_unsigned!(4, u32); encode_unsigned!(8, u64); impl FixedLengthEncoding for f16 { type Encoded = [u8; 2]; fn encode(self) -> [u8; 2] { // https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260 let s = self.to_bits() as i16; let val = s ^ (((s >> 15) as u16) >> 1) as i16; val.encode() } fn decode(encoded: Self::Encoded) -> Self { let bits = i16::decode(encoded); let val = bits ^ (((bits >> 15) as u16) >> 1) as i16; Self::from_bits(val as u16) } } impl FixedLengthEncoding for f32 { type Encoded = [u8; 4]; fn encode(self) -> [u8; 4] { // https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260 let s = self.to_bits() as i32; let val = s ^ (((s >> 31) as u32) >> 1) as i32; val.encode() } fn decode(encoded: Self::Encoded) -> Self { let bits = i32::decode(encoded); let val = bits ^ (((bits >> 31) as u32) >> 1) as i32; Self::from_bits(val as u32) } } impl FixedLengthEncoding for f64 { type Encoded = [u8; 8]; fn encode(self) -> [u8; 8] { // https://github.com/rust-lang/rust/blob/9c20b2a8cc7588decb6de25ac6a7912dcef24d65/library/core/src/num/f32.rs#L1176-L1260 let s = self.to_bits() as i64; let val = s ^ (((s >> 63) as u64) >> 1) as i64; val.encode() } fn decode(encoded: Self::Encoded) -> Self { let bits = i64::decode(encoded); let val = bits ^ (((bits >> 63) as u64) >> 1) as i64; Self::from_bits(val as u64) } } /// Returns the total encoded length (including null byte) for a value of type `T::Native` pub const fn encoded_len<T>(_col: &PrimitiveArray<T>) -> usize where T: ArrowPrimitiveType, T::Native: FixedLengthEncoding, { T::Native::ENCODED_LEN } /// Fixed width types are encoded as /// /// - 1 byte `0` if null or `1` if valid /// - bytes of [`FixedLengthEncoding`] pub fn encode<T: FixedLengthEncoding, I: IntoIterator<Item = Option<T>>>( data: &mut [u8], offsets: &mut [usize], i: I, opts: SortOptions, ) { for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(i) { let end_offset = *offset + T::ENCODED_LEN; if let Some(val) = maybe_val { let to_write = &mut data[*offset..end_offset]; to_write[0] = 1; let mut encoded = val.encode(); if opts.descending { // Flip bits to reverse order encoded.as_mut().iter_mut().for_each(|v| *v = !*v) } to_write[1..].copy_from_slice(encoded.as_ref()) } else { data[*offset] = null_sentinel(opts); } *offset = end_offset; } } pub fn encode_fixed_size_binary( data: &mut [u8], offsets: &mut [usize], array: &FixedSizeBinaryArray, opts: SortOptions, ) { let len = array.value_length() as usize; for (offset, maybe_val) in offsets.iter_mut().skip(1).zip(array.iter()) { let
end_offset = *offset + len + 1; if let Some(val) = maybe_val { let to_write = &mut data[*offset..end_offset]; to_write[0] = 1; to_write[1..].copy_from_slice(&val[..len]); if opts.descending { // Flip bits to reverse order to_write[1..1 + len].iter_mut().for_each(|v| *v = !*v) } } else { data[*offset] = null_sentinel(opts); } *offset = end_offset; } } /// Splits `len` bytes from `src` #[inline] fn split_off<'a>(src: &mut &'a [u8], len: usize) -> &'a [u8] { let v = &src[..len]; *src = &src[len..]; v } /// Decodes a `BooleanArray` from rows pub fn decode_bool(rows: &mut [&[u8]], options: SortOptions) -> BooleanArray { let true_val = match options.descending { true => !1, false => 1, }; let len = rows.len(); let mut null_count = 0; let mut nulls = MutableBuffer::new(bit_util::ceil(len, 64) * 8); let mut values = MutableBuffer::new(bit_util::ceil(len, 64) * 8); let chunks = len / 64; let remainder = len % 64; for chunk in 0..chunks { let mut null_packed = 0; let mut values_packed = 0; for bit_idx in 0..64 { let i = split_off(&mut rows[bit_idx + chunk * 64], 2); let (null, value) = (i[0] == 1, i[1] == true_val); null_count += !null as usize; null_packed |= (null as u64) << bit_idx; values_packed |= (value as u64) << bit_idx; } nulls.push(null_packed); values.push(values_packed); } if remainder != 0 { let mut null_packed = 0; let mut values_packed = 0; for bit_idx in 0..remainder { let i = split_off(&mut rows[bit_idx + chunks * 64], 2); let (null, value) = (i[0] == 1, i[1] == true_val); null_count += !null as usize; null_packed |= (null as u64) << bit_idx; values_packed |= (value as u64) << bit_idx; } nulls.push(null_packed); values.push(values_packed); } let builder = ArrayDataBuilder::new(DataType::Boolean) .len(rows.len()) .null_count(null_count) .add_buffer(values.into()) .null_bit_buffer(Some(nulls.into())); // SAFETY: // Buffers are the correct length unsafe { BooleanArray::from(builder.build_unchecked()) } } /// Decodes a single byte from each row, interpreting `0x01` as a valid value /// and all other values as a null /// /// Returns the null count and null buffer pub fn decode_nulls(rows: &[&[u8]]) -> (usize, Buffer) { let mut null_count = 0; let buffer = MutableBuffer::collect_bool(rows.len(), |idx| { let valid = rows[idx][0] == 1; null_count += !valid as usize; valid }) .into(); (null_count, buffer) } /// Decodes an `ArrayData` from rows based on the provided `FixedLengthEncoding` `T` /// /// # Safety /// /// `data_type` must be the appropriate native type for `T` unsafe fn decode_fixed<T: FixedLengthEncoding + ArrowNativeType>( rows: &mut [&[u8]], data_type: DataType, options: SortOptions, ) -> ArrayData { let len = rows.len(); let mut values = BufferBuilder::<T>::new(len); let (null_count, nulls) = decode_nulls(rows); for row in rows { let i = split_off(row, T::ENCODED_LEN); let value = T::Encoded::from_slice(&i[1..], options.descending); values.append(T::decode(value)); } let builder = ArrayDataBuilder::new(data_type) .len(len) .null_count(null_count) .add_buffer(values.finish()) .null_bit_buffer(Some(nulls)); // SAFETY: Buffers correct length builder.build_unchecked() } /// Decodes a `PrimitiveArray` from rows pub fn decode_primitive<T: ArrowPrimitiveType>( rows: &mut [&[u8]], data_type: DataType, options: SortOptions, ) -> PrimitiveArray<T> where T::Native: FixedLengthEncoding, { assert!(PrimitiveArray::<T>::is_compatible(&data_type)); // SAFETY: // Validated data type above unsafe { decode_fixed::<T::Native>(rows, data_type, options).into() } } /// Decodes a `FixedLengthBinary` from rows
pub fn decode_fixed_size_binary( rows: &mut [&[u8]], size: i32, options: SortOptions, ) -> FixedSizeBinaryArray { let len = rows.len(); let mut values = MutableBuffer::new(size as usize * rows.len()); let (null_count, nulls) = decode_nulls(rows); let encoded_len = size as usize + 1; for row in rows { let i = split_off(row, encoded_len); values.extend_from_slice(&i[1..]); } if options.descending { for v in values.as_slice_mut() { *v = !*v; } } let builder = ArrayDataBuilder::new(DataType::FixedSizeBinary(size)) .len(len) .null_count(null_count) .add_buffer(values.into()) .null_bit_buffer(Some(nulls)); // SAFETY: Buffers correct length unsafe { builder.build_unchecked().into() } }
{ [self as u8] }
identifier_body
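One detail worth isolating from the record above is the `split_off` helper the decoders rely on: it peels a fixed number of bytes off the front of a borrowed slice and advances that slice in place, so each row behaves like a cursor moving across its fixed-width columns. A minimal standalone illustration of the same pattern:

```rust
// Mirrors the `split_off` defined in the source: return the first `len` bytes
// and shrink the caller's slice to the remainder.
fn split_off<'a>(src: &mut &'a [u8], len: usize) -> &'a [u8] {
    let v = &src[..len];
    *src = &src[len..];
    v
}

fn main() {
    // Two fixed-width "columns": a validity byte followed by one payload byte each.
    let row: &[u8] = &[1, 0x2A, 1, 0x07];
    let mut cursor = row;
    let first = split_off(&mut cursor, 2);
    let second = split_off(&mut cursor, 2);
    assert_eq!(first, &[1, 0x2A]);
    assert_eq!(second, &[1, 0x07]);
    assert!(cursor.is_empty()); // the cursor has consumed the whole row
}
```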
mod.rs
// Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 // Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ account_address::AccountAddress, account_config::stc_type_tag, block_metadata::BlockMetadata, contract_event::ContractEvent, vm_error::{StatusCode, StatusType, VMStatus}, write_set::WriteSet, }; use anyhow::{format_err, Error, Result}; use serde::{de, ser, Deserialize, Serialize}; use starcoin_crypto::keygen::KeyGen; use starcoin_crypto::{ ed25519::*, hash::{CryptoHash, CryptoHasher, PlainCryptoHash}, traits::*, HashValue, }; use std::ops::Deref; use std::{convert::TryFrom, fmt, time::Duration}; pub mod authenticator { pub use libra_types::transaction::authenticator::{ AuthenticationKey, AuthenticationKeyPreimage, Scheme, TransactionAuthenticator, }; } mod error; pub mod helpers; mod pending_transaction; mod transaction_argument; mod upgrade; use crate::transaction::authenticator::TransactionAuthenticator; pub use error::CallError; pub use error::Error as TransactionError; pub use libra_types::transaction::{ChangeSet, Module, Script}; pub use pending_transaction::{Condition, PendingTransaction}; use starcoin_crypto::multi_ed25519::{MultiEd25519PublicKey, MultiEd25519Signature}; pub use transaction_argument::{ parse_transaction_argument, parse_transaction_arguments, TransactionArgument, }; pub use upgrade::{InitScript, UpgradePackage}; pub type Version = u64; // Height - also used for MVCC in StateDB pub const MAX_TRANSACTION_SIZE_IN_BYTES: usize = 4096; /// RawUserTransaction is the portion of a transaction that a client signs #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)] pub struct RawUserTransaction { /// Sender's address. sender: AccountAddress, // Sequence number of this transaction corresponding to sender's account. sequence_number: u64, // The transaction script to execute. payload: TransactionPayload, // Maximal total gas specified by wallet to spend for this transaction. max_gas_amount: u64, // Maximal price can be paid per gas. gas_unit_price: u64, // Expiration time for this transaction. If storage is queried and // the time returned is greater than or equal to this time and this // transaction has not been included, you can be certain that it will // never be included. // A transaction that doesn't expire is represented by a very large value like // u64::max_value(). #[serde(serialize_with = "serialize_duration")] #[serde(deserialize_with = "deserialize_duration")] expiration_time: Duration, } // TODO(#1307) fn serialize_duration<S>(d: &Duration, serializer: S) -> std::result::Result<S::Ok, S::Error> where S: ser::Serializer, { serializer.serialize_u64(d.as_secs()) } fn deserialize_duration<'de, D>(deserializer: D) -> std::result::Result<Duration, D::Error> where D: de::Deserializer<'de>, { struct DurationVisitor; impl<'de> de::Visitor<'de> for DurationVisitor { type Value = Duration; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("Duration as u64") } fn visit_u64<E>(self, v: u64) -> std::result::Result<Self::Value, E> where E: de::Error, { Ok(Duration::from_secs(v)) } } deserializer.deserialize_u64(DurationVisitor) } impl RawUserTransaction { /// Create a new `RawUserTransaction` with a payload. /// /// It can be either to publish a module, to execute a script, or to issue a writeset /// transaction. 
pub fn new( sender: AccountAddress, sequence_number: u64, payload: TransactionPayload, max_gas_amount: u64, gas_unit_price: u64, expiration_time: Duration, ) -> Self { RawUserTransaction { sender, sequence_number, payload, max_gas_amount, gas_unit_price, expiration_time, } } /// Create a new `RawUserTransaction` with a script. /// /// A script transaction contains only code to execute. No publishing is allowed in scripts. pub fn
( sender: AccountAddress, sequence_number: u64, script: Script, max_gas_amount: u64, gas_unit_price: u64, expiration_time: Duration, ) -> Self { RawUserTransaction { sender, sequence_number, payload: TransactionPayload::Script(script), max_gas_amount, gas_unit_price, expiration_time, } } /// Create a new `RawUserTransaction` with a module to publish. /// /// A module transaction is the only way to publish code. Only one module per transaction /// can be published. pub fn new_module( sender: AccountAddress, sequence_number: u64, module: Module, max_gas_amount: u64, gas_unit_price: u64, expiration_time: Duration, ) -> Self { RawUserTransaction { sender, sequence_number, payload: TransactionPayload::Module(module), max_gas_amount, gas_unit_price, expiration_time, } } /// Signs the given `RawUserTransaction`. Note that this consumes the `RawUserTransaction` and turns it /// into a `SignatureCheckedTransaction`. /// /// For a transaction that has just been signed, its signature is expected to be valid. pub fn sign( self, private_key: &Ed25519PrivateKey, public_key: Ed25519PublicKey, ) -> Result<SignatureCheckedTransaction> { let signature = private_key.sign_message(&self.crypto_hash()); Ok(SignatureCheckedTransaction(SignedUserTransaction::new( self, public_key, signature, ))) } pub fn into_payload(self) -> TransactionPayload { self.payload } /// Return the sender of this transaction. pub fn sender(&self) -> AccountAddress { self.sender } pub fn mock() -> Self { Self::mock_by_sender(AccountAddress::random()) } pub fn mock_by_sender(sender: AccountAddress) -> Self { Self::new( sender, 0, TransactionPayload::Script(Script::new(vec![], vec![], vec![])), 0, 0, Duration::new(0, 0), ) } pub fn mock_from(compiled_script: Vec<u8>) -> Self { Self::new( AccountAddress::default(), 0, TransactionPayload::Script(Script::new(compiled_script, vec![stc_type_tag()], vec![])), 600, 0, Duration::new(0, 0), ) } } #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)] pub enum TransactionPayload { /// A transaction that executes code. Script(Script), /// A transaction that publishes code. Module(Module), /// A transaction that publish or update module code by a package. Package(UpgradePackage), } /// A transaction that has been signed. /// /// A `SignedUserTransaction` is a single transaction that can be atomically executed. Clients submit /// these to validator nodes, and the validator and executor submits these to the VM. /// /// **IMPORTANT:** The signature of a `SignedUserTransaction` is not guaranteed to be verified. For a /// transaction whose signature is statically guaranteed to be verified, see /// [`SignatureCheckedTransaction`]. #[derive(Clone, Eq, PartialEq, Hash, Serialize, Deserialize, CryptoHasher, CryptoHash)] pub struct SignedUserTransaction { /// The raw transaction raw_txn: RawUserTransaction, /// Public key and signature to authenticate authenticator: TransactionAuthenticator, } /// A transaction for which the signature has been verified. Created by /// [`SignedUserTransaction::check_signature`] and [`RawUserTransaction::sign`]. #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct SignatureCheckedTransaction(SignedUserTransaction); impl SignatureCheckedTransaction { /// Returns the `SignedUserTransaction` within. pub fn into_inner(self) -> SignedUserTransaction { self.0 } /// Returns the `RawUserTransaction` within. 
pub fn into_raw_transaction(self) -> RawUserTransaction { self.0.into_raw_transaction() } } impl Deref for SignatureCheckedTransaction { type Target = SignedUserTransaction; fn deref(&self) -> &Self::Target { &self.0 } } impl fmt::Debug for SignedUserTransaction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "SignedTransaction {{ \n \ {{ raw_txn: {:#?}, \n \ authenticator: {:#?}, \n \ }} \n \ }}", self.raw_txn, self.authenticator ) } } impl SignedUserTransaction { pub fn new( raw_txn: RawUserTransaction, public_key: Ed25519PublicKey, signature: Ed25519Signature, ) -> SignedUserTransaction { let authenticator = TransactionAuthenticator::ed25519(public_key, signature); SignedUserTransaction { raw_txn, authenticator, } } pub fn multi_ed25519( raw_txn: RawUserTransaction, public_key: MultiEd25519PublicKey, signature: MultiEd25519Signature, ) -> SignedUserTransaction { let authenticator = TransactionAuthenticator::multi_ed25519(public_key, signature); SignedUserTransaction { raw_txn, authenticator, } } pub fn authenticator(&self) -> TransactionAuthenticator { self.authenticator.clone() } pub fn raw_txn(&self) -> &RawUserTransaction { &self.raw_txn } pub fn sender(&self) -> AccountAddress { self.raw_txn.sender } pub fn into_raw_transaction(self) -> RawUserTransaction { self.raw_txn } pub fn sequence_number(&self) -> u64 { self.raw_txn.sequence_number } pub fn payload(&self) -> &TransactionPayload { &self.raw_txn.payload } pub fn max_gas_amount(&self) -> u64 { self.raw_txn.max_gas_amount } pub fn gas_unit_price(&self) -> u64 { self.raw_txn.gas_unit_price } pub fn expiration_time(&self) -> Duration { self.raw_txn.expiration_time } pub fn raw_txn_bytes_len(&self) -> usize { scs::to_bytes(&self.raw_txn) .expect("Unable to serialize RawUserTransaction") .len() } /// Checks that the signature of given transaction. Returns `Ok(SignatureCheckedTransaction)` if /// the signature is valid. pub fn check_signature(self) -> Result<SignatureCheckedTransaction> { self.authenticator .verify_signature(&self.raw_txn.crypto_hash())?; Ok(SignatureCheckedTransaction(self)) } //TODO pub fn mock() -> Self { let mut gen = KeyGen::from_os_rng(); let (private_key, public_key) = gen.generate_keypair(); let raw_txn = RawUserTransaction::mock(); raw_txn.sign(&private_key, public_key).unwrap().into_inner() } pub fn mock_from(compiled_script: Vec<u8>) -> Self { let mut gen = KeyGen::from_os_rng(); let (private_key, public_key) = gen.generate_keypair(); let raw_txn = RawUserTransaction::mock_from(compiled_script); raw_txn.sign(&private_key, public_key).unwrap().into_inner() } } /// The status of executing a transaction. The VM decides whether or not we should `Keep` the /// transaction output or `Discard` it based upon the execution of the transaction. We wrap these /// decisions around a `VMStatus` that provides more detail on the final execution state of the VM. #[derive(Clone, Debug, Eq, PartialEq)] pub enum TransactionStatus { /// Discard the transaction output Discard(VMStatus), /// Keep the transaction output Keep(VMStatus), } impl TransactionStatus { pub fn vm_status(&self) -> &VMStatus { match self { TransactionStatus::Discard(vm_status) | TransactionStatus::Keep(vm_status) => vm_status, } } } impl From<VMStatus> for TransactionStatus { fn from(vm_status: VMStatus) -> Self { let should_discard = match vm_status.status_type() { // Any unknown error should be discarded StatusType::Unknown => true, // Any error that is a validation status (i.e. 
an error arising from the prologue) // causes the transaction to not be included. StatusType::Validation => true, // If the VM encountered an invalid internal state, we should discard the transaction. StatusType::InvariantViolation => true, // A transaction that publishes code that cannot be verified will be charged. StatusType::Verification => false, // Even if we are unable to decode the transaction, there should be a charge made to // that user's account for the gas fees related to decoding, running the prologue etc. StatusType::Deserialization => false, // Any error encountered during the execution of the transaction will charge gas. StatusType::Execution => false, }; if should_discard { TransactionStatus::Discard(vm_status) } else { TransactionStatus::Keep(vm_status) } } } /// The output of executing a transaction. #[derive(Clone, Debug, Eq, PartialEq)] pub struct TransactionOutput { write_set: WriteSet, /// The list of events emitted during this transaction. events: Vec<ContractEvent>, /// The amount of gas used during execution. gas_used: u64, /// The resource increment size delta_size: i64, /// The execution status. status: TransactionStatus, } impl TransactionOutput { pub fn new( write_set: WriteSet, events: Vec<ContractEvent>, gas_used: u64, delta_size: i64, status: TransactionStatus, ) -> Self { TransactionOutput { write_set, events, gas_used, delta_size, status, } } pub fn write_set(&self) -> &WriteSet { &self.write_set } pub fn events(&self) -> &[ContractEvent] { &self.events } pub fn gas_used(&self) -> u64 { self.gas_used } pub fn status(&self) -> &TransactionStatus { &self.status } pub fn delta_size(&self) -> i64 { self.delta_size } pub fn into_inner(self) -> (WriteSet, Vec<ContractEvent>, u64, TransactionStatus) { (self.write_set, self.events, self.gas_used, self.status) } } /// `TransactionInfo` is the object we store in the transaction accumulator. It consists of the /// transaction as well as the execution result of this transaction. #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)] pub struct TransactionInfo { /// The hash of this transaction. transaction_hash: HashValue, /// The root hash of Sparse Merkle Tree describing the world state at the end of this /// transaction. state_root_hash: HashValue, /// The root hash of Merkle Accumulator storing all events emitted during this transaction. event_root_hash: HashValue, events: Vec<ContractEvent>, /// The amount of gas used. gas_used: u64, /// The major status. This will provide the general error class. Note that this is not /// particularly high fidelity in the presence of sub statuses but, the major status does /// determine whether or not the transaction is applied to the global state or not. major_status: StatusCode, } impl TransactionInfo { /// Constructs a new `TransactionInfo` object using transaction hash, state root hash and event /// root hash. pub fn new( transaction_hash: HashValue, state_root_hash: HashValue, event_root_hash: HashValue, events: Vec<ContractEvent>, gas_used: u64, major_status: StatusCode, ) -> TransactionInfo { TransactionInfo { transaction_hash, state_root_hash, event_root_hash, events, gas_used, major_status, } } pub fn id(&self) -> HashValue { self.crypto_hash() } /// Returns the hash of this transaction. pub fn transaction_hash(&self) -> HashValue { self.transaction_hash } /// Returns root hash of Sparse Merkle Tree describing the world state at the end of this /// transaction. 
pub fn state_root_hash(&self) -> HashValue { self.state_root_hash } /// Returns the root hash of Merkle Accumulator storing all events emitted during this /// transaction. pub fn event_root_hash(&self) -> HashValue { self.event_root_hash } pub fn events(&self) -> &[ContractEvent] { &self.events } /// Returns the amount of gas used by this transaction. pub fn gas_used(&self) -> u64 { self.gas_used } pub fn major_status(&self) -> StatusCode { self.major_status } } /// `Transaction` will be the transaction type used internally in the libra node to represent the /// transaction to be processed and persisted. /// /// We suppress the clippy warning here as we would expect most of the transaction to be user /// transaction. #[allow(clippy::large_enum_variant)] #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)] pub enum Transaction { /// Transaction submitted by the user. e.g: P2P payment transaction, publishing module /// transaction, etc. UserTransaction(SignedUserTransaction), /// Transaction that applies a ChangeSet to the current ChainState. This should be used for ONLY for /// genesis right now. ChangeSet(ChangeSet), /// Transaction to update the block metadata resource at the beginning of a block. BlockMetadata(BlockMetadata), } impl Transaction { pub fn as_signed_user_txn(&self) -> Result<&SignedUserTransaction> { match self { Transaction::UserTransaction(txn) => Ok(txn), _ => Err(format_err!("Not a user transaction.")), } } pub fn id(&self) -> HashValue { //TODO rethink txn id's represent. self.crypto_hash() } } impl TryFrom<Transaction> for SignedUserTransaction { type Error = Error; fn try_from(txn: Transaction) -> Result<Self> { match txn { Transaction::UserTransaction(txn) => Ok(txn), _ => Err(format_err!("Not a user transaction.")), } } } /// Pool transactions status #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum TxStatus { /// Added transaction Added, /// Rejected transaction Rejected, /// Dropped transaction Dropped, /// Invalid transaction Invalid, /// Canceled transaction Canceled, /// Culled transaction Culled, } impl std::fmt::Display for TxStatus { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let s = match self { TxStatus::Added => "added", TxStatus::Rejected => "rejected", TxStatus::Dropped => "dropped", TxStatus::Invalid => "invalid", TxStatus::Canceled => "canceled", TxStatus::Culled => "culled", }; write!(f, "{}", s) } }
new_script
identifier_name
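The `identifier_name` middle above is `new_script`, one of the three `RawUserTransaction` constructors. Below is a hedged sketch of the signing flow those constructors feed into, mirroring what the crate's `mock()` helpers do; it assumes this crate's types (`RawUserTransaction`, `Script`, `AccountAddress`, `SignedUserTransaction`, `KeyGen`) are in scope, and the script and gas values are placeholders taken from the mock helpers, not meaningful defaults:

```rust
use std::time::Duration;

// Build a script transaction, sign it into a SignatureCheckedTransaction,
// and unwrap the SignedUserTransaction for submission.
fn sign_demo() -> anyhow::Result<SignedUserTransaction> {
    let mut gen = KeyGen::from_os_rng();
    let (private_key, public_key) = gen.generate_keypair();
    let raw_txn = RawUserTransaction::new_script(
        AccountAddress::random(),
        0,                                   // sequence_number
        Script::new(vec![], vec![], vec![]), // code, type args, args (empty, as in mock())
        600,                                 // max_gas_amount (as in mock_from)
        0,                                   // gas_unit_price
        Duration::new(0, 0),                 // expiration_time
    );
    // `sign` consumes the raw transaction; the result's signature is expected
    // to be valid, per the doc comment on `RawUserTransaction::sign`.
    let checked = raw_txn.sign(&private_key, public_key)?;
    Ok(checked.into_inner())
}
```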
mod.rs
// Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 // Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ account_address::AccountAddress, account_config::stc_type_tag, block_metadata::BlockMetadata, contract_event::ContractEvent, vm_error::{StatusCode, StatusType, VMStatus}, write_set::WriteSet, }; use anyhow::{format_err, Error, Result}; use serde::{de, ser, Deserialize, Serialize}; use starcoin_crypto::keygen::KeyGen; use starcoin_crypto::{ ed25519::*, hash::{CryptoHash, CryptoHasher, PlainCryptoHash}, traits::*, HashValue, }; use std::ops::Deref; use std::{convert::TryFrom, fmt, time::Duration}; pub mod authenticator { pub use libra_types::transaction::authenticator::{ AuthenticationKey, AuthenticationKeyPreimage, Scheme, TransactionAuthenticator, }; } mod error; pub mod helpers; mod pending_transaction; mod transaction_argument; mod upgrade; use crate::transaction::authenticator::TransactionAuthenticator; pub use error::CallError; pub use error::Error as TransactionError; pub use libra_types::transaction::{ChangeSet, Module, Script}; pub use pending_transaction::{Condition, PendingTransaction}; use starcoin_crypto::multi_ed25519::{MultiEd25519PublicKey, MultiEd25519Signature}; pub use transaction_argument::{ parse_transaction_argument, parse_transaction_arguments, TransactionArgument, }; pub use upgrade::{InitScript, UpgradePackage}; pub type Version = u64; // Height - also used for MVCC in StateDB pub const MAX_TRANSACTION_SIZE_IN_BYTES: usize = 4096; /// RawUserTransaction is the portion of a transaction that a client signs #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)] pub struct RawUserTransaction { /// Sender's address. sender: AccountAddress, // Sequence number of this transaction corresponding to sender's account. sequence_number: u64, // The transaction script to execute. payload: TransactionPayload, // Maximal total gas specified by wallet to spend for this transaction. max_gas_amount: u64, // Maximal price can be paid per gas. gas_unit_price: u64, // Expiration time for this transaction. If storage is queried and // the time returned is greater than or equal to this time and this // transaction has not been included, you can be certain that it will // never be included.
expiration_time: Duration, } // TODO(#1307) fn serialize_duration<S>(d: &Duration, serializer: S) -> std::result::Result<S::Ok, S::Error> where S: ser::Serializer, { serializer.serialize_u64(d.as_secs()) } fn deserialize_duration<'de, D>(deserializer: D) -> std::result::Result<Duration, D::Error> where D: de::Deserializer<'de>, { struct DurationVisitor; impl<'de> de::Visitor<'de> for DurationVisitor { type Value = Duration; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("Duration as u64") } fn visit_u64<E>(self, v: u64) -> std::result::Result<Self::Value, E> where E: de::Error, { Ok(Duration::from_secs(v)) } } deserializer.deserialize_u64(DurationVisitor) } impl RawUserTransaction { /// Create a new `RawUserTransaction` with a payload. /// /// It can be either to publish a module, to execute a script, or to issue a writeset /// transaction. pub fn new( sender: AccountAddress, sequence_number: u64, payload: TransactionPayload, max_gas_amount: u64, gas_unit_price: u64, expiration_time: Duration, ) -> Self { RawUserTransaction { sender, sequence_number, payload, max_gas_amount, gas_unit_price, expiration_time, } } /// Create a new `RawUserTransaction` with a script. /// /// A script transaction contains only code to execute. No publishing is allowed in scripts. pub fn new_script( sender: AccountAddress, sequence_number: u64, script: Script, max_gas_amount: u64, gas_unit_price: u64, expiration_time: Duration, ) -> Self { RawUserTransaction { sender, sequence_number, payload: TransactionPayload::Script(script), max_gas_amount, gas_unit_price, expiration_time, } } /// Create a new `RawUserTransaction` with a module to publish. /// /// A module transaction is the only way to publish code. Only one module per transaction /// can be published. pub fn new_module( sender: AccountAddress, sequence_number: u64, module: Module, max_gas_amount: u64, gas_unit_price: u64, expiration_time: Duration, ) -> Self { RawUserTransaction { sender, sequence_number, payload: TransactionPayload::Module(module), max_gas_amount, gas_unit_price, expiration_time, } } /// Signs the given `RawUserTransaction`. Note that this consumes the `RawUserTransaction` and turns it /// into a `SignatureCheckedTransaction`. /// /// For a transaction that has just been signed, its signature is expected to be valid. pub fn sign( self, private_key: &Ed25519PrivateKey, public_key: Ed25519PublicKey, ) -> Result<SignatureCheckedTransaction> { let signature = private_key.sign_message(&self.crypto_hash()); Ok(SignatureCheckedTransaction(SignedUserTransaction::new( self, public_key, signature, ))) } pub fn into_payload(self) -> TransactionPayload { self.payload } /// Return the sender of this transaction. pub fn sender(&self) -> AccountAddress { self.sender } pub fn mock() -> Self { Self::mock_by_sender(AccountAddress::random()) } pub fn mock_by_sender(sender: AccountAddress) -> Self { Self::new( sender, 0, TransactionPayload::Script(Script::new(vec![], vec![], vec![])), 0, 0, Duration::new(0, 0), ) } pub fn mock_from(compiled_script: Vec<u8>) -> Self { Self::new( AccountAddress::default(), 0, TransactionPayload::Script(Script::new(compiled_script, vec![stc_type_tag()], vec![])), 600, 0, Duration::new(0, 0), ) } } #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)] pub enum TransactionPayload { /// A transaction that executes code. Script(Script), /// A transaction that publishes code. Module(Module), /// A transaction that publish or update module code by a package. 
Package(UpgradePackage), } /// A transaction that has been signed. /// /// A `SignedUserTransaction` is a single transaction that can be atomically executed. Clients submit /// these to validator nodes, and the validator and executor submits these to the VM. /// /// **IMPORTANT:** The signature of a `SignedUserTransaction` is not guaranteed to be verified. For a /// transaction whose signature is statically guaranteed to be verified, see /// [`SignatureCheckedTransaction`]. #[derive(Clone, Eq, PartialEq, Hash, Serialize, Deserialize, CryptoHasher, CryptoHash)] pub struct SignedUserTransaction { /// The raw transaction raw_txn: RawUserTransaction, /// Public key and signature to authenticate authenticator: TransactionAuthenticator, } /// A transaction for which the signature has been verified. Created by /// [`SignedUserTransaction::check_signature`] and [`RawUserTransaction::sign`]. #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct SignatureCheckedTransaction(SignedUserTransaction); impl SignatureCheckedTransaction { /// Returns the `SignedUserTransaction` within. pub fn into_inner(self) -> SignedUserTransaction { self.0 } /// Returns the `RawUserTransaction` within. pub fn into_raw_transaction(self) -> RawUserTransaction { self.0.into_raw_transaction() } } impl Deref for SignatureCheckedTransaction { type Target = SignedUserTransaction; fn deref(&self) -> &Self::Target { &self.0 } } impl fmt::Debug for SignedUserTransaction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "SignedTransaction {{ \n \ {{ raw_txn: {:#?}, \n \ authenticator: {:#?}, \n \ }} \n \ }}", self.raw_txn, self.authenticator ) } } impl SignedUserTransaction { pub fn new( raw_txn: RawUserTransaction, public_key: Ed25519PublicKey, signature: Ed25519Signature, ) -> SignedUserTransaction { let authenticator = TransactionAuthenticator::ed25519(public_key, signature); SignedUserTransaction { raw_txn, authenticator, } } pub fn multi_ed25519( raw_txn: RawUserTransaction, public_key: MultiEd25519PublicKey, signature: MultiEd25519Signature, ) -> SignedUserTransaction { let authenticator = TransactionAuthenticator::multi_ed25519(public_key, signature); SignedUserTransaction { raw_txn, authenticator, } } pub fn authenticator(&self) -> TransactionAuthenticator { self.authenticator.clone() } pub fn raw_txn(&self) -> &RawUserTransaction { &self.raw_txn } pub fn sender(&self) -> AccountAddress { self.raw_txn.sender } pub fn into_raw_transaction(self) -> RawUserTransaction { self.raw_txn } pub fn sequence_number(&self) -> u64 { self.raw_txn.sequence_number } pub fn payload(&self) -> &TransactionPayload { &self.raw_txn.payload } pub fn max_gas_amount(&self) -> u64 { self.raw_txn.max_gas_amount } pub fn gas_unit_price(&self) -> u64 { self.raw_txn.gas_unit_price } pub fn expiration_time(&self) -> Duration { self.raw_txn.expiration_time } pub fn raw_txn_bytes_len(&self) -> usize { scs::to_bytes(&self.raw_txn) .expect("Unable to serialize RawUserTransaction") .len() } /// Checks that the signature of given transaction. Returns `Ok(SignatureCheckedTransaction)` if /// the signature is valid. 
pub fn check_signature(self) -> Result<SignatureCheckedTransaction> { self.authenticator .verify_signature(&self.raw_txn.crypto_hash())?; Ok(SignatureCheckedTransaction(self)) } //TODO pub fn mock() -> Self { let mut gen = KeyGen::from_os_rng(); let (private_key, public_key) = gen.generate_keypair(); let raw_txn = RawUserTransaction::mock(); raw_txn.sign(&private_key, public_key).unwrap().into_inner() } pub fn mock_from(compiled_script: Vec<u8>) -> Self { let mut gen = KeyGen::from_os_rng(); let (private_key, public_key) = gen.generate_keypair(); let raw_txn = RawUserTransaction::mock_from(compiled_script); raw_txn.sign(&private_key, public_key).unwrap().into_inner() } } /// The status of executing a transaction. The VM decides whether or not we should `Keep` the /// transaction output or `Discard` it based upon the execution of the transaction. We wrap these /// decisions around a `VMStatus` that provides more detail on the final execution state of the VM. #[derive(Clone, Debug, Eq, PartialEq)] pub enum TransactionStatus { /// Discard the transaction output Discard(VMStatus), /// Keep the transaction output Keep(VMStatus), } impl TransactionStatus { pub fn vm_status(&self) -> &VMStatus { match self { TransactionStatus::Discard(vm_status) | TransactionStatus::Keep(vm_status) => vm_status, } } } impl From<VMStatus> for TransactionStatus { fn from(vm_status: VMStatus) -> Self { let should_discard = match vm_status.status_type() { // Any unknown error should be discarded StatusType::Unknown => true, // Any error that is a validation status (i.e. an error arising from the prologue) // causes the transaction to not be included. StatusType::Validation => true, // If the VM encountered an invalid internal state, we should discard the transaction. StatusType::InvariantViolation => true, // A transaction that publishes code that cannot be verified will be charged. StatusType::Verification => false, // Even if we are unable to decode the transaction, there should be a charge made to // that user's account for the gas fees related to decoding, running the prologue etc. StatusType::Deserialization => false, // Any error encountered during the execution of the transaction will charge gas. StatusType::Execution => false, }; if should_discard { TransactionStatus::Discard(vm_status) } else { TransactionStatus::Keep(vm_status) } } } /// The output of executing a transaction. #[derive(Clone, Debug, Eq, PartialEq)] pub struct TransactionOutput { write_set: WriteSet, /// The list of events emitted during this transaction. events: Vec<ContractEvent>, /// The amount of gas used during execution. gas_used: u64, /// The resource increment size delta_size: i64, /// The execution status. status: TransactionStatus, } impl TransactionOutput { pub fn new( write_set: WriteSet, events: Vec<ContractEvent>, gas_used: u64, delta_size: i64, status: TransactionStatus, ) -> Self { TransactionOutput { write_set, events, gas_used, delta_size, status, } } pub fn write_set(&self) -> &WriteSet { &self.write_set } pub fn events(&self) -> &[ContractEvent] { &self.events } pub fn gas_used(&self) -> u64 { self.gas_used } pub fn status(&self) -> &TransactionStatus { &self.status } pub fn delta_size(&self) -> i64 { self.delta_size } pub fn into_inner(self) -> (WriteSet, Vec<ContractEvent>, u64, TransactionStatus) { (self.write_set, self.events, self.gas_used, self.status) } } /// `TransactionInfo` is the object we store in the transaction accumulator. 
It consists of the /// transaction as well as the execution result of this transaction. #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)] pub struct TransactionInfo { /// The hash of this transaction. transaction_hash: HashValue, /// The root hash of Sparse Merkle Tree describing the world state at the end of this /// transaction. state_root_hash: HashValue, /// The root hash of Merkle Accumulator storing all events emitted during this transaction. event_root_hash: HashValue, events: Vec<ContractEvent>, /// The amount of gas used. gas_used: u64, /// The major status. This will provide the general error class. Note that this is not /// particularly high fidelity in the presence of sub statuses but, the major status does /// determine whether or not the transaction is applied to the global state or not. major_status: StatusCode, } impl TransactionInfo { /// Constructs a new `TransactionInfo` object using transaction hash, state root hash and event /// root hash. pub fn new( transaction_hash: HashValue, state_root_hash: HashValue, event_root_hash: HashValue, events: Vec<ContractEvent>, gas_used: u64, major_status: StatusCode, ) -> TransactionInfo { TransactionInfo { transaction_hash, state_root_hash, event_root_hash, events, gas_used, major_status, } } pub fn id(&self) -> HashValue { self.crypto_hash() } /// Returns the hash of this transaction. pub fn transaction_hash(&self) -> HashValue { self.transaction_hash } /// Returns root hash of Sparse Merkle Tree describing the world state at the end of this /// transaction. pub fn state_root_hash(&self) -> HashValue { self.state_root_hash } /// Returns the root hash of Merkle Accumulator storing all events emitted during this /// transaction. pub fn event_root_hash(&self) -> HashValue { self.event_root_hash } pub fn events(&self) -> &[ContractEvent] { &self.events } /// Returns the amount of gas used by this transaction. pub fn gas_used(&self) -> u64 { self.gas_used } pub fn major_status(&self) -> StatusCode { self.major_status } } /// `Transaction` will be the transaction type used internally in the libra node to represent the /// transaction to be processed and persisted. /// /// We suppress the clippy warning here as we would expect most of the transaction to be user /// transaction. #[allow(clippy::large_enum_variant)] #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)] pub enum Transaction { /// Transaction submitted by the user. e.g: P2P payment transaction, publishing module /// transaction, etc. UserTransaction(SignedUserTransaction), /// Transaction that applies a ChangeSet to the current ChainState. This should be used for ONLY for /// genesis right now. ChangeSet(ChangeSet), /// Transaction to update the block metadata resource at the beginning of a block. BlockMetadata(BlockMetadata), } impl Transaction { pub fn as_signed_user_txn(&self) -> Result<&SignedUserTransaction> { match self { Transaction::UserTransaction(txn) => Ok(txn), _ => Err(format_err!("Not a user transaction.")), } } pub fn id(&self) -> HashValue { //TODO rethink txn id's represent. 
self.crypto_hash() } } impl TryFrom<Transaction> for SignedUserTransaction { type Error = Error; fn try_from(txn: Transaction) -> Result<Self> { match txn { Transaction::UserTransaction(txn) => Ok(txn), _ => Err(format_err!("Not a user transaction.")), } } } /// Pool transactions status #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum TxStatus { /// Added transaction Added, /// Rejected transaction Rejected, /// Dropped transaction Dropped, /// Invalid transaction Invalid, /// Canceled transaction Canceled, /// Culled transaction Culled, } impl std::fmt::Display for TxStatus { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let s = match self { TxStatus::Added => "added", TxStatus::Rejected => "rejected", TxStatus::Dropped => "dropped", TxStatus::Invalid => "invalid", TxStatus::Canceled => "canceled", TxStatus::Culled => "culled", }; write!(f, "{}", s) } }
// A transaction that doesn't expire is represented by a very large value like // u64::max_value(). #[serde(serialize_with = "serialize_duration")] #[serde(deserialize_with = "deserialize_duration")]
random_line_split
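The `random_line_split` middle above carries the serde attributes that route `expiration_time` through `serialize_duration`/`deserialize_duration`, so the wire form is a bare `u64` of whole seconds (sub-second precision is dropped). A simplified standalone analogue, assuming `serde` with the derive feature and `serde_json` are available; the original's `DurationVisitor` is replaced by `u64::deserialize` for brevity:

```rust
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::time::Duration;

// Serialize a Duration as its whole number of seconds.
fn serialize_duration<S: Serializer>(d: &Duration, s: S) -> Result<S::Ok, S::Error> {
    s.serialize_u64(d.as_secs())
}

// Deserialize a u64 of seconds back into a Duration.
fn deserialize_duration<'de, D: Deserializer<'de>>(d: D) -> Result<Duration, D::Error> {
    u64::deserialize(d).map(Duration::from_secs)
}

#[derive(Serialize, Deserialize)]
struct Expiry {
    #[serde(serialize_with = "serialize_duration")]
    #[serde(deserialize_with = "deserialize_duration")]
    expiration_time: Duration,
}

fn main() {
    let e = Expiry { expiration_time: Duration::from_secs(120) };
    let json = serde_json::to_string(&e).unwrap();
    assert_eq!(json, r#"{"expiration_time":120}"#); // just the seconds on the wire
    let back: Expiry = serde_json::from_str(&json).unwrap();
    assert_eq!(back.expiration_time, Duration::from_secs(120));
}
```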
mod.rs
// Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 // Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ account_address::AccountAddress, account_config::stc_type_tag, block_metadata::BlockMetadata, contract_event::ContractEvent, vm_error::{StatusCode, StatusType, VMStatus}, write_set::WriteSet, }; use anyhow::{format_err, Error, Result}; use serde::{de, ser, Deserialize, Serialize}; use starcoin_crypto::keygen::KeyGen; use starcoin_crypto::{ ed25519::*, hash::{CryptoHash, CryptoHasher, PlainCryptoHash}, traits::*, HashValue, }; use std::ops::Deref; use std::{convert::TryFrom, fmt, time::Duration}; pub mod authenticator { pub use libra_types::transaction::authenticator::{ AuthenticationKey, AuthenticationKeyPreimage, Scheme, TransactionAuthenticator, }; } mod error; pub mod helpers; mod pending_transaction; mod transaction_argument; mod upgrade; use crate::transaction::authenticator::TransactionAuthenticator; pub use error::CallError; pub use error::Error as TransactionError; pub use libra_types::transaction::{ChangeSet, Module, Script}; pub use pending_transaction::{Condition, PendingTransaction}; use starcoin_crypto::multi_ed25519::{MultiEd25519PublicKey, MultiEd25519Signature}; pub use transaction_argument::{ parse_transaction_argument, parse_transaction_arguments, TransactionArgument, }; pub use upgrade::{InitScript, UpgradePackage}; pub type Version = u64; // Height - also used for MVCC in StateDB pub const MAX_TRANSACTION_SIZE_IN_BYTES: usize = 4096; /// RawUserTransaction is the portion of a transaction that a client signs #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)] pub struct RawUserTransaction { /// Sender's address. sender: AccountAddress, // Sequence number of this transaction corresponding to sender's account. sequence_number: u64, // The transaction script to execute. payload: TransactionPayload, // Maximal total gas specified by wallet to spend for this transaction. max_gas_amount: u64, // Maximal price can be paid per gas. gas_unit_price: u64, // Expiration time for this transaction. If storage is queried and // the time returned is greater than or equal to this time and this // transaction has not been included, you can be certain that it will // never be included. // A transaction that doesn't expire is represented by a very large value like // u64::max_value(). #[serde(serialize_with = "serialize_duration")] #[serde(deserialize_with = "deserialize_duration")] expiration_time: Duration, } // TODO(#1307) fn serialize_duration<S>(d: &Duration, serializer: S) -> std::result::Result<S::Ok, S::Error> where S: ser::Serializer, { serializer.serialize_u64(d.as_secs()) } fn deserialize_duration<'de, D>(deserializer: D) -> std::result::Result<Duration, D::Error> where D: de::Deserializer<'de>, { struct DurationVisitor; impl<'de> de::Visitor<'de> for DurationVisitor { type Value = Duration; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("Duration as u64") } fn visit_u64<E>(self, v: u64) -> std::result::Result<Self::Value, E> where E: de::Error, { Ok(Duration::from_secs(v)) } } deserializer.deserialize_u64(DurationVisitor) } impl RawUserTransaction { /// Create a new `RawUserTransaction` with a payload. /// /// It can be either to publish a module, to execute a script, or to issue a writeset /// transaction. 
pub fn new( sender: AccountAddress, sequence_number: u64, payload: TransactionPayload, max_gas_amount: u64, gas_unit_price: u64, expiration_time: Duration, ) -> Self { RawUserTransaction { sender, sequence_number, payload, max_gas_amount, gas_unit_price, expiration_time, } } /// Create a new `RawUserTransaction` with a script. /// /// A script transaction contains only code to execute. No publishing is allowed in scripts. pub fn new_script( sender: AccountAddress, sequence_number: u64, script: Script, max_gas_amount: u64, gas_unit_price: u64, expiration_time: Duration, ) -> Self { RawUserTransaction { sender, sequence_number, payload: TransactionPayload::Script(script), max_gas_amount, gas_unit_price, expiration_time, } } /// Create a new `RawUserTransaction` with a module to publish. /// /// A module transaction is the only way to publish code. Only one module per transaction /// can be published. pub fn new_module( sender: AccountAddress, sequence_number: u64, module: Module, max_gas_amount: u64, gas_unit_price: u64, expiration_time: Duration, ) -> Self { RawUserTransaction { sender, sequence_number, payload: TransactionPayload::Module(module), max_gas_amount, gas_unit_price, expiration_time, } } /// Signs the given `RawUserTransaction`. Note that this consumes the `RawUserTransaction` and turns it /// into a `SignatureCheckedTransaction`. /// /// For a transaction that has just been signed, its signature is expected to be valid. pub fn sign( self, private_key: &Ed25519PrivateKey, public_key: Ed25519PublicKey, ) -> Result<SignatureCheckedTransaction> { let signature = private_key.sign_message(&self.crypto_hash()); Ok(SignatureCheckedTransaction(SignedUserTransaction::new( self, public_key, signature, ))) } pub fn into_payload(self) -> TransactionPayload { self.payload } /// Return the sender of this transaction. pub fn sender(&self) -> AccountAddress { self.sender } pub fn mock() -> Self { Self::mock_by_sender(AccountAddress::random()) } pub fn mock_by_sender(sender: AccountAddress) -> Self { Self::new( sender, 0, TransactionPayload::Script(Script::new(vec![], vec![], vec![])), 0, 0, Duration::new(0, 0), ) } pub fn mock_from(compiled_script: Vec<u8>) -> Self { Self::new( AccountAddress::default(), 0, TransactionPayload::Script(Script::new(compiled_script, vec![stc_type_tag()], vec![])), 600, 0, Duration::new(0, 0), ) } } #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)] pub enum TransactionPayload { /// A transaction that executes code. Script(Script), /// A transaction that publishes code. Module(Module), /// A transaction that publish or update module code by a package. Package(UpgradePackage), } /// A transaction that has been signed. /// /// A `SignedUserTransaction` is a single transaction that can be atomically executed. Clients submit /// these to validator nodes, and the validator and executor submits these to the VM. /// /// **IMPORTANT:** The signature of a `SignedUserTransaction` is not guaranteed to be verified. For a /// transaction whose signature is statically guaranteed to be verified, see /// [`SignatureCheckedTransaction`]. #[derive(Clone, Eq, PartialEq, Hash, Serialize, Deserialize, CryptoHasher, CryptoHash)] pub struct SignedUserTransaction { /// The raw transaction raw_txn: RawUserTransaction, /// Public key and signature to authenticate authenticator: TransactionAuthenticator, } /// A transaction for which the signature has been verified. Created by /// [`SignedUserTransaction::check_signature`] and [`RawUserTransaction::sign`]. 
#[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct SignatureCheckedTransaction(SignedUserTransaction); impl SignatureCheckedTransaction { /// Returns the `SignedUserTransaction` within. pub fn into_inner(self) -> SignedUserTransaction { self.0 } /// Returns the `RawUserTransaction` within. pub fn into_raw_transaction(self) -> RawUserTransaction { self.0.into_raw_transaction() } } impl Deref for SignatureCheckedTransaction { type Target = SignedUserTransaction; fn deref(&self) -> &Self::Target { &self.0 } } impl fmt::Debug for SignedUserTransaction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "SignedTransaction {{ \n \ {{ raw_txn: {:#?}, \n \ authenticator: {:#?}, \n \ }} \n \ }}", self.raw_txn, self.authenticator ) } } impl SignedUserTransaction { pub fn new( raw_txn: RawUserTransaction, public_key: Ed25519PublicKey, signature: Ed25519Signature, ) -> SignedUserTransaction { let authenticator = TransactionAuthenticator::ed25519(public_key, signature); SignedUserTransaction { raw_txn, authenticator, } } pub fn multi_ed25519( raw_txn: RawUserTransaction, public_key: MultiEd25519PublicKey, signature: MultiEd25519Signature, ) -> SignedUserTransaction { let authenticator = TransactionAuthenticator::multi_ed25519(public_key, signature); SignedUserTransaction { raw_txn, authenticator, } } pub fn authenticator(&self) -> TransactionAuthenticator { self.authenticator.clone() } pub fn raw_txn(&self) -> &RawUserTransaction
pub fn sender(&self) -> AccountAddress {
        self.raw_txn.sender
    }

    pub fn into_raw_transaction(self) -> RawUserTransaction {
        self.raw_txn
    }

    pub fn sequence_number(&self) -> u64 {
        self.raw_txn.sequence_number
    }

    pub fn payload(&self) -> &TransactionPayload {
        &self.raw_txn.payload
    }

    pub fn max_gas_amount(&self) -> u64 {
        self.raw_txn.max_gas_amount
    }

    pub fn gas_unit_price(&self) -> u64 {
        self.raw_txn.gas_unit_price
    }

    pub fn expiration_time(&self) -> Duration {
        self.raw_txn.expiration_time
    }

    pub fn raw_txn_bytes_len(&self) -> usize {
        scs::to_bytes(&self.raw_txn)
            .expect("Unable to serialize RawUserTransaction")
            .len()
    }

    /// Checks the signature of the given transaction. Returns `Ok(SignatureCheckedTransaction)` if
    /// the signature is valid.
    pub fn check_signature(self) -> Result<SignatureCheckedTransaction> {
        self.authenticator
            .verify_signature(&self.raw_txn.crypto_hash())?;
        Ok(SignatureCheckedTransaction(self))
    }

    //TODO
    pub fn mock() -> Self {
        let mut gen = KeyGen::from_os_rng();
        let (private_key, public_key) = gen.generate_keypair();
        let raw_txn = RawUserTransaction::mock();
        raw_txn.sign(&private_key, public_key).unwrap().into_inner()
    }

    pub fn mock_from(compiled_script: Vec<u8>) -> Self {
        let mut gen = KeyGen::from_os_rng();
        let (private_key, public_key) = gen.generate_keypair();
        let raw_txn = RawUserTransaction::mock_from(compiled_script);
        raw_txn.sign(&private_key, public_key).unwrap().into_inner()
    }
}

/// The status of executing a transaction. The VM decides whether or not we should `Keep` the
/// transaction output or `Discard` it based upon the execution of the transaction. We wrap these
/// decisions around a `VMStatus` that provides more detail on the final execution state of the VM.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum TransactionStatus {
    /// Discard the transaction output
    Discard(VMStatus),
    /// Keep the transaction output
    Keep(VMStatus),
}

impl TransactionStatus {
    pub fn vm_status(&self) -> &VMStatus {
        match self {
            TransactionStatus::Discard(vm_status) | TransactionStatus::Keep(vm_status) => vm_status,
        }
    }
}

impl From<VMStatus> for TransactionStatus {
    fn from(vm_status: VMStatus) -> Self {
        let should_discard = match vm_status.status_type() {
            // Any unknown error should be discarded
            StatusType::Unknown => true,
            // Any error that is a validation status (i.e. an error arising from the prologue)
            // causes the transaction to not be included.
            StatusType::Validation => true,
            // If the VM encountered an invalid internal state, we should discard the transaction.
            StatusType::InvariantViolation => true,
            // A transaction that publishes code that cannot be verified will be charged.
            StatusType::Verification => false,
            // Even if we are unable to decode the transaction, there should be a charge made to
            // that user's account for the gas fees related to decoding, running the prologue etc.
            StatusType::Deserialization => false,
            // Any error encountered during the execution of the transaction will charge gas.
            StatusType::Execution => false,
        };
        if should_discard {
            TransactionStatus::Discard(vm_status)
        } else {
            TransactionStatus::Keep(vm_status)
        }
    }
}

/// The output of executing a transaction.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct TransactionOutput {
    write_set: WriteSet,
    /// The list of events emitted during this transaction.
    events: Vec<ContractEvent>,
    /// The amount of gas used during execution.
    gas_used: u64,
    /// The resource increment size
    delta_size: i64,
    /// The execution status.
    status: TransactionStatus,
}

impl TransactionOutput {
    pub fn new(
        write_set: WriteSet,
        events: Vec<ContractEvent>,
        gas_used: u64,
        delta_size: i64,
        status: TransactionStatus,
    ) -> Self {
        TransactionOutput {
            write_set,
            events,
            gas_used,
            delta_size,
            status,
        }
    }

    pub fn write_set(&self) -> &WriteSet {
        &self.write_set
    }

    pub fn events(&self) -> &[ContractEvent] {
        &self.events
    }

    pub fn gas_used(&self) -> u64 {
        self.gas_used
    }

    pub fn status(&self) -> &TransactionStatus {
        &self.status
    }

    pub fn delta_size(&self) -> i64 {
        self.delta_size
    }

    pub fn into_inner(self) -> (WriteSet, Vec<ContractEvent>, u64, TransactionStatus) {
        (self.write_set, self.events, self.gas_used, self.status)
    }
}

/// `TransactionInfo` is the object we store in the transaction accumulator. It consists of the
/// transaction as well as the execution result of this transaction.
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)]
pub struct TransactionInfo {
    /// The hash of this transaction.
    transaction_hash: HashValue,
    /// The root hash of Sparse Merkle Tree describing the world state at the end of this
    /// transaction.
    state_root_hash: HashValue,
    /// The root hash of Merkle Accumulator storing all events emitted during this transaction.
    event_root_hash: HashValue,
    events: Vec<ContractEvent>,
    /// The amount of gas used.
    gas_used: u64,
    /// The major status. This will provide the general error class. Note that this is not
    /// particularly high fidelity in the presence of sub statuses but, the major status does
    /// determine whether or not the transaction is applied to the global state or not.
    major_status: StatusCode,
}

impl TransactionInfo {
    /// Constructs a new `TransactionInfo` object using transaction hash, state root hash and event
    /// root hash.
    pub fn new(
        transaction_hash: HashValue,
        state_root_hash: HashValue,
        event_root_hash: HashValue,
        events: Vec<ContractEvent>,
        gas_used: u64,
        major_status: StatusCode,
    ) -> TransactionInfo {
        TransactionInfo {
            transaction_hash,
            state_root_hash,
            event_root_hash,
            events,
            gas_used,
            major_status,
        }
    }

    pub fn id(&self) -> HashValue {
        self.crypto_hash()
    }

    /// Returns the hash of this transaction.
    pub fn transaction_hash(&self) -> HashValue {
        self.transaction_hash
    }

    /// Returns root hash of Sparse Merkle Tree describing the world state at the end of this
    /// transaction.
    pub fn state_root_hash(&self) -> HashValue {
        self.state_root_hash
    }

    /// Returns the root hash of Merkle Accumulator storing all events emitted during this
    /// transaction.
    pub fn event_root_hash(&self) -> HashValue {
        self.event_root_hash
    }

    pub fn events(&self) -> &[ContractEvent] {
        &self.events
    }

    /// Returns the amount of gas used by this transaction.
    pub fn gas_used(&self) -> u64 {
        self.gas_used
    }

    pub fn major_status(&self) -> StatusCode {
        self.major_status
    }
}

/// `Transaction` will be the transaction type used internally in the libra node to represent the
/// transaction to be processed and persisted.
///
/// We suppress the clippy warning here as we would expect most of the transactions to be user
/// transactions.
#[allow(clippy::large_enum_variant)]
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)]
pub enum Transaction {
    /// Transaction submitted by the user. e.g: P2P payment transaction, publishing module
    /// transaction, etc.
    UserTransaction(SignedUserTransaction),
    /// Transaction that applies a ChangeSet to the current ChainState. This should be used
    /// ONLY for genesis right now.
ChangeSet(ChangeSet), /// Transaction to update the block metadata resource at the beginning of a block. BlockMetadata(BlockMetadata), } impl Transaction { pub fn as_signed_user_txn(&self) -> Result<&SignedUserTransaction> { match self { Transaction::UserTransaction(txn) => Ok(txn), _ => Err(format_err!("Not a user transaction.")), } } pub fn id(&self) -> HashValue { //TODO rethink txn id's represent. self.crypto_hash() } } impl TryFrom<Transaction> for SignedUserTransaction { type Error = Error; fn try_from(txn: Transaction) -> Result<Self> { match txn { Transaction::UserTransaction(txn) => Ok(txn), _ => Err(format_err!("Not a user transaction.")), } } } /// Pool transactions status #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum TxStatus { /// Added transaction Added, /// Rejected transaction Rejected, /// Dropped transaction Dropped, /// Invalid transaction Invalid, /// Canceled transaction Canceled, /// Culled transaction Culled, } impl std::fmt::Display for TxStatus { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let s = match self { TxStatus::Added => "added", TxStatus::Rejected => "rejected", TxStatus::Dropped => "dropped", TxStatus::Invalid => "invalid", TxStatus::Canceled => "canceled", TxStatus::Culled => "culled", }; write!(f, "{}", s) } }
{ &self.raw_txn }
identifier_body
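The record above defines the full signing pipeline, so a short usage sketch may help: RawUserTransaction::sign consumes the raw transaction and yields a SignatureCheckedTransaction, while SignedUserTransaction::check_signature is the verification path back to the checked type. This sketch assumes the mock constructors and starcoin-crypto KeyGen shown above compile in this crate, and that AccountAddress supports equality comparison:

// Sketch of the sign -> check_signature round trip using APIs from mod.rs above.
fn demo_signing_roundtrip() {
    let mut gen = KeyGen::from_os_rng();
    let (private_key, public_key) = gen.generate_keypair();
    let raw_txn = RawUserTransaction::mock();
    // sign() consumes the RawUserTransaction.
    let checked = raw_txn.sign(&private_key, public_key).expect("signing failed");
    let signed: SignedUserTransaction = checked.into_inner();
    // Verification re-establishes the statically-checked type.
    let reverified = signed.check_signature().expect("signature must verify");
    assert_eq!(reverified.sender(), reverified.raw_txn().sender());
}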
config.rs
// Copyright 2019 Twitter, Inc. // Licensed under the Apache License, Version 2.0 // http://www.apache.org/licenses/LICENSE-2.0 use crate::common::*; use clap::{App, Arg, ArgMatches}; use logger::*; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::io::{BufRead, BufReader}; use std::net::SocketAddr; use std::process; use std::time::Duration; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); pub const NAME: &str = env!("CARGO_PKG_NAME"); pub const DEFAULT_SAMPLE_RATE_HZ: f64 = 1.0; pub const DEFAULT_SAMPLER_TIMEOUT_MILLISECONDS: usize = 50; pub const DEFAULT_MAX_SAMPLER_TIMEOUTS: usize = 5; pub const DEFAULT_INTERVAL_SECONDS: usize = 60; /// This struct contains the configuration of the agent. #[derive(Clone)] pub struct Config { /// the latching interval for stats interval: u64, /// sample rate for counters in Hz sample_rate: f64, /// the sampler timeout sampler_timeout: Duration, /// maximum consecutive sampler timeouts max_sampler_timeouts: usize, /// the listen address for the stats port listen: SocketAddr, /// the logging level loglevel: Level, /// memcache instance to instrument memcache: Option<SocketAddr>, /// flags for enabled statistics subsystems flags: Flags, /// the number of cores on the host cores: usize, /// an optional file to log stats to stats_log: Option<String>, /// flag to indicate Mesos sidecar mode sidecar: bool, } #[derive(Clone)] /// `Flags` is a simple wrapper for a doubly-keyed `HashSet` pub struct Flags { data: HashMap<String, HashSet<String>>, } impl Flags { /// Creates a new empty set of `Flags` pub fn new() -> Self { Self { data: HashMap::new(), } } /// Insert a `pkey`+`lkey` into the set pub fn insert(&mut self, pkey: &str, lkey: &str) { let mut entry = self.data.remove(pkey).unwrap_or_default(); entry.insert(lkey.to_owned()); self.data.insert(pkey.to_owned(), entry); } /// True if the set contains `pkey`+`lkey` pub fn contains(&self, pkey: &str, lkey: &str) -> bool { if let Some(entry) = self.data.get(pkey) { entry.get(lkey).is_some() } else { false } } /// True if the set contains the `pkey` pub fn
(&self, pkey: &str) -> bool { self.data.get(pkey).is_some() } /// Remove a `pkey`+`lkey` pub fn remove(&mut self, pkey: &str, lkey: &str) { if let Some(entry) = self.data.get_mut(pkey) { entry.remove(lkey); } } /// Remove the `pkey` and all `lkey`s under it pub fn remove_pkey(&mut self, pkey: &str) { self.data.remove(pkey); } } impl Config { /// parse command line options and return `Config` pub fn new() -> Config { let matches = App::new(NAME) .version(VERSION) .author("Brian Martin <[email protected]>") .about("high-resolution systems performance telemetry agent") .arg( Arg::with_name("listen") .short("l") .long("listen") .required(true) .takes_value(true) .value_name("IP:PORT") .help("Sets the listen address for metrics"), ) .arg( Arg::with_name("verbose") .short("v") .long("verbose") .multiple(true) .help("Increase verbosity by one level. Can be used more than once"), ) .arg( Arg::with_name("interval") .long("interval") .value_name("Seconds") .help("Integration window duration and stats endpoint refresh time") .takes_value(true), ) .arg( Arg::with_name("sample-rate") .long("sample-rate") .value_name("Hertz") .help("Sets the sampling frequency for the counters") .takes_value(true), ) .arg( Arg::with_name("sampler-timeout") .long("sampler-timeout") .value_name("MS") .help("Sets the timeout for per-sampler execution") .takes_value(true), ) .arg( Arg::with_name("max-sampler-timeouts") .long("max-sampler-timeouts") .value_name("MS") .help("Sets the maximum number of consecutive sampler timeouts") .takes_value(true), ) .arg( Arg::with_name("cpu") .long("cpu") .takes_value(true) .multiple(true) .possible_value("totals") .help("Enable statistics from CPU subsystem"), ) .arg( Arg::with_name("disk") .long("disk") .takes_value(true) .multiple(true) .possible_value("totals") .help("Enable statistics from Disk subsystem"), ) .arg( Arg::with_name("ebpf") .long("ebpf") .takes_value(true) .multiple(true) .possible_value("all") .possible_value("block") .possible_value("ext4") .possible_value("scheduler") .possible_value("xfs") .help("Enable statistics from eBPF"), ) .arg( Arg::with_name("network") .long("network") .takes_value(true) .multiple(true) .possible_value("totals") .help("Enable statistics from Network subsystem"), ) .arg( Arg::with_name("perf") .long("perf") .takes_value(true) .multiple(true) .possible_value("totals") .possible_value("per-cgroup") .help("Enable statistics from Perf Events subsystem"), ) .arg( Arg::with_name("memcache") .long("memcache") .required(false) .takes_value(true) .value_name("IP:PORT") .help("Connect to the given memcache server and produce stats"), ) .arg( Arg::with_name("stats-log") .long("stats-log") .required(false) .takes_value(true) .value_name("LOG FILE") .help("Enable logging of stats to file"), ) .arg( Arg::with_name("sidecar") .long("sidecar") .required(false) .help("Enables Mesos sidecar mode, instrumenting the container"), ) .get_matches(); let listen = matches .value_of("listen") .unwrap() .parse() .unwrap_or_else(|_| { println!("ERROR: listen address is malformed"); process::exit(1); }); let memcache = if let Some(sock) = matches.value_of("memcache") { let socket = sock.parse().unwrap_or_else(|_| { println!("ERROR: memcache address is malformed"); process::exit(1); }); Some(socket) } else { None }; let sample_rate = parse_float_arg(&matches, "sample-rate").unwrap_or(DEFAULT_SAMPLE_RATE_HZ); let sampler_timeout = Duration::from_millis( parse_numeric_arg(&matches, "sampler-timeout") .unwrap_or(DEFAULT_SAMPLER_TIMEOUT_MILLISECONDS) as u64, ); let 
max_sampler_timeouts = parse_numeric_arg(&matches, "max-sampler-timeouts") .unwrap_or(DEFAULT_MAX_SAMPLER_TIMEOUTS); let interval = parse_numeric_arg(&matches, "interval").unwrap_or(DEFAULT_INTERVAL_SECONDS) as u64 * SECOND; let cores = hardware_threads().unwrap_or(1); let mut stats_enabled = Flags::new(); for subsystem in &["cpu", "disk", "ebpf", "network", "perf"] { if let Some(values) = matches.values_of(subsystem) { let flags: Vec<&str> = values.collect(); for flag in flags { stats_enabled.insert(subsystem, flag); } } } let loglevel = match matches.occurrences_of("verbose") { 0 => Level::Info, 1 => Level::Debug, _ => Level::Trace, }; let stats_log = matches .value_of("stats-log") .map(std::string::ToString::to_string); let sidecar = matches.is_present("sidecar"); Config { cores, flags: stats_enabled, sample_rate, sampler_timeout, max_sampler_timeouts, interval, listen, loglevel, memcache, stats_log, sidecar, } } /// what interval should the stats library latch on pub fn interval(&self) -> u64 { self.interval } /// what frequency the stats should be sampled on pub fn sample_rate(&self) -> f64 { self.sample_rate } /// the timeout for sampler execution pub fn sampler_timeout(&self) -> Duration { self.sampler_timeout } /// maximum consecutive sampler timeouts pub fn max_sampler_timeouts(&self) -> usize { self.max_sampler_timeouts } /// get listen address pub fn listen(&self) -> SocketAddr { self.listen } /// get log level pub fn loglevel(&self) -> Level { self.loglevel } /// how many cores on the host? pub fn cores(&self) -> usize { self.cores } pub fn memcache(&self) -> Option<SocketAddr> { self.memcache } /// is a flag enabled for a subsystem? pub fn is_enabled(&self, subsystem: &str, flag: &str) -> bool { self.flags.contains(subsystem, flag) } pub fn is_subsystem_enabled(&self, subsystem: &str) -> bool { self.flags.contains_pkey(subsystem) } pub fn stats_log(&self) -> Option<String> { self.stats_log.clone() } } /// a helper function to parse a numeric argument by name from `ArgMatches` fn parse_numeric_arg(matches: &ArgMatches, key: &str) -> Option<usize> { matches.value_of(key).map(|f| { f.parse().unwrap_or_else(|_| { println!("ERROR: could not parse {}", key); process::exit(1); }) }) } /// a helper function to parse a floating point argument by name from `ArgMatches` fn parse_float_arg(matches: &ArgMatches, key: &str) -> Option<f64> { matches.value_of(key).map(|f| { f.parse().unwrap_or_else(|_| { println!("ERROR: could not parse {}", key); process::exit(1); }) }) } /// helper function to discover the number of hardware threads pub fn hardware_threads() -> Result<usize, ()> { let path = "/sys/devices/system/cpu/present"; let f = File::open(path).map_err(|e| error!("failed to open file ({:?}): {}", path, e))?; let mut f = BufReader::new(f); let mut line = String::new(); f.read_line(&mut line) .map_err(|_| error!("failed to read line"))?; let line = line.trim(); let a: Vec<&str> = line.split('-').collect(); a.last() .unwrap_or(&"0") .parse::<usize>() .map_err(|e| error!("could not parse num cpus from file ({:?}): {}", path, e)) .map(|i| i + 1) }
contains_pkey
identifier_name
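Since the middle of the record above completes Flags::contains_pkey, here is a short sketch exercising the doubly-keyed set; every method used is defined on Flags in the config.rs source above (demo_flags is an illustrative name):

fn demo_flags() {
    let mut flags = Flags::new();
    flags.insert("ebpf", "block");
    flags.insert("ebpf", "xfs");
    assert!(flags.contains("ebpf", "block"));
    assert!(flags.contains_pkey("ebpf")); // the identifier completed above
    assert!(!flags.contains("cpu", "totals")); // missing pkey -> false
    flags.remove("ebpf", "block");
    flags.remove_pkey("ebpf");
    assert!(!flags.contains_pkey("ebpf"));
}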
config.rs
// Copyright 2019 Twitter, Inc. // Licensed under the Apache License, Version 2.0 // http://www.apache.org/licenses/LICENSE-2.0 use crate::common::*; use clap::{App, Arg, ArgMatches}; use logger::*; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::io::{BufRead, BufReader}; use std::net::SocketAddr; use std::process; use std::time::Duration; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); pub const NAME: &str = env!("CARGO_PKG_NAME"); pub const DEFAULT_SAMPLE_RATE_HZ: f64 = 1.0; pub const DEFAULT_SAMPLER_TIMEOUT_MILLISECONDS: usize = 50; pub const DEFAULT_MAX_SAMPLER_TIMEOUTS: usize = 5; pub const DEFAULT_INTERVAL_SECONDS: usize = 60; /// This struct contains the configuration of the agent. #[derive(Clone)] pub struct Config { /// the latching interval for stats interval: u64, /// sample rate for counters in Hz sample_rate: f64, /// the sampler timeout sampler_timeout: Duration, /// maximum consecutive sampler timeouts max_sampler_timeouts: usize, /// the listen address for the stats port listen: SocketAddr, /// the logging level loglevel: Level, /// memcache instance to instrument memcache: Option<SocketAddr>, /// flags for enabled statistics subsystems flags: Flags, /// the number of cores on the host cores: usize, /// an optional file to log stats to stats_log: Option<String>, /// flag to indicate Mesos sidecar mode sidecar: bool, } #[derive(Clone)] /// `Flags` is a simple wrapper for a doubly-keyed `HashSet` pub struct Flags { data: HashMap<String, HashSet<String>>, } impl Flags { /// Creates a new empty set of `Flags` pub fn new() -> Self { Self { data: HashMap::new(), } } /// Insert a `pkey`+`lkey` into the set pub fn insert(&mut self, pkey: &str, lkey: &str) { let mut entry = self.data.remove(pkey).unwrap_or_default(); entry.insert(lkey.to_owned()); self.data.insert(pkey.to_owned(), entry); } /// True if the set contains `pkey`+`lkey` pub fn contains(&self, pkey: &str, lkey: &str) -> bool { if let Some(entry) = self.data.get(pkey) { entry.get(lkey).is_some() } else { false } } /// True if the set contains the `pkey` pub fn contains_pkey(&self, pkey: &str) -> bool { self.data.get(pkey).is_some() } /// Remove a `pkey`+`lkey` pub fn remove(&mut self, pkey: &str, lkey: &str) { if let Some(entry) = self.data.get_mut(pkey) { entry.remove(lkey); } } /// Remove the `pkey` and all `lkey`s under it pub fn remove_pkey(&mut self, pkey: &str) { self.data.remove(pkey); } } impl Config { /// parse command line options and return `Config` pub fn new() -> Config { let matches = App::new(NAME) .version(VERSION) .author("Brian Martin <[email protected]>") .about("high-resolution systems performance telemetry agent") .arg( Arg::with_name("listen") .short("l") .long("listen") .required(true) .takes_value(true) .value_name("IP:PORT") .help("Sets the listen address for metrics"), ) .arg( Arg::with_name("verbose") .short("v") .long("verbose") .multiple(true) .help("Increase verbosity by one level. 
Can be used more than once"), ) .arg( Arg::with_name("interval") .long("interval") .value_name("Seconds") .help("Integration window duration and stats endpoint refresh time") .takes_value(true), ) .arg( Arg::with_name("sample-rate") .long("sample-rate") .value_name("Hertz") .help("Sets the sampling frequency for the counters") .takes_value(true), ) .arg( Arg::with_name("sampler-timeout") .long("sampler-timeout") .value_name("MS") .help("Sets the timeout for per-sampler execution") .takes_value(true), ) .arg( Arg::with_name("max-sampler-timeouts") .long("max-sampler-timeouts") .value_name("MS") .help("Sets the maximum number of consecutive sampler timeouts") .takes_value(true), ) .arg( Arg::with_name("cpu") .long("cpu") .takes_value(true) .multiple(true) .possible_value("totals") .help("Enable statistics from CPU subsystem"), ) .arg( Arg::with_name("disk") .long("disk") .takes_value(true) .multiple(true) .possible_value("totals") .help("Enable statistics from Disk subsystem"), ) .arg( Arg::with_name("ebpf") .long("ebpf") .takes_value(true) .multiple(true) .possible_value("all") .possible_value("block") .possible_value("ext4") .possible_value("scheduler") .possible_value("xfs") .help("Enable statistics from eBPF"), ) .arg( Arg::with_name("network") .long("network") .takes_value(true) .multiple(true) .possible_value("totals") .help("Enable statistics from Network subsystem"), ) .arg( Arg::with_name("perf") .long("perf") .takes_value(true) .multiple(true) .possible_value("totals") .possible_value("per-cgroup") .help("Enable statistics from Perf Events subsystem"), ) .arg( Arg::with_name("memcache") .long("memcache") .required(false) .takes_value(true) .value_name("IP:PORT") .help("Connect to the given memcache server and produce stats"), ) .arg( Arg::with_name("stats-log") .long("stats-log") .required(false) .takes_value(true) .value_name("LOG FILE") .help("Enable logging of stats to file"), ) .arg( Arg::with_name("sidecar") .long("sidecar") .required(false) .help("Enables Mesos sidecar mode, instrumenting the container"), ) .get_matches(); let listen = matches .value_of("listen") .unwrap() .parse() .unwrap_or_else(|_| { println!("ERROR: listen address is malformed"); process::exit(1); }); let memcache = if let Some(sock) = matches.value_of("memcache") { let socket = sock.parse().unwrap_or_else(|_| { println!("ERROR: memcache address is malformed"); process::exit(1); }); Some(socket) } else { None }; let sample_rate = parse_float_arg(&matches, "sample-rate").unwrap_or(DEFAULT_SAMPLE_RATE_HZ); let sampler_timeout = Duration::from_millis( parse_numeric_arg(&matches, "sampler-timeout") .unwrap_or(DEFAULT_SAMPLER_TIMEOUT_MILLISECONDS) as u64, ); let max_sampler_timeouts = parse_numeric_arg(&matches, "max-sampler-timeouts") .unwrap_or(DEFAULT_MAX_SAMPLER_TIMEOUTS); let interval = parse_numeric_arg(&matches, "interval").unwrap_or(DEFAULT_INTERVAL_SECONDS) as u64 * SECOND; let cores = hardware_threads().unwrap_or(1); let mut stats_enabled = Flags::new(); for subsystem in &["cpu", "disk", "ebpf", "network", "perf"] { if let Some(values) = matches.values_of(subsystem) { let flags: Vec<&str> = values.collect(); for flag in flags { stats_enabled.insert(subsystem, flag); } } } let loglevel = match matches.occurrences_of("verbose") { 0 => Level::Info, 1 => Level::Debug, _ => Level::Trace, }; let stats_log = matches .value_of("stats-log") .map(std::string::ToString::to_string); let sidecar = matches.is_present("sidecar"); Config { cores, flags: stats_enabled, sample_rate, sampler_timeout, 
max_sampler_timeouts, interval, listen, loglevel, memcache, stats_log, sidecar, } } /// what interval should the stats library latch on pub fn interval(&self) -> u64 { self.interval } /// what frequency the stats should be sampled on pub fn sample_rate(&self) -> f64
/// the timeout for sampler execution pub fn sampler_timeout(&self) -> Duration { self.sampler_timeout } /// maximum consecutive sampler timeouts pub fn max_sampler_timeouts(&self) -> usize { self.max_sampler_timeouts } /// get listen address pub fn listen(&self) -> SocketAddr { self.listen } /// get log level pub fn loglevel(&self) -> Level { self.loglevel } /// how many cores on the host? pub fn cores(&self) -> usize { self.cores } pub fn memcache(&self) -> Option<SocketAddr> { self.memcache } /// is a flag enabled for a subsystem? pub fn is_enabled(&self, subsystem: &str, flag: &str) -> bool { self.flags.contains(subsystem, flag) } pub fn is_subsystem_enabled(&self, subsystem: &str) -> bool { self.flags.contains_pkey(subsystem) } pub fn stats_log(&self) -> Option<String> { self.stats_log.clone() } } /// a helper function to parse a numeric argument by name from `ArgMatches` fn parse_numeric_arg(matches: &ArgMatches, key: &str) -> Option<usize> { matches.value_of(key).map(|f| { f.parse().unwrap_or_else(|_| { println!("ERROR: could not parse {}", key); process::exit(1); }) }) } /// a helper function to parse a floating point argument by name from `ArgMatches` fn parse_float_arg(matches: &ArgMatches, key: &str) -> Option<f64> { matches.value_of(key).map(|f| { f.parse().unwrap_or_else(|_| { println!("ERROR: could not parse {}", key); process::exit(1); }) }) } /// helper function to discover the number of hardware threads pub fn hardware_threads() -> Result<usize, ()> { let path = "/sys/devices/system/cpu/present"; let f = File::open(path).map_err(|e| error!("failed to open file ({:?}): {}", path, e))?; let mut f = BufReader::new(f); let mut line = String::new(); f.read_line(&mut line) .map_err(|_| error!("failed to read line"))?; let line = line.trim(); let a: Vec<&str> = line.split('-').collect(); a.last() .unwrap_or(&"0") .parse::<usize>() .map_err(|e| error!("could not parse num cpus from file ({:?}): {}", path, e)) .map(|i| i + 1) }
{ self.sample_rate }
identifier_body
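The middle above completes the sample_rate() getter, which reports a frequency in Hz. A small, std-only sketch of how such a rate maps to a per-sample polling interval (sample_interval is an illustrative helper, not part of Config):

use std::time::Duration;

// 1.0 Hz -> one sample per second; 4.0 Hz -> one sample every 250 ms.
fn sample_interval(sample_rate_hz: f64) -> Duration {
    Duration::from_secs_f64(1.0 / sample_rate_hz)
}

fn demo_sample_interval() {
    assert_eq!(sample_interval(1.0), Duration::from_secs(1));
    assert_eq!(sample_interval(4.0), Duration::from_millis(250));
}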
config.rs
// Copyright 2019 Twitter, Inc. // Licensed under the Apache License, Version 2.0 // http://www.apache.org/licenses/LICENSE-2.0 use crate::common::*; use clap::{App, Arg, ArgMatches}; use logger::*; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::io::{BufRead, BufReader}; use std::net::SocketAddr; use std::process; use std::time::Duration; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); pub const NAME: &str = env!("CARGO_PKG_NAME"); pub const DEFAULT_SAMPLE_RATE_HZ: f64 = 1.0; pub const DEFAULT_SAMPLER_TIMEOUT_MILLISECONDS: usize = 50; pub const DEFAULT_MAX_SAMPLER_TIMEOUTS: usize = 5; pub const DEFAULT_INTERVAL_SECONDS: usize = 60; /// This struct contains the configuration of the agent. #[derive(Clone)] pub struct Config { /// the latching interval for stats interval: u64, /// sample rate for counters in Hz sample_rate: f64, /// the sampler timeout sampler_timeout: Duration, /// maximum consecutive sampler timeouts max_sampler_timeouts: usize, /// the listen address for the stats port listen: SocketAddr, /// the logging level loglevel: Level, /// memcache instance to instrument memcache: Option<SocketAddr>, /// flags for enabled statistics subsystems flags: Flags, /// the number of cores on the host cores: usize, /// an optional file to log stats to stats_log: Option<String>, /// flag to indicate Mesos sidecar mode sidecar: bool, } #[derive(Clone)] /// `Flags` is a simple wrapper for a doubly-keyed `HashSet` pub struct Flags { data: HashMap<String, HashSet<String>>, } impl Flags { /// Creates a new empty set of `Flags` pub fn new() -> Self { Self { data: HashMap::new(), } } /// Insert a `pkey`+`lkey` into the set pub fn insert(&mut self, pkey: &str, lkey: &str) { let mut entry = self.data.remove(pkey).unwrap_or_default(); entry.insert(lkey.to_owned()); self.data.insert(pkey.to_owned(), entry); } /// True if the set contains `pkey`+`lkey` pub fn contains(&self, pkey: &str, lkey: &str) -> bool { if let Some(entry) = self.data.get(pkey) { entry.get(lkey).is_some() } else { false } } /// True if the set contains the `pkey` pub fn contains_pkey(&self, pkey: &str) -> bool { self.data.get(pkey).is_some() } /// Remove a `pkey`+`lkey` pub fn remove(&mut self, pkey: &str, lkey: &str) { if let Some(entry) = self.data.get_mut(pkey) { entry.remove(lkey); } } /// Remove the `pkey` and all `lkey`s under it pub fn remove_pkey(&mut self, pkey: &str) { self.data.remove(pkey); } } impl Config { /// parse command line options and return `Config` pub fn new() -> Config { let matches = App::new(NAME) .version(VERSION) .author("Brian Martin <[email protected]>") .about("high-resolution systems performance telemetry agent") .arg( Arg::with_name("listen") .short("l") .long("listen") .required(true) .takes_value(true) .value_name("IP:PORT") .help("Sets the listen address for metrics"), ) .arg( Arg::with_name("verbose") .short("v") .long("verbose") .multiple(true) .help("Increase verbosity by one level. 
Can be used more than once"), ) .arg( Arg::with_name("interval") .long("interval") .value_name("Seconds") .help("Integration window duration and stats endpoint refresh time") .takes_value(true), ) .arg( Arg::with_name("sample-rate") .long("sample-rate") .value_name("Hertz") .help("Sets the sampling frequency for the counters") .takes_value(true), ) .arg( Arg::with_name("sampler-timeout") .long("sampler-timeout") .value_name("MS") .help("Sets the timeout for per-sampler execution") .takes_value(true), ) .arg( Arg::with_name("max-sampler-timeouts") .long("max-sampler-timeouts") .value_name("MS") .help("Sets the maximum number of consecutive sampler timeouts") .takes_value(true), ) .arg( Arg::with_name("cpu") .long("cpu") .takes_value(true) .multiple(true) .possible_value("totals") .help("Enable statistics from CPU subsystem"), ) .arg( Arg::with_name("disk") .long("disk") .takes_value(true) .multiple(true) .possible_value("totals") .help("Enable statistics from Disk subsystem"), ) .arg( Arg::with_name("ebpf") .long("ebpf") .takes_value(true) .multiple(true) .possible_value("all") .possible_value("block") .possible_value("ext4") .possible_value("scheduler") .possible_value("xfs") .help("Enable statistics from eBPF"), ) .arg( Arg::with_name("network") .long("network") .takes_value(true) .multiple(true) .possible_value("totals") .help("Enable statistics from Network subsystem"), ) .arg( Arg::with_name("perf") .long("perf") .takes_value(true) .multiple(true) .possible_value("totals") .possible_value("per-cgroup") .help("Enable statistics from Perf Events subsystem"), ) .arg( Arg::with_name("memcache") .long("memcache") .required(false) .takes_value(true) .value_name("IP:PORT") .help("Connect to the given memcache server and produce stats"), ) .arg( Arg::with_name("stats-log") .long("stats-log") .required(false) .takes_value(true) .value_name("LOG FILE") .help("Enable logging of stats to file"), ) .arg( Arg::with_name("sidecar") .long("sidecar") .required(false) .help("Enables Mesos sidecar mode, instrumenting the container"), ) .get_matches(); let listen = matches .value_of("listen") .unwrap() .parse() .unwrap_or_else(|_| { println!("ERROR: listen address is malformed"); process::exit(1); }); let memcache = if let Some(sock) = matches.value_of("memcache")
else { None }; let sample_rate = parse_float_arg(&matches, "sample-rate").unwrap_or(DEFAULT_SAMPLE_RATE_HZ); let sampler_timeout = Duration::from_millis( parse_numeric_arg(&matches, "sampler-timeout") .unwrap_or(DEFAULT_SAMPLER_TIMEOUT_MILLISECONDS) as u64, ); let max_sampler_timeouts = parse_numeric_arg(&matches, "max-sampler-timeouts") .unwrap_or(DEFAULT_MAX_SAMPLER_TIMEOUTS); let interval = parse_numeric_arg(&matches, "interval").unwrap_or(DEFAULT_INTERVAL_SECONDS) as u64 * SECOND; let cores = hardware_threads().unwrap_or(1); let mut stats_enabled = Flags::new(); for subsystem in &["cpu", "disk", "ebpf", "network", "perf"] { if let Some(values) = matches.values_of(subsystem) { let flags: Vec<&str> = values.collect(); for flag in flags { stats_enabled.insert(subsystem, flag); } } } let loglevel = match matches.occurrences_of("verbose") { 0 => Level::Info, 1 => Level::Debug, _ => Level::Trace, }; let stats_log = matches .value_of("stats-log") .map(std::string::ToString::to_string); let sidecar = matches.is_present("sidecar"); Config { cores, flags: stats_enabled, sample_rate, sampler_timeout, max_sampler_timeouts, interval, listen, loglevel, memcache, stats_log, sidecar, } } /// what interval should the stats library latch on pub fn interval(&self) -> u64 { self.interval } /// what frequency the stats should be sampled on pub fn sample_rate(&self) -> f64 { self.sample_rate } /// the timeout for sampler execution pub fn sampler_timeout(&self) -> Duration { self.sampler_timeout } /// maximum consecutive sampler timeouts pub fn max_sampler_timeouts(&self) -> usize { self.max_sampler_timeouts } /// get listen address pub fn listen(&self) -> SocketAddr { self.listen } /// get log level pub fn loglevel(&self) -> Level { self.loglevel } /// how many cores on the host? pub fn cores(&self) -> usize { self.cores } pub fn memcache(&self) -> Option<SocketAddr> { self.memcache } /// is a flag enabled for a subsystem? pub fn is_enabled(&self, subsystem: &str, flag: &str) -> bool { self.flags.contains(subsystem, flag) } pub fn is_subsystem_enabled(&self, subsystem: &str) -> bool { self.flags.contains_pkey(subsystem) } pub fn stats_log(&self) -> Option<String> { self.stats_log.clone() } } /// a helper function to parse a numeric argument by name from `ArgMatches` fn parse_numeric_arg(matches: &ArgMatches, key: &str) -> Option<usize> { matches.value_of(key).map(|f| { f.parse().unwrap_or_else(|_| { println!("ERROR: could not parse {}", key); process::exit(1); }) }) } /// a helper function to parse a floating point argument by name from `ArgMatches` fn parse_float_arg(matches: &ArgMatches, key: &str) -> Option<f64> { matches.value_of(key).map(|f| { f.parse().unwrap_or_else(|_| { println!("ERROR: could not parse {}", key); process::exit(1); }) }) } /// helper function to discover the number of hardware threads pub fn hardware_threads() -> Result<usize, ()> { let path = "/sys/devices/system/cpu/present"; let f = File::open(path).map_err(|e| error!("failed to open file ({:?}): {}", path, e))?; let mut f = BufReader::new(f); let mut line = String::new(); f.read_line(&mut line) .map_err(|_| error!("failed to read line"))?; let line = line.trim(); let a: Vec<&str> = line.split('-').collect(); a.last() .unwrap_or(&"0") .parse::<usize>() .map_err(|e| error!("could not parse num cpus from file ({:?}): {}", path, e)) .map(|i| i + 1) }
{ let socket = sock.parse().unwrap_or_else(|_| { println!("ERROR: memcache address is malformed"); process::exit(1); }); Some(socket) }
conditional_block
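The conditional block above is one instance of the parse-or-exit pattern this config module applies to both --listen and --memcache. Isolated as a standalone, std-only sketch (parse_socket_or_die is an illustrative name):

use std::net::SocketAddr;
use std::process;

// Mirrors the error handling in Config::new(): print an error and exit(1) on bad input.
fn parse_socket_or_die(raw: &str, what: &str) -> SocketAddr {
    raw.parse().unwrap_or_else(|_| {
        println!("ERROR: {} address is malformed", what);
        process::exit(1);
    })
}

fn demo_parse() {
    let listen = parse_socket_or_die("127.0.0.1:4242", "listen");
    assert_eq!(listen.port(), 4242);
}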
config.rs
// Copyright 2019 Twitter, Inc. // Licensed under the Apache License, Version 2.0 // http://www.apache.org/licenses/LICENSE-2.0 use crate::common::*; use clap::{App, Arg, ArgMatches}; use logger::*; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::io::{BufRead, BufReader}; use std::net::SocketAddr; use std::process; use std::time::Duration; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); pub const NAME: &str = env!("CARGO_PKG_NAME"); pub const DEFAULT_SAMPLE_RATE_HZ: f64 = 1.0; pub const DEFAULT_SAMPLER_TIMEOUT_MILLISECONDS: usize = 50; pub const DEFAULT_MAX_SAMPLER_TIMEOUTS: usize = 5; pub const DEFAULT_INTERVAL_SECONDS: usize = 60; /// This struct contains the configuration of the agent. #[derive(Clone)] pub struct Config { /// the latching interval for stats interval: u64, /// sample rate for counters in Hz sample_rate: f64, /// the sampler timeout sampler_timeout: Duration, /// maximum consecutive sampler timeouts max_sampler_timeouts: usize, /// the listen address for the stats port listen: SocketAddr, /// the logging level loglevel: Level, /// memcache instance to instrument memcache: Option<SocketAddr>, /// flags for enabled statistics subsystems flags: Flags, /// the number of cores on the host cores: usize, /// an optional file to log stats to stats_log: Option<String>, /// flag to indicate Mesos sidecar mode sidecar: bool, } #[derive(Clone)] /// `Flags` is a simple wrapper for a doubly-keyed `HashSet` pub struct Flags { data: HashMap<String, HashSet<String>>, } impl Flags { /// Creates a new empty set of `Flags` pub fn new() -> Self { Self { data: HashMap::new(), } } /// Insert a `pkey`+`lkey` into the set pub fn insert(&mut self, pkey: &str, lkey: &str) { let mut entry = self.data.remove(pkey).unwrap_or_default(); entry.insert(lkey.to_owned()); self.data.insert(pkey.to_owned(), entry); } /// True if the set contains `pkey`+`lkey` pub fn contains(&self, pkey: &str, lkey: &str) -> bool { if let Some(entry) = self.data.get(pkey) { entry.get(lkey).is_some() } else { false } } /// True if the set contains the `pkey` pub fn contains_pkey(&self, pkey: &str) -> bool { self.data.get(pkey).is_some() } /// Remove a `pkey`+`lkey` pub fn remove(&mut self, pkey: &str, lkey: &str) { if let Some(entry) = self.data.get_mut(pkey) { entry.remove(lkey); } } /// Remove the `pkey` and all `lkey`s under it pub fn remove_pkey(&mut self, pkey: &str) { self.data.remove(pkey); } } impl Config { /// parse command line options and return `Config` pub fn new() -> Config { let matches = App::new(NAME) .version(VERSION) .author("Brian Martin <[email protected]>") .about("high-resolution systems performance telemetry agent") .arg( Arg::with_name("listen") .short("l") .long("listen") .required(true) .takes_value(true) .value_name("IP:PORT") .help("Sets the listen address for metrics"), ) .arg( Arg::with_name("verbose") .short("v") .long("verbose") .multiple(true) .help("Increase verbosity by one level. 
Can be used more than once"), ) .arg( Arg::with_name("interval") .long("interval") .value_name("Seconds") .help("Integration window duration and stats endpoint refresh time") .takes_value(true), ) .arg( Arg::with_name("sample-rate") .long("sample-rate") .value_name("Hertz") .help("Sets the sampling frequency for the counters") .takes_value(true), ) .arg( Arg::with_name("sampler-timeout") .long("sampler-timeout") .value_name("MS") .help("Sets the timeout for per-sampler execution") .takes_value(true), ) .arg( Arg::with_name("max-sampler-timeouts") .long("max-sampler-timeouts") .value_name("MS") .help("Sets the maximum number of consecutive sampler timeouts") .takes_value(true), ) .arg( Arg::with_name("cpu") .long("cpu") .takes_value(true) .multiple(true) .possible_value("totals") .help("Enable statistics from CPU subsystem"), ) .arg( Arg::with_name("disk") .long("disk") .takes_value(true) .multiple(true) .possible_value("totals") .help("Enable statistics from Disk subsystem"), ) .arg( Arg::with_name("ebpf") .long("ebpf") .takes_value(true) .multiple(true) .possible_value("all") .possible_value("block") .possible_value("ext4") .possible_value("scheduler") .possible_value("xfs") .help("Enable statistics from eBPF"), ) .arg( Arg::with_name("network") .long("network") .takes_value(true) .multiple(true) .possible_value("totals") .help("Enable statistics from Network subsystem"), ) .arg( Arg::with_name("perf") .long("perf") .takes_value(true) .multiple(true) .possible_value("totals") .possible_value("per-cgroup") .help("Enable statistics from Perf Events subsystem"), ) .arg( Arg::with_name("memcache") .long("memcache") .required(false) .takes_value(true) .value_name("IP:PORT") .help("Connect to the given memcache server and produce stats"), ) .arg( Arg::with_name("stats-log") .long("stats-log") .required(false) .takes_value(true) .value_name("LOG FILE") .help("Enable logging of stats to file"), ) .arg( Arg::with_name("sidecar") .long("sidecar") .required(false) .help("Enables Mesos sidecar mode, instrumenting the container"), ) .get_matches(); let listen = matches .value_of("listen") .unwrap() .parse() .unwrap_or_else(|_| { println!("ERROR: listen address is malformed"); process::exit(1); }); let memcache = if let Some(sock) = matches.value_of("memcache") { let socket = sock.parse().unwrap_or_else(|_| { println!("ERROR: memcache address is malformed"); process::exit(1); }); Some(socket) } else { None }; let sample_rate = parse_float_arg(&matches, "sample-rate").unwrap_or(DEFAULT_SAMPLE_RATE_HZ); let sampler_timeout = Duration::from_millis( parse_numeric_arg(&matches, "sampler-timeout") .unwrap_or(DEFAULT_SAMPLER_TIMEOUT_MILLISECONDS) as u64, ); let max_sampler_timeouts = parse_numeric_arg(&matches, "max-sampler-timeouts") .unwrap_or(DEFAULT_MAX_SAMPLER_TIMEOUTS); let interval = parse_numeric_arg(&matches, "interval").unwrap_or(DEFAULT_INTERVAL_SECONDS) as u64 * SECOND; let cores = hardware_threads().unwrap_or(1); let mut stats_enabled = Flags::new(); for subsystem in &["cpu", "disk", "ebpf", "network", "perf"] { if let Some(values) = matches.values_of(subsystem) { let flags: Vec<&str> = values.collect(); for flag in flags { stats_enabled.insert(subsystem, flag); } } } let loglevel = match matches.occurrences_of("verbose") { 0 => Level::Info, 1 => Level::Debug, _ => Level::Trace, }; let stats_log = matches .value_of("stats-log") .map(std::string::ToString::to_string); let sidecar = matches.is_present("sidecar"); Config { cores, flags: stats_enabled, sample_rate, sampler_timeout, 
max_sampler_timeouts, interval, listen, loglevel, memcache, stats_log, sidecar, } } /// what interval should the stats library latch on pub fn interval(&self) -> u64 { self.interval } /// what frequency the stats should be sampled on pub fn sample_rate(&self) -> f64 { self.sample_rate } /// the timeout for sampler execution pub fn sampler_timeout(&self) -> Duration { self.sampler_timeout } /// maximum consecutive sampler timeouts pub fn max_sampler_timeouts(&self) -> usize { self.max_sampler_timeouts } /// get listen address pub fn listen(&self) -> SocketAddr { self.listen } /// get log level pub fn loglevel(&self) -> Level { self.loglevel } /// how many cores on the host? pub fn cores(&self) -> usize { self.cores } pub fn memcache(&self) -> Option<SocketAddr> { self.memcache } /// is a flag enabled for a subsystem? pub fn is_enabled(&self, subsystem: &str, flag: &str) -> bool { self.flags.contains(subsystem, flag) } pub fn is_subsystem_enabled(&self, subsystem: &str) -> bool { self.flags.contains_pkey(subsystem) } pub fn stats_log(&self) -> Option<String> { self.stats_log.clone() } } /// a helper function to parse a numeric argument by name from `ArgMatches` fn parse_numeric_arg(matches: &ArgMatches, key: &str) -> Option<usize> { matches.value_of(key).map(|f| { f.parse().unwrap_or_else(|_| { println!("ERROR: could not parse {}", key); process::exit(1); }) }) } /// a helper function to parse a floating point argument by name from `ArgMatches` fn parse_float_arg(matches: &ArgMatches, key: &str) -> Option<f64> { matches.value_of(key).map(|f| { f.parse().unwrap_or_else(|_| { println!("ERROR: could not parse {}", key); process::exit(1);
} /// helper function to discover the number of hardware threads pub fn hardware_threads() -> Result<usize, ()> { let path = "/sys/devices/system/cpu/present"; let f = File::open(path).map_err(|e| error!("failed to open file ({:?}): {}", path, e))?; let mut f = BufReader::new(f); let mut line = String::new(); f.read_line(&mut line) .map_err(|_| error!("failed to read line"))?; let line = line.trim(); let a: Vec<&str> = line.split('-').collect(); a.last() .unwrap_or(&"0") .parse::<usize>() .map_err(|e| error!("could not parse num cpus from file ({:?}): {}", path, e)) .map(|i| i + 1) }
}) })
random_line_split
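hardware_threads() above derives the CPU count from the last token of a range like "0-7" read from /sys/devices/system/cpu/present. The core parsing step, isolated as a std-only sketch (count_from_present is an illustrative name):

// "0-7" -> Some(8); a single-CPU box reports "0" -> Some(1).
fn count_from_present(line: &str) -> Option<usize> {
    let last = line.trim().split('-').last()?;
    last.parse::<usize>().ok().map(|i| i + 1)
}

fn demo_count() {
    assert_eq!(count_from_present("0-7\n"), Some(8));
    assert_eq!(count_from_present("0"), Some(1));
}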
tag.rs
//! Utilities and constants for OpenType tags.
//!
//! See also the [`tag!`](../macro.tag.html) macro for creating tags from a byte string.

use crate::error::ParseError;
use std::{fmt, str};

/// Generate a 4-byte OpenType tag from a byte string
///
/// Example:
///
/// ```
/// use allsorts::tag;
/// assert_eq!(tag!(b"glyf"), 0x676C7966);
/// ```
#[macro_export]
macro_rules! tag {
    ($w:expr) => {
        u32::from_be_bytes(*$w)
    };
}

/// Wrapper type for a tag that implements `Display`
///
/// Example:
///
/// ```
/// use allsorts::tag::{self, DisplayTag};
///
/// // ASCII tag comes out as a string
/// assert_eq!(&DisplayTag(tag::NAME).to_string(), "name");
/// // Non-ASCII tag comes out as hex
/// assert_eq!(&DisplayTag(0x12345678).to_string(), "0x12345678");
///
/// println!("DisplayTag is handy for printing a tag: '{}'", DisplayTag(tag::CFF));
/// ```
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct DisplayTag(pub u32);

pub fn from_string(s: &str) -> Result<u32, ParseError> {
    if s.len() > 4 {
        return Err(ParseError::BadValue);
    }
    let mut tag: u32 = 0;
    let mut count = 0;
    for c in s.chars() {
        if !c.is_ascii() || c.is_ascii_control() {
            return Err(ParseError::BadValue);
        }
        tag = (tag << 8) | (c as u32);
        count += 1;
    }
    while count < 4 {
        tag = (tag << 8) | (' ' as u32);
        count += 1;
    }
    Ok(tag)
}

impl fmt::Display for DisplayTag {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let tag = self.0;
        let bytes = tag.to_be_bytes();
        if bytes.iter().all(|c| c.is_ascii() && !c.is_ascii_control()) {
            let s = str::from_utf8(&bytes).unwrap(); // unwrap safe due to above check
            s.fmt(f)
        } else {
            write!(f, "0x{:08x}", tag)
        }
    }
}

impl fmt::Debug for DisplayTag {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

/// `abvf`
pub const ABVF: u32 = tag!(b"abvf");
/// `abvm`
pub const ABVM: u32 = tag!(b"abvm");
/// `abvs`
pub const ABVS: u32 = tag!(b"abvs");
/// `acnt`
pub const ACNT: u32 = tag!(b"acnt");
/// `afrc`
pub const AFRC: u32 = tag!(b"afrc");
/// `akhn`
pub const AKHN: u32 = tag!(b"akhn");
/// `arab`
pub const ARAB: u32 = tag!(b"arab");
/// `avar`
pub const AVAR: u32 = tag!(b"avar");
/// `BASE`
pub const BASE: u32 = tag!(b"BASE");
/// `bdat`
pub const BDAT: u32 = tag!(b"bdat");
/// `beng`
pub const BENG: u32 = tag!(b"beng");
/// `bloc`
pub const BLOC: u32 = tag!(b"bloc");
/// `blwf`
pub const BLWF: u32 = tag!(b"blwf");
/// `blwm`
pub const BLWM: u32 = tag!(b"blwm");
/// `blws`
pub const BLWS: u32 = tag!(b"blws");
/// `bng2`
pub const BNG2: u32 = tag!(b"bng2");
/// `bsln`
pub const BSLN: u32 = tag!(b"bsln");
/// `c2sc`
pub const C2SC: u32 = tag!(b"c2sc");
/// `calt`
pub const CALT: u32 = tag!(b"calt");
/// `CBDT`
pub const CBDT: u32 = tag!(b"CBDT");
/// `CBLC`
pub const CBLC: u32 = tag!(b"CBLC");
/// `ccmp`
pub const CCMP: u32 = tag!(b"ccmp");
/// `cfar`
pub const CFAR: u32 = tag!(b"cfar");
/// `CFF `
pub const CFF: u32 = tag!(b"CFF ");
/// `cjct`
pub const CJCT: u32 = tag!(b"cjct");
/// `clig`
pub const CLIG: u32 = tag!(b"clig");
/// `cmap`
pub const CMAP: u32 = tag!(b"cmap");
/// `COLR`
pub const COLR: u32 = tag!(b"COLR");
/// `CPAL`
pub const CPAL: u32 = tag!(b"CPAL");
/// `curs`
pub const CURS: u32 = tag!(b"curs");
/// `cvar`
pub const CVAR: u32 = tag!(b"cvar");
/// `cvt `
pub const CVT: u32 = tag!(b"cvt ");
/// `cyrl`
pub const CYRL: u32 = tag!(b"cyrl");
/// `dev2`
pub const DEV2: u32 = tag!(b"dev2");
/// `deva`
pub const DEVA: u32 = tag!(b"deva");
/// `DFLT`
pub const DFLT: u32 = tag!(b"DFLT");
/// `dist`
pub const DIST: u32 = tag!(b"dist");
/// `dlig`
pub const DLIG: u32 =
tag!(b"dlig"); /// `dupe` pub const DUPE: u32 = tag!(b"dupe"); /// `EBDT` pub const EBDT: u32 = tag!(b"EBDT"); /// `EBLC` pub const EBLC: u32 = tag!(b"EBLC"); /// `EBSC` pub const EBSC: u32 = tag!(b"EBSC"); /// `FAR ` pub const FAR: u32 = tag!(b"FAR "); /// `fdsc` pub const FDSC: u32 = tag!(b"fdsc"); /// `Feat` pub const FEAT2: u32 = tag!(b"Feat"); /// `feat` pub const FEAT: u32 = tag!(b"feat"); /// `fin2` pub const FIN2: u32 = tag!(b"fin2"); /// `fin3` pub const FIN3: u32 = tag!(b"fin3"); /// `fina` pub const FINA: u32 = tag!(b"fina"); /// `fmtx` pub const FMTX: u32 = tag!(b"fmtx"); /// `fpgm` pub const FPGM: u32 = tag!(b"fpgm"); /// `frac` pub const FRAC: u32 = tag!(b"frac"); /// `fvar` pub const FVAR: u32 = tag!(b"fvar"); /// `gasp` pub const GASP: u32 = tag!(b"gasp"); /// `GDEF` pub const GDEF: u32 = tag!(b"GDEF"); /// `gjr2` pub const GJR2: u32 = tag!(b"gjr2"); /// `Glat` pub const GLAT: u32 = tag!(b"Glat"); /// `Gloc` pub const GLOC: u32 = tag!(b"Gloc"); /// `glyf` pub const GLYF: u32 = tag!(b"glyf"); /// `GPOS` pub const GPOS: u32 = tag!(b"GPOS"); /// `grek` pub const GREK: u32 = tag!(b"grek"); /// `GSUB` pub const GSUB: u32 = tag!(b"GSUB"); /// `gujr` pub const GUJR: u32 = tag!(b"gujr"); /// `gur2` pub const GUR2: u32 = tag!(b"gur2"); /// `guru` pub const GURU: u32 = tag!(b"guru"); /// `gvar` pub const GVAR: u32 = tag!(b"gvar"); /// `half` pub const HALF: u32 = tag!(b"half"); /// `haln` pub const HALN: u32 = tag!(b"haln"); /// `hdmx` pub const HDMX: u32 = tag!(b"hdmx"); /// `head` pub const HEAD: u32 = tag!(b"head"); /// `hhea` pub const HHEA: u32 = tag!(b"hhea"); /// `hlig` pub const HLIG: u32 = tag!(b"hlig"); /// `hmtx` pub const HMTX: u32 = tag!(b"hmtx"); /// `hsty` pub const HSTY: u32 = tag!(b"hsty"); /// `init` pub const INIT: u32 = tag!(b"init"); /// `isol` pub const ISOL: u32 = tag!(b"isol"); /// `jpg ` pub const JPG: u32 = tag!(b"jpg "); /// `JSTF` pub const JSTF: u32 = tag!(b"JSTF"); /// `just` pub const JUST: u32 = tag!(b"just"); /// `kern` pub const KERN: u32 = tag!(b"kern"); /// `khmr` pub const KHMR: u32 = tag!(b"khmr"); /// `knd2` pub const KND2: u32 = tag!(b"knd2"); /// `knda` pub const KNDA: u32 = tag!(b"knda"); /// `lao ` pub const LAO: u32 = tag!(b"lao "); /// `latn` pub const LATN: u32 = tag!(b"latn"); /// `lcar` pub const LCAR: u32 = tag!(b"lcar"); /// `liga` pub const LIGA: u32 = tag!(b"liga"); /// `lnum` pub const LNUM: u32 = tag!(b"lnum"); /// `loca` pub const LOCA: u32 = tag!(b"loca"); /// `locl` pub const LOCL: u32 = tag!(b"locl"); /// `LTSH` pub const LTSH: u32 = tag!(b"LTSH"); /// `mark` pub const MARK: u32 = tag!(b"mark"); /// `MATH` pub const MATH: u32 = tag!(b"MATH"); /// `maxp` pub const MAXP: u32 = tag!(b"maxp"); /// `med2` pub const MED2: u32 = tag!(b"med2"); /// `medi` pub const MEDI: u32 = tag!(b"medi"); /// `mkmk` pub const MKMK: u32 = tag!(b"mkmk"); /// `mlm2` pub const MLM2: u32 = tag!(b"mlm2"); /// `mlym` pub const MLYM: u32 = tag!(b"mlym"); /// `mort` pub const MORT: u32 = tag!(b"mort"); /// `morx` pub const MORX: u32 = tag!(b"morx"); /// `mset` pub const MSET: u32 = tag!(b"mset"); /// `name` pub const NAME: u32 = tag!(b"name"); /// `nukt` pub const NUKT: u32 = tag!(b"nukt"); /// `onum` pub const ONUM: u32 = tag!(b"onum"); /// `opbd` pub const OPBD: u32 = tag!(b"opbd"); /// `ordn` pub const ORDN: u32 = tag!(b"ordn"); /// `ory2` pub const ORY2: u32 = tag!(b"ory2"); /// `orya` pub const ORYA: u32 = tag!(b"orya"); /// `OS/2` pub const OS_2: u32 = tag!(b"OS/2"); /// `OTTO` pub const OTTO: u32 = tag!(b"OTTO"); /// `PCLT` pub const PCLT: u32 = 
tag!(b"PCLT"); /// `pnum` pub const PNUM: u32 = tag!(b"pnum"); /// `png ` pub const PNG: u32 = tag!(b"png "); /// `post` pub const POST: u32 = tag!(b"post"); /// `pref` pub const PREF: u32 = tag!(b"pref"); /// `prep` pub const PREP: u32 = tag!(b"prep"); /// `pres` pub const PRES: u32 = tag!(b"pres"); /// `prop` pub const PROP: u32 = tag!(b"prop"); /// `pstf` pub const PSTF: u32 = tag!(b"pstf"); /// `psts` pub const PSTS: u32 = tag!(b"psts"); /// `rclt` pub const RCLT: u32 = tag!(b"rclt"); /// `rkrf` pub const RKRF: u32 = tag!(b"rkrf"); /// `rlig` pub const RLIG: u32 = tag!(b"rlig"); /// `rphf` pub const RPHF: u32 = tag!(b"rphf"); /// `sbix` pub const SBIX: u32 = tag!(b"sbix"); /// `Silf` pub const SILF: u32 = tag!(b"Silf"); /// `Sill` pub const SILL: u32 = tag!(b"Sill"); /// `sinh` pub const SINH: u32 = tag!(b"sinh"); /// `smcp` pub const SMCP: u32 = tag!(b"smcp"); /// `SND ` pub const SND: u32 = tag!(b"SND "); /// `SVG ` pub const SVG: u32 = tag!(b"SVG "); /// `syrc` pub const SYRC: u32 = tag!(b"syrc"); /// `taml` pub const TAML: u32 = tag!(b"taml"); /// `tel2` pub const TEL2: u32 = tag!(b"tel2"); /// `telu` pub const TELU: u32 = tag!(b"telu"); /// `thai` pub const THAI: u32 = tag!(b"thai"); /// `tiff` pub const TIFF: u32 = tag!(b"tiff"); /// `tml2` pub const TML2: u32 = tag!(b"tml2"); /// `tnum` pub const TNUM: u32 = tag!(b"tnum"); /// `trak` pub const TRAK: u32 = tag!(b"trak"); /// `ttcf` pub const TTCF: u32 = tag!(b"ttcf"); /// `URD ` pub const URD: u32 = tag!(b"URD "); /// `vatu` pub const VATU: u32 = tag!(b"vatu"); /// `VDMX` pub const VDMX: u32 = tag!(b"VDMX"); /// `vert` pub const VERT: u32 = tag!(b"vert"); /// `vhea` pub const VHEA: u32 = tag!(b"vhea"); /// `vmtx` pub const VMTX: u32 = tag!(b"vmtx"); /// `VORG` pub const VORG: u32 = tag!(b"VORG"); /// `vrt2` pub const VRT2: u32 = tag!(b"vrt2"); /// `Zapf` pub const ZAPF: u32 = tag!(b"Zapf"); /// `zero` pub const ZERO: u32 = tag!(b"zero"); #[cfg(test)] mod tests { use super::*; mod from_string { use super::*; #[test] fn test_four_chars()
#[test] fn test_three_chars() { let tag = from_string("BEN").expect("invalid tag"); assert_eq!(tag, 1111838240); } } mod display_tag { use crate::tag::{DisplayTag, NAME}; #[test] fn test_simple_ascii() { assert_eq!(DisplayTag(NAME).to_string(), "name".to_string()); } #[test] fn test_non_ascii() { assert_eq!(DisplayTag(0x12345678).to_string(), "0x12345678".to_string()); } #[test] fn test_debug() { assert_eq!(format!("{:?}", DisplayTag(NAME)), "name".to_string()); } } }
{ let tag = from_string("beng").expect("invalid tag"); assert_eq!(tag, 1650814567); }
identifier_body
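The magic numbers asserted in the tests of this record follow directly from the big-endian packing that `from_string` performs: each ASCII character occupies one byte of the `u32`, most significant first, and tags shorter than four characters are padded with trailing spaces (0x20). A standalone sketch verifying both test constants (plain modern Rust, not part of any record in this dump):

```rust
fn main() {
    // "beng" packs as the bytes 0x62, 0x65, 0x6E, 0x67, big-endian.
    assert_eq!(u32::from_be_bytes(*b"beng"), 0x6265_6E67);
    assert_eq!(0x6265_6E67u32, 1_650_814_567);

    // "BEN" gains a trailing space: 0x42, 0x45, 0x4E, 0x20.
    assert_eq!(u32::from_be_bytes(*b"BEN "), 0x4245_4E20);
    assert_eq!(0x4245_4E20u32, 1_111_838_240);
}
```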
tag.rs
//! Utilities and constants for OpenType tags. //! //! See also the [`tag!`](../macro.tag.html) macro for creating tags from a byte string. use crate::error::ParseError; use std::{fmt, str}; /// Generate a 4-byte OpenType tag from a byte string /// /// Example: /// /// ``` /// use allsorts::tag; /// assert_eq!(tag!(b"glyf"), 0x676C7966); /// ``` #[macro_export] macro_rules! tag { ($w:expr) => { u32::from_be_bytes(*$w) }; } /// Wrapper type for a tag that implements `Display` /// /// Example: /// /// ``` /// use allsorts::tag::{self, DisplayTag}; /// /// // ASCII tag comes out as a string /// assert_eq!(&DisplayTag(tag::NAME).to_string(), "name"); /// // Non-ASCII tag comes out as hex /// assert_eq!(&DisplayTag(0x12345678).to_string(), "0x12345678"); /// /// println!("DisplayTag is handy for printing a tag: '{}'", DisplayTag(tag::CFF)); /// ``` #[derive(PartialEq, Eq, Clone, Copy)] pub struct DisplayTag(pub u32); pub fn from_string(s: &str) -> Result<u32, ParseError> { if s.len() > 4 { return Err(ParseError::BadValue); } let mut tag: u32 = 0; let mut count = 0; for c in s.chars() { if !c.is_ascii() || c.is_ascii_control() { return Err(ParseError::BadValue); } tag = (tag << 8) | (c as u32); count += 1; } while count < 4 { tag = (tag << 8) | (' ' as u32); count += 1; } Ok(tag) } impl fmt::Display for DisplayTag { fn
(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let tag = self.0; let bytes = tag.to_be_bytes(); if bytes.iter().all(|c| c.is_ascii() && !c.is_ascii_control()) { let s = str::from_utf8(&bytes).unwrap(); // unwrap safe due to above check s.fmt(f) } else { write!(f, "0x{:08x}", tag) } } } impl fmt::Debug for DisplayTag { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(self, f) } } /// `abvf` pub const ABVF: u32 = tag!(b"abvf"); /// `abvm` pub const ABVM: u32 = tag!(b"abvm"); /// `abvs` pub const ABVS: u32 = tag!(b"abvs"); /// `acnt` pub const ACNT: u32 = tag!(b"acnt"); /// `afrc` pub const AFRC: u32 = tag!(b"afrc"); /// `akhn` pub const AKHN: u32 = tag!(b"akhn"); /// `arab` pub const ARAB: u32 = tag!(b"arab"); /// `avar` pub const AVAR: u32 = tag!(b"avar"); /// `BASE` pub const BASE: u32 = tag!(b"BASE"); /// `bdat` pub const BDAT: u32 = tag!(b"bdat"); /// `beng` pub const BENG: u32 = tag!(b"beng"); /// `bloc` pub const BLOC: u32 = tag!(b"bloc"); /// `blwf` pub const BLWF: u32 = tag!(b"blwf"); /// `blwm` pub const BLWM: u32 = tag!(b"blwm"); /// `blws` pub const BLWS: u32 = tag!(b"blws"); /// `bng2` pub const BNG2: u32 = tag!(b"bng2"); /// `bsln` pub const BSLN: u32 = tag!(b"bsln"); /// `c2sc` pub const C2SC: u32 = tag!(b"c2sc"); /// `calt` pub const CALT: u32 = tag!(b"calt"); /// `CBDT` pub const CBDT: u32 = tag!(b"CBDT"); /// `CBLC` pub const CBLC: u32 = tag!(b"CBLC"); /// `ccmp` pub const CCMP: u32 = tag!(b"ccmp"); /// `cfar` pub const CFAR: u32 = tag!(b"cfar"); /// `CFF ` pub const CFF: u32 = tag!(b"CFF "); /// `cjct` pub const CJCT: u32 = tag!(b"cjct"); /// `clig` pub const CLIG: u32 = tag!(b"clig"); /// `cmap` pub const CMAP: u32 = tag!(b"cmap"); /// `COLR` pub const COLR: u32 = tag!(b"COLR"); /// `CPAL` pub const CPAL: u32 = tag!(b"CPAL"); /// `curs` pub const CURS: u32 = tag!(b"curs"); /// `cvar` pub const CVAR: u32 = tag!(b"cvar"); /// `cvt ` pub const CVT: u32 = tag!(b"cvt "); /// `cyrl` pub const CYRL: u32 = tag!(b"cyrl"); /// `dev2` pub const DEV2: u32 = tag!(b"dev2"); /// `deva` pub const DEVA: u32 = tag!(b"deva"); /// `DFLT` pub const DFLT: u32 = tag!(b"DFLT"); /// `dist` pub const DIST: u32 = tag!(b"dist"); /// `dlig` pub const DLIG: u32 = tag!(b"dlig"); /// `dupe` pub const DUPE: u32 = tag!(b"dupe"); /// `EBDT` pub const EBDT: u32 = tag!(b"EBDT"); /// `EBLC` pub const EBLC: u32 = tag!(b"EBLC"); /// `EBSC` pub const EBSC: u32 = tag!(b"EBSC"); /// `FAR ` pub const FAR: u32 = tag!(b"FAR "); /// `fdsc` pub const FDSC: u32 = tag!(b"fdsc"); /// `Feat` pub const FEAT2: u32 = tag!(b"Feat"); /// `feat` pub const FEAT: u32 = tag!(b"feat"); /// `fin2` pub const FIN2: u32 = tag!(b"fin2"); /// `fin3` pub const FIN3: u32 = tag!(b"fin3"); /// `fina` pub const FINA: u32 = tag!(b"fina"); /// `fmtx` pub const FMTX: u32 = tag!(b"fmtx"); /// `fpgm` pub const FPGM: u32 = tag!(b"fpgm"); /// `frac` pub const FRAC: u32 = tag!(b"frac"); /// `fvar` pub const FVAR: u32 = tag!(b"fvar"); /// `gasp` pub const GASP: u32 = tag!(b"gasp"); /// `GDEF` pub const GDEF: u32 = tag!(b"GDEF"); /// `gjr2` pub const GJR2: u32 = tag!(b"gjr2"); /// `Glat` pub const GLAT: u32 = tag!(b"Glat"); /// `Gloc` pub const GLOC: u32 = tag!(b"Gloc"); /// `glyf` pub const GLYF: u32 = tag!(b"glyf"); /// `GPOS` pub const GPOS: u32 = tag!(b"GPOS"); /// `grek` pub const GREK: u32 = tag!(b"grek"); /// `GSUB` pub const GSUB: u32 = tag!(b"GSUB"); /// `gujr` pub const GUJR: u32 = tag!(b"gujr"); /// `gur2` pub const GUR2: u32 = tag!(b"gur2"); /// `guru` pub const GURU: u32 = tag!(b"guru"); /// `gvar` pub const
GVAR: u32 = tag!(b"gvar"); /// `half` pub const HALF: u32 = tag!(b"half"); /// `haln` pub const HALN: u32 = tag!(b"haln"); /// `hdmx` pub const HDMX: u32 = tag!(b"hdmx"); /// `head` pub const HEAD: u32 = tag!(b"head"); /// `hhea` pub const HHEA: u32 = tag!(b"hhea"); /// `hlig` pub const HLIG: u32 = tag!(b"hlig"); /// `hmtx` pub const HMTX: u32 = tag!(b"hmtx"); /// `hsty` pub const HSTY: u32 = tag!(b"hsty"); /// `init` pub const INIT: u32 = tag!(b"init"); /// `isol` pub const ISOL: u32 = tag!(b"isol"); /// `jpg ` pub const JPG: u32 = tag!(b"jpg "); /// `JSTF` pub const JSTF: u32 = tag!(b"JSTF"); /// `just` pub const JUST: u32 = tag!(b"just"); /// `kern` pub const KERN: u32 = tag!(b"kern"); /// `khmr` pub const KHMR: u32 = tag!(b"khmr"); /// `knd2` pub const KND2: u32 = tag!(b"knd2"); /// `knda` pub const KNDA: u32 = tag!(b"knda"); /// `lao ` pub const LAO: u32 = tag!(b"lao "); /// `latn` pub const LATN: u32 = tag!(b"latn"); /// `lcar` pub const LCAR: u32 = tag!(b"lcar"); /// `liga` pub const LIGA: u32 = tag!(b"liga"); /// `lnum` pub const LNUM: u32 = tag!(b"lnum"); /// `loca` pub const LOCA: u32 = tag!(b"loca"); /// `locl` pub const LOCL: u32 = tag!(b"locl"); /// `LTSH` pub const LTSH: u32 = tag!(b"LTSH"); /// `mark` pub const MARK: u32 = tag!(b"mark"); /// `MATH` pub const MATH: u32 = tag!(b"MATH"); /// `maxp` pub const MAXP: u32 = tag!(b"maxp"); /// `med2` pub const MED2: u32 = tag!(b"med2"); /// `medi` pub const MEDI: u32 = tag!(b"medi"); /// `mkmk` pub const MKMK: u32 = tag!(b"mkmk"); /// `mlm2` pub const MLM2: u32 = tag!(b"mlm2"); /// `mlym` pub const MLYM: u32 = tag!(b"mlym"); /// `mort` pub const MORT: u32 = tag!(b"mort"); /// `morx` pub const MORX: u32 = tag!(b"morx"); /// `mset` pub const MSET: u32 = tag!(b"mset"); /// `name` pub const NAME: u32 = tag!(b"name"); /// `nukt` pub const NUKT: u32 = tag!(b"nukt"); /// `onum` pub const ONUM: u32 = tag!(b"onum"); /// `opbd` pub const OPBD: u32 = tag!(b"opbd"); /// `ordn` pub const ORDN: u32 = tag!(b"ordn"); /// `ory2` pub const ORY2: u32 = tag!(b"ory2"); /// `orya` pub const ORYA: u32 = tag!(b"orya"); /// `OS/2` pub const OS_2: u32 = tag!(b"OS/2"); /// `OTTO` pub const OTTO: u32 = tag!(b"OTTO"); /// `PCLT` pub const PCLT: u32 = tag!(b"PCLT"); /// `pnum` pub const PNUM: u32 = tag!(b"pnum"); /// `png ` pub const PNG: u32 = tag!(b"png "); /// `post` pub const POST: u32 = tag!(b"post"); /// `pref` pub const PREF: u32 = tag!(b"pref"); /// `prep` pub const PREP: u32 = tag!(b"prep"); /// `pres` pub const PRES: u32 = tag!(b"pres"); /// `prop` pub const PROP: u32 = tag!(b"prop"); /// `pstf` pub const PSTF: u32 = tag!(b"pstf"); /// `psts` pub const PSTS: u32 = tag!(b"psts"); /// `rclt` pub const RCLT: u32 = tag!(b"rclt"); /// `rkrf` pub const RKRF: u32 = tag!(b"rkrf"); /// `rlig` pub const RLIG: u32 = tag!(b"rlig"); /// `rphf` pub const RPHF: u32 = tag!(b"rphf"); /// `sbix` pub const SBIX: u32 = tag!(b"sbix"); /// `Silf` pub const SILF: u32 = tag!(b"Silf"); /// `Sill` pub const SILL: u32 = tag!(b"Sill"); /// `sinh` pub const SINH: u32 = tag!(b"sinh"); /// `smcp` pub const SMCP: u32 = tag!(b"smcp"); /// `SND ` pub const SND: u32 = tag!(b"SND "); /// `SVG ` pub const SVG: u32 = tag!(b"SVG "); /// `syrc` pub const SYRC: u32 = tag!(b"syrc"); /// `taml` pub const TAML: u32 = tag!(b"taml"); /// `tel2` pub const TEL2: u32 = tag!(b"tel2"); /// `telu` pub const TELU: u32 = tag!(b"telu"); /// `thai` pub const THAI: u32 = tag!(b"thai"); /// `tiff` pub const TIFF: u32 = tag!(b"tiff"); /// `tml2` pub const TML2: u32 = tag!(b"tml2"); /// `tnum` pub const TNUM: 
u32 = tag!(b"tnum"); /// `trak` pub const TRAK: u32 = tag!(b"trak"); /// `ttcf` pub const TTCF: u32 = tag!(b"ttcf"); /// `URD ` pub const URD: u32 = tag!(b"URD "); /// `vatu` pub const VATU: u32 = tag!(b"vatu"); /// `VDMX` pub const VDMX: u32 = tag!(b"VDMX"); /// `vert` pub const VERT: u32 = tag!(b"vert"); /// `vhea` pub const VHEA: u32 = tag!(b"vhea"); /// `vmtx` pub const VMTX: u32 = tag!(b"vmtx"); /// `VORG` pub const VORG: u32 = tag!(b"VORG"); /// `vrt2` pub const VRT2: u32 = tag!(b"vrt2"); /// `Zapf` pub const ZAPF: u32 = tag!(b"Zapf"); /// `zero` pub const ZERO: u32 = tag!(b"zero"); #[cfg(test)] mod tests { use super::*; mod from_string { use super::*; #[test] fn test_four_chars() { let tag = from_string("beng").expect("invalid tag"); assert_eq!(tag, 1650814567); } #[test] fn test_three_chars() { let tag = from_string("BEN").expect("invalid tag"); assert_eq!(tag, 1111838240); } } mod display_tag { use crate::tag::{DisplayTag, NAME}; #[test] fn test_simple_ascii() { assert_eq!(DisplayTag(NAME).to_string(), "name".to_string()); } #[test] fn test_non_ascii() { assert_eq!(DisplayTag(0x12345678).to_string(), "0x12345678".to_string()); } #[test] fn test_debug() { assert_eq!(format!("{:?}", DisplayTag(NAME)), "name".to_string()); } } }
fmt
identifier_name
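Since the `tag!` macro in this record expands to `u32::from_be_bytes`, its doctest value for `glyf` can be checked by hand: 'g' = 0x67, 'l' = 0x6C, 'y' = 0x79, 'f' = 0x66. A minimal standalone sketch showing the expansion and the equivalent shift-based packing used by `from_string` (modern Rust, independent of the record):

```rust
fn main() {
    // What tag!(b"glyf") expands to:
    assert_eq!(u32::from_be_bytes(*b"glyf"), 0x676C_7966);

    // The same packing written as shifts, mirroring from_string's loop:
    let tag = (b'g' as u32) << 24 | (b'l' as u32) << 16 | (b'y' as u32) << 8 | (b'f' as u32);
    assert_eq!(tag, 0x676C_7966);
}
```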
tag.rs
//! Utilities and constants for OpenType tags. //! //! See also the [`tag!`](../macro.tag.html) macro for creating tags from a byte string. use crate::error::ParseError; use std::{fmt, str}; /// Generate a 4-byte OpenType tag from a byte string /// /// Example: /// /// ``` /// use allsorts::tag; /// assert_eq!(tag!(b"glyf"), 0x676C7966); /// ``` #[macro_export] macro_rules! tag { ($w:expr) => { u32::from_be_bytes(*$w) }; } /// Wrapper type for a tag that implements `Display` /// /// Example: /// /// ``` /// use allsorts::tag::{self, DisplayTag}; /// /// // ASCII tag comes out as a string /// assert_eq!(&DisplayTag(tag::NAME).to_string(), "name"); /// // Non-ASCII tag comes out as hex /// assert_eq!(&DisplayTag(0x12345678).to_string(), "0x12345678"); /// /// println!("DisplayTag is handy for printing a tag: '{}'", DisplayTag(tag::CFF)); /// ``` #[derive(PartialEq, Eq, Clone, Copy)] pub struct DisplayTag(pub u32); pub fn from_string(s: &str) -> Result<u32, ParseError> { if s.len() > 4 { return Err(ParseError::BadValue); } let mut tag: u32 = 0; let mut count = 0; for c in s.chars() { if !c.is_ascii() || c.is_ascii_control() { return Err(ParseError::BadValue); } tag = (tag << 8) | (c as u32); count += 1; } while count < 4 { tag = (tag << 8) | (' ' as u32); count += 1; } Ok(tag) } impl fmt::Display for DisplayTag { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let tag = self.0; let bytes = tag.to_be_bytes(); if bytes.iter().all(|c| c.is_ascii() && !c.is_ascii_control()) { let s = str::from_utf8(&bytes).unwrap(); // unwrap safe due to above check s.fmt(f) } else { write!(f, "0x{:08x}", tag) } } } impl fmt::Debug for DisplayTag { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(self, f) } } /// `abvf` pub const ABVF: u32 = tag!(b"abvf"); /// `abvm` pub const ABVM: u32 = tag!(b"abvm"); /// `abvs` pub const ABVS: u32 = tag!(b"abvs"); /// `acnt` pub const ACNT: u32 = tag!(b"acnt"); /// `afrc` pub const AFRC: u32 = tag!(b"afrc"); /// `akhn` pub const AKHN: u32 = tag!(b"akhn"); /// `arab` pub const ARAB: u32 = tag!(b"arab"); /// `avar` pub const AVAR: u32 = tag!(b"avar"); /// `BASE` pub const BASE: u32 = tag!(b"BASE"); /// `bdat` pub const BDAT: u32 = tag!(b"bdat"); /// `beng` pub const BENG: u32 = tag!(b"beng"); /// `bloc` pub const BLOC: u32 = tag!(b"bloc"); /// `blwf` pub const BLWF: u32 = tag!(b"blwf"); /// `blwm` pub const BLWM: u32 = tag!(b"blwm"); /// `blws` pub const BLWS: u32 = tag!(b"blws"); /// `bng2` pub const BNG2: u32 = tag!(b"bng2"); /// `bsln` pub const BSLN: u32 = tag!(b"bsln"); /// `c2sc` pub const C2SC: u32 = tag!(b"c2sc"); /// `calt` pub const CALT: u32 = tag!(b"calt"); /// `CBDT` pub const CBDT: u32 = tag!(b"CBDT"); /// `CBLC` pub const CBLC: u32 = tag!(b"CBLC"); /// `ccmp` pub const CCMP: u32 = tag!(b"ccmp"); /// `cfar` pub const CFAR: u32 = tag!(b"cfar"); /// `CFF ` pub const CFF: u32 = tag!(b"CFF "); /// `cjct` pub const CJCT: u32 = tag!(b"cjct"); /// `clig` pub const CLIG: u32 = tag!(b"clig"); /// `cmap` pub const CMAP: u32 = tag!(b"cmap"); /// `COLR` pub const COLR: u32 = tag!(b"COLR"); /// `CPAL` pub const CPAL: u32 = tag!(b"CPAL"); /// `curs` pub const CURS: u32 = tag!(b"curs"); /// `cvar` pub const CVAR: u32 = tag!(b"cvar"); /// `cvt ` pub const CVT: u32 = tag!(b"cvt "); /// `cyrl` pub const CYRL: u32 = tag!(b"cyrl"); /// `dev2` pub const DEV2: u32 = tag!(b"dev2"); /// `deva` pub const DEVA: u32 = tag!(b"deva"); /// `DFLT` pub const DFLT: u32 = tag!(b"DFLT"); /// `dist` pub const DIST: u32 = tag!(b"dist"); /// `dlig` pub const DLIG: u32 =
tag!(b"dlig"); /// `dupe` pub const DUPE: u32 = tag!(b"dupe"); /// `EBDT` pub const EBDT: u32 = tag!(b"EBDT"); /// `EBLC` pub const EBLC: u32 = tag!(b"EBLC"); /// `EBSC` pub const EBSC: u32 = tag!(b"EBSC"); /// `FAR ` pub const FAR: u32 = tag!(b"FAR "); /// `fdsc` pub const FDSC: u32 = tag!(b"fdsc"); /// `Feat` pub const FEAT2: u32 = tag!(b"Feat"); /// `feat` pub const FEAT: u32 = tag!(b"feat"); /// `fin2` pub const FIN2: u32 = tag!(b"fin2"); /// `fin3` pub const FIN3: u32 = tag!(b"fin3"); /// `fina` pub const FINA: u32 = tag!(b"fina"); /// `fmtx` pub const FMTX: u32 = tag!(b"fmtx"); /// `fpgm` pub const FPGM: u32 = tag!(b"fpgm"); /// `frac` pub const FRAC: u32 = tag!(b"frac"); /// `fvar` pub const FVAR: u32 = tag!(b"fvar"); /// `gasp` pub const GASP: u32 = tag!(b"gasp"); /// `GDEF` pub const GDEF: u32 = tag!(b"GDEF"); /// `gjr2` pub const GJR2: u32 = tag!(b"gjr2"); /// `Glat` pub const GLAT: u32 = tag!(b"Glat"); /// `Gloc` pub const GLOC: u32 = tag!(b"Gloc"); /// `glyf` pub const GLYF: u32 = tag!(b"glyf"); /// `GPOS` pub const GPOS: u32 = tag!(b"GPOS"); /// `grek` pub const GREK: u32 = tag!(b"grek"); /// `GSUB` pub const GSUB: u32 = tag!(b"GSUB"); /// `gujr` pub const GUJR: u32 = tag!(b"gujr"); /// `gur2` pub const GUR2: u32 = tag!(b"gur2"); /// `guru` pub const GURU: u32 = tag!(b"guru"); /// `gvar` pub const GVAR: u32 = tag!(b"gvar"); /// `half` pub const HALF: u32 = tag!(b"half"); /// `haln` pub const HALN: u32 = tag!(b"haln"); /// `hdmx` pub const HDMX: u32 = tag!(b"hdmx"); /// `head` pub const HEAD: u32 = tag!(b"head"); /// `hhea` pub const HHEA: u32 = tag!(b"hhea"); /// `hlig` pub const HLIG: u32 = tag!(b"hlig"); /// `hmtx` pub const HMTX: u32 = tag!(b"hmtx"); /// `hsty` pub const HSTY: u32 = tag!(b"hsty"); /// `init` pub const INIT: u32 = tag!(b"init"); /// `isol` pub const ISOL: u32 = tag!(b"isol"); /// `jpg ` pub const JPG: u32 = tag!(b"jpg "); /// `JSTF` pub const JSTF: u32 = tag!(b"JSTF"); /// `just` pub const JUST: u32 = tag!(b"just"); /// `kern` pub const KERN: u32 = tag!(b"kern"); /// `khmr` pub const KHMR: u32 = tag!(b"khmr"); /// `knd2` pub const KND2: u32 = tag!(b"knd2"); /// `knda` pub const KNDA: u32 = tag!(b"knda"); /// `lao ` pub const LAO: u32 = tag!(b"lao "); /// `latn` pub const LATN: u32 = tag!(b"latn"); /// `lcar` pub const LCAR: u32 = tag!(b"lcar"); /// `liga` pub const LIGA: u32 = tag!(b"liga"); /// `lnum` pub const LNUM: u32 = tag!(b"lnum"); /// `loca` pub const LOCA: u32 = tag!(b"loca"); /// `locl` pub const LOCL: u32 = tag!(b"locl"); /// `LTSH` pub const LTSH: u32 = tag!(b"LTSH"); /// `mark` pub const MARK: u32 = tag!(b"mark"); /// `MATH` pub const MATH: u32 = tag!(b"MATH"); /// `maxp` pub const MAXP: u32 = tag!(b"maxp"); /// `med2` pub const MED2: u32 = tag!(b"med2"); /// `medi`
pub const MKMK: u32 = tag!(b"mkmk"); /// `mlm2` pub const MLM2: u32 = tag!(b"mlm2"); /// `mlym` pub const MLYM: u32 = tag!(b"mlym"); /// `mort` pub const MORT: u32 = tag!(b"mort"); /// `morx` pub const MORX: u32 = tag!(b"morx"); /// `mset` pub const MSET: u32 = tag!(b"mset"); /// `name` pub const NAME: u32 = tag!(b"name"); /// `nukt` pub const NUKT: u32 = tag!(b"nukt"); /// `onum` pub const ONUM: u32 = tag!(b"onum"); /// `opbd` pub const OPBD: u32 = tag!(b"opbd"); /// `ordn` pub const ORDN: u32 = tag!(b"ordn"); /// `ory2` pub const ORY2: u32 = tag!(b"ory2"); /// `orya` pub const ORYA: u32 = tag!(b"orya"); /// `OS/2` pub const OS_2: u32 = tag!(b"OS/2"); /// `OTTO` pub const OTTO: u32 = tag!(b"OTTO"); /// `PCLT` pub const PCLT: u32 = tag!(b"PCLT"); /// `pnum` pub const PNUM: u32 = tag!(b"pnum"); /// `png ` pub const PNG: u32 = tag!(b"png "); /// `post` pub const POST: u32 = tag!(b"post"); /// `pref` pub const PREF: u32 = tag!(b"pref"); /// `prep` pub const PREP: u32 = tag!(b"prep"); /// `pres` pub const PRES: u32 = tag!(b"pres"); /// `prop` pub const PROP: u32 = tag!(b"prop"); /// `pstf` pub const PSTF: u32 = tag!(b"pstf"); /// `psts` pub const PSTS: u32 = tag!(b"psts"); /// `rclt` pub const RCLT: u32 = tag!(b"rclt"); /// `rkrf` pub const RKRF: u32 = tag!(b"rkrf"); /// `rlig` pub const RLIG: u32 = tag!(b"rlig"); /// `rphf` pub const RPHF: u32 = tag!(b"rphf"); /// `sbix` pub const SBIX: u32 = tag!(b"sbix"); /// `Silf` pub const SILF: u32 = tag!(b"Silf"); /// `Sill` pub const SILL: u32 = tag!(b"Sill"); /// `sinh` pub const SINH: u32 = tag!(b"sinh"); /// `smcp` pub const SMCP: u32 = tag!(b"smcp"); /// `SND ` pub const SND: u32 = tag!(b"SND "); /// `SVG ` pub const SVG: u32 = tag!(b"SVG "); /// `syrc` pub const SYRC: u32 = tag!(b"syrc"); /// `taml` pub const TAML: u32 = tag!(b"taml"); /// `tel2` pub const TEL2: u32 = tag!(b"tel2"); /// `telu` pub const TELU: u32 = tag!(b"telu"); /// `thai` pub const THAI: u32 = tag!(b"thai"); /// `tiff` pub const TIFF: u32 = tag!(b"tiff"); /// `tml2` pub const TML2: u32 = tag!(b"tml2"); /// `tnum` pub const TNUM: u32 = tag!(b"tnum"); /// `trak` pub const TRAK: u32 = tag!(b"trak"); /// `ttcf` pub const TTCF: u32 = tag!(b"ttcf"); /// `URD ` pub const URD: u32 = tag!(b"URD "); /// `vatu` pub const VATU: u32 = tag!(b"vatu"); /// `VDMX` pub const VDMX: u32 = tag!(b"VDMX"); /// `vert` pub const VERT: u32 = tag!(b"vert"); /// `vhea` pub const VHEA: u32 = tag!(b"vhea"); /// `vmtx` pub const VMTX: u32 = tag!(b"vmtx"); /// `VORG` pub const VORG: u32 = tag!(b"VORG"); /// `vrt2` pub const VRT2: u32 = tag!(b"vrt2"); /// `Zapf` pub const ZAPF: u32 = tag!(b"Zapf"); /// `zero` pub const ZERO: u32 = tag!(b"zero"); #[cfg(test)] mod tests { use super::*; mod from_string { use super::*; #[test] fn test_four_chars() { let tag = from_string("beng").expect("invalid tag"); assert_eq!(tag, 1650814567); } #[test] fn test_three_chars() { let tag = from_string("BEN").expect("invalid tag"); assert_eq!(tag, 1111838240); } } mod display_tag { use crate::tag::{DisplayTag, NAME}; #[test] fn test_simple_ascii() { assert_eq!(DisplayTag(NAME).to_string(), "name".to_string()); } #[test] fn test_non_ascii() { assert_eq!(DisplayTag(0x12345678).to_string(), "0x12345678".to_string()); } #[test] fn test_debug() { assert_eq!(format!("{:?}", DisplayTag(NAME)), "name".to_string()); } } }
pub const MEDI: u32 = tag!(b"medi"); /// `mkmk`
random_line_split
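For a `random_line_split` record like this one, the original file is recovered by splicing the `middle` field back between `prefix` and `suffix`. A hypothetical reassembly helper (the field names are assumptions about this dump's schema, not an API defined anywhere in the records):

```rust
// Hypothetical: reassemble a fill-in-the-middle record into source text.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    format!("{}{}{}", prefix, middle, suffix)
}

fn main() {
    // Toy example only; real records span thousands of characters.
    let src = reassemble("pub const MED2: u32 = ", "tag!(b\"med2\")", ";");
    assert_eq!(src, "pub const MED2: u32 = tag!(b\"med2\");");
}
```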
tag.rs
//! Utilities and constants for OpenType tags. //! //! See also the [`tag!`](../macro.tag.html) macro for creating tags from a byte string. use crate::error::ParseError; use std::{fmt, str}; /// Generate a 4-byte OpenType tag from a byte string /// /// Example: /// /// ``` /// use allsorts::tag; /// assert_eq!(tag!(b"glyf"), 0x676C7966); /// ``` #[macro_export] macro_rules! tag { ($w:expr) => { u32::from_be_bytes(*$w) }; } /// Wrapper type for a tag that implements `Display` /// /// Example: /// /// ``` /// use allsorts::tag::{self, DisplayTag}; /// /// // ASCII tag comes out as a string /// assert_eq!(&DisplayTag(tag::NAME).to_string(), "name"); /// // Non-ASCII tag comes out as hex /// assert_eq!(&DisplayTag(0x12345678).to_string(), "0x12345678"); /// /// println!("DisplayTag is handy for printing a tag: '{}'", DisplayTag(tag::CFF)); /// ``` #[derive(PartialEq, Eq, Clone, Copy)] pub struct DisplayTag(pub u32); pub fn from_string(s: &str) -> Result<u32, ParseError> { if s.len() > 4 { return Err(ParseError::BadValue); } let mut tag: u32 = 0; let mut count = 0; for c in s.chars() { if !c.is_ascii() || c.is_ascii_control()
tag = (tag << 8) | (c as u32); count += 1; } while count < 4 { tag = (tag << 8) | (' ' as u32); count += 1; } Ok(tag) } impl fmt::Display for DisplayTag { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let tag = self.0; let bytes = tag.to_be_bytes(); if bytes.iter().all(|c| c.is_ascii() && !c.is_ascii_control()) { let s = str::from_utf8(&bytes).unwrap(); // unwrap safe due to above check s.fmt(f) } else { write!(f, "0x{:08x}", tag) } } } impl fmt::Debug for DisplayTag { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(self, f) } } /// `abvf` pub const ABVF: u32 = tag!(b"abvf"); /// `abvm` pub const ABVM: u32 = tag!(b"abvm"); /// `abvs` pub const ABVS: u32 = tag!(b"abvs"); /// `acnt` pub const ACNT: u32 = tag!(b"acnt"); /// `afrc` pub const AFRC: u32 = tag!(b"afrc"); /// `akhn` pub const AKHN: u32 = tag!(b"akhn"); /// `arab` pub const ARAB: u32 = tag!(b"arab"); /// `avar` pub const AVAR: u32 = tag!(b"avar"); /// `BASE` pub const BASE: u32 = tag!(b"BASE"); /// `bdat` pub const BDAT: u32 = tag!(b"bdat"); /// `beng` pub const BENG: u32 = tag!(b"beng"); /// `bloc` pub const BLOC: u32 = tag!(b"bloc"); /// `blwf` pub const BLWF: u32 = tag!(b"blwf"); /// `blwm` pub const BLWM: u32 = tag!(b"blwm"); /// `blws` pub const BLWS: u32 = tag!(b"blws"); /// `bng2` pub const BNG2: u32 = tag!(b"bng2"); /// `bsln` pub const BSLN: u32 = tag!(b"bsln"); /// `c2sc` pub const C2SC: u32 = tag!(b"c2sc"); /// `calt` pub const CALT: u32 = tag!(b"calt"); /// `CBDT` pub const CBDT: u32 = tag!(b"CBDT"); /// `CBLC` pub const CBLC: u32 = tag!(b"CBLC"); /// `ccmp` pub const CCMP: u32 = tag!(b"ccmp"); /// `cfar` pub const CFAR: u32 = tag!(b"cfar"); /// `CFF ` pub const CFF: u32 = tag!(b"CFF "); /// `cjct` pub const CJCT: u32 = tag!(b"cjct"); /// `clig` pub const CLIG: u32 = tag!(b"clig"); /// `cmap` pub const CMAP: u32 = tag!(b"cmap"); /// `COLR` pub const COLR: u32 = tag!(b"COLR"); /// `CPAL` pub const CPAL: u32 = tag!(b"CPAL"); /// `curs` pub const CURS: u32 = tag!(b"curs"); /// `cvar` pub const CVAR: u32 = tag!(b"cvar"); /// `cvt ` pub const CVT: u32 = tag!(b"cvt "); /// `cyrl` pub const CYRL: u32 = tag!(b"cyrl"); /// `dev2` pub const DEV2: u32 = tag!(b"dev2"); /// `deva` pub const DEVA: u32 = tag!(b"deva"); /// `DFLT` pub const DFLT: u32 = tag!(b"DFLT"); /// `dist` pub const DIST: u32 = tag!(b"dist"); /// `dlig` pub const DLIG: u32 = tag!(b"dlig"); /// `dupe` pub const DUPE: u32 = tag!(b"dupe"); /// `EBDT` pub const EBDT: u32 = tag!(b"EBDT"); /// `EBLC` pub const EBLC: u32 = tag!(b"EBLC"); /// `EBSC` pub const EBSC: u32 = tag!(b"EBSC"); /// `FAR ` pub const FAR: u32 = tag!(b"FAR "); /// `fdsc` pub const FDSC: u32 = tag!(b"fdsc"); /// `Feat` pub const FEAT2: u32 = tag!(b"Feat"); /// `feat` pub const FEAT: u32 = tag!(b"feat"); /// `fin2` pub const FIN2: u32 = tag!(b"fin2"); /// `fin3` pub const FIN3: u32 = tag!(b"fin3"); /// `fina` pub const FINA: u32 = tag!(b"fina"); /// `fmtx` pub const FMTX: u32 = tag!(b"fmtx"); /// `fpgm` pub const FPGM: u32 = tag!(b"fpgm"); /// `frac` pub const FRAC: u32 = tag!(b"frac"); /// `fvar` pub const FVAR: u32 = tag!(b"fvar"); /// `gasp` pub const GASP: u32 = tag!(b"gasp"); /// `GDEF` pub const GDEF: u32 = tag!(b"GDEF"); /// `gjr2` pub const GJR2: u32 = tag!(b"gjr2"); /// `Glat` pub const GLAT: u32 = tag!(b"Glat"); /// `Gloc` pub const GLOC: u32 = tag!(b"Gloc"); /// `glyf` pub const GLYF: u32 = tag!(b"glyf"); /// `GPOS` pub const GPOS: u32 = tag!(b"GPOS"); /// `grek` pub const GREK: u32 = tag!(b"grek"); /// `GSUB` pub const GSUB: u32 = tag!(b"GSUB"); ///
`gujr` pub const GUJR: u32 = tag!(b"gujr"); /// `gur2` pub const GUR2: u32 = tag!(b"gur2"); /// `guru` pub const GURU: u32 = tag!(b"guru"); /// `gvar` pub const GVAR: u32 = tag!(b"gvar"); /// `half` pub const HALF: u32 = tag!(b"half"); /// `haln` pub const HALN: u32 = tag!(b"haln"); /// `hdmx` pub const HDMX: u32 = tag!(b"hdmx"); /// `head` pub const HEAD: u32 = tag!(b"head"); /// `hhea` pub const HHEA: u32 = tag!(b"hhea"); /// `hlig` pub const HLIG: u32 = tag!(b"hlig"); /// `hmtx` pub const HMTX: u32 = tag!(b"hmtx"); /// `hsty` pub const HSTY: u32 = tag!(b"hsty"); /// `init` pub const INIT: u32 = tag!(b"init"); /// `isol` pub const ISOL: u32 = tag!(b"isol"); /// `jpg ` pub const JPG: u32 = tag!(b"jpg "); /// `JSTF` pub const JSTF: u32 = tag!(b"JSTF"); /// `just` pub const JUST: u32 = tag!(b"just"); /// `kern` pub const KERN: u32 = tag!(b"kern"); /// `khmr` pub const KHMR: u32 = tag!(b"khmr"); /// `knd2` pub const KND2: u32 = tag!(b"knd2"); /// `knda` pub const KNDA: u32 = tag!(b"knda"); /// `lao ` pub const LAO: u32 = tag!(b"lao "); /// `latn` pub const LATN: u32 = tag!(b"latn"); /// `lcar` pub const LCAR: u32 = tag!(b"lcar"); /// `liga` pub const LIGA: u32 = tag!(b"liga"); /// `lnum` pub const LNUM: u32 = tag!(b"lnum"); /// `loca` pub const LOCA: u32 = tag!(b"loca"); /// `locl` pub const LOCL: u32 = tag!(b"locl"); /// `LTSH` pub const LTSH: u32 = tag!(b"LTSH"); /// `mark` pub const MARK: u32 = tag!(b"mark"); /// `MATH` pub const MATH: u32 = tag!(b"MATH"); /// `maxp` pub const MAXP: u32 = tag!(b"maxp"); /// `med2` pub const MED2: u32 = tag!(b"med2"); /// `medi` pub const MEDI: u32 = tag!(b"medi"); /// `mkmk` pub const MKMK: u32 = tag!(b"mkmk"); /// `mlm2` pub const MLM2: u32 = tag!(b"mlm2"); /// `mlym` pub const MLYM: u32 = tag!(b"mlym"); /// `mort` pub const MORT: u32 = tag!(b"mort"); /// `morx` pub const MORX: u32 = tag!(b"morx"); /// `mset` pub const MSET: u32 = tag!(b"mset"); /// `name` pub const NAME: u32 = tag!(b"name"); /// `nukt` pub const NUKT: u32 = tag!(b"nukt"); /// `onum` pub const ONUM: u32 = tag!(b"onum"); /// `opbd` pub const OPBD: u32 = tag!(b"opbd"); /// `ordn` pub const ORDN: u32 = tag!(b"ordn"); /// `ory2` pub const ORY2: u32 = tag!(b"ory2"); /// `orya` pub const ORYA: u32 = tag!(b"orya"); /// `OS/2` pub const OS_2: u32 = tag!(b"OS/2"); /// `OTTO` pub const OTTO: u32 = tag!(b"OTTO"); /// `PCLT` pub const PCLT: u32 = tag!(b"PCLT"); /// `pnum` pub const PNUM: u32 = tag!(b"pnum"); /// `png ` pub const PNG: u32 = tag!(b"png "); /// `post` pub const POST: u32 = tag!(b"post"); /// `pref` pub const PREF: u32 = tag!(b"pref"); /// `prep` pub const PREP: u32 = tag!(b"prep"); /// `pres` pub const PRES: u32 = tag!(b"pres"); /// `prop` pub const PROP: u32 = tag!(b"prop"); /// `pstf` pub const PSTF: u32 = tag!(b"pstf"); /// `psts` pub const PSTS: u32 = tag!(b"psts"); /// `rclt` pub const RCLT: u32 = tag!(b"rclt"); /// `rkrf` pub const RKRF: u32 = tag!(b"rkrf"); /// `rlig` pub const RLIG: u32 = tag!(b"rlig"); /// `rphf` pub const RPHF: u32 = tag!(b"rphf"); /// `sbix` pub const SBIX: u32 = tag!(b"sbix"); /// `Silf` pub const SILF: u32 = tag!(b"Silf"); /// `Sill` pub const SILL: u32 = tag!(b"Sill"); /// `sinh` pub const SINH: u32 = tag!(b"sinh"); /// `smcp` pub const SMCP: u32 = tag!(b"smcp"); /// `SND ` pub const SND: u32 = tag!(b"SND "); /// `SVG ` pub const SVG: u32 = tag!(b"SVG "); /// `syrc` pub const SYRC: u32 = tag!(b"syrc"); /// `taml` pub const TAML: u32 = tag!(b"taml"); /// `tel2` pub const TEL2: u32 = tag!(b"tel2"); /// `telu` pub const TELU: u32 = tag!(b"telu"); /// `thai` 
pub const THAI: u32 = tag!(b"thai"); /// `tiff` pub const TIFF: u32 = tag!(b"tiff"); /// `tml2` pub const TML2: u32 = tag!(b"tml2"); /// `tnum` pub const TNUM: u32 = tag!(b"tnum"); /// `trak` pub const TRAK: u32 = tag!(b"trak"); /// `ttcf` pub const TTCF: u32 = tag!(b"ttcf"); /// `URD ` pub const URD: u32 = tag!(b"URD "); /// `vatu` pub const VATU: u32 = tag!(b"vatu"); /// `VDMX` pub const VDMX: u32 = tag!(b"VDMX"); /// `vert` pub const VERT: u32 = tag!(b"vert"); /// `vhea` pub const VHEA: u32 = tag!(b"vhea"); /// `vmtx` pub const VMTX: u32 = tag!(b"vmtx"); /// `VORG` pub const VORG: u32 = tag!(b"VORG"); /// `vrt2` pub const VRT2: u32 = tag!(b"vrt2"); /// `Zapf` pub const ZAPF: u32 = tag!(b"Zapf"); /// `zero` pub const ZERO: u32 = tag!(b"zero"); #[cfg(test)] mod tests { use super::*; mod from_string { use super::*; #[test] fn test_four_chars() { let tag = from_string("beng").expect("invalid tag"); assert_eq!(tag, 1650814567); } #[test] fn test_three_chars() { let tag = from_string("BEN").expect("invalid tag"); assert_eq!(tag, 1111838240); } } mod display_tag { use crate::tag::{DisplayTag, NAME}; #[test] fn test_simple_ascii() { assert_eq!(DisplayTag(NAME).to_string(), "name".to_string()); } #[test] fn test_non_ascii() { assert_eq!(DisplayTag(0x12345678).to_string(), "0x12345678".to_string()); } #[test] fn test_debug() { assert_eq!(format!("{:?}", DisplayTag(NAME)), "name".to_string()); } } }
{ return Err(ParseError::BadValue); }
conditional_block
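The elided `conditional_block` in this record is the early-return guard inside `from_string`: any character that is not ASCII, or is an ASCII control character, aborts with `ParseError::BadValue`. A standalone sketch of the same guard (using a unit error in place of the crate's `ParseError`, which lives in its `error` module):

```rust
// Mirrors from_string's per-character validation; Err(()) stands in for
// Err(ParseError::BadValue).
fn validate_tag_chars(s: &str) -> Result<(), ()> {
    for c in s.chars() {
        if !c.is_ascii() || c.is_ascii_control() {
            return Err(());
        }
    }
    Ok(())
}

fn main() {
    assert!(validate_tag_chars("glyf").is_ok());
    assert!(validate_tag_chars("cvt ").is_ok()); // embedded spaces are allowed
    assert!(validate_tag_chars("gl\u{7}f").is_err()); // control char rejected
    assert!(validate_tag_chars("glyé").is_err()); // non-ASCII rejected
}
```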
lib.rs
/*! * This library provides an API client for Diffbot. * * Making API requests * ------------------- * * There are a handful of different ways to make API calls: * * 1. The most basic way to make a request is with the ``call()`` function. * Everything must be specified for each request. * * 2. Use the ``Diffbot`` struct to keep track of your token and API version * and then use its ``.call()`` method to make API calls. This has the * advantage that you can specify those things just once and they'll be * retained. * * 3. Instead of making a request in one step, you can make it two steps with * the ``prepare_request()`` function. This allows you to specify to Diffbot * certain details of how *it* should make the request. That gives you a * ``Request`` object. * * 4. In the same manner, if you have a ``Diffbot`` struct you can call the * ``.prepare_request()`` method on it. * * Prepared requests * ----------------- * * If you use the ``prepare_request()`` function or method, you can tweak the * request that will be sent to Diffbot. You can alter the User-Agent, Referer * or Cookie headers that it will send and then call ``.call()`` to make the * request, or you can call ``.post_body()`` to send the HTML yourself, if it * is not publicly available to the wider Internet. * * Getting data out of the result * ------------------------------ * * At present, the successful return value of a request is simply a JSON object, * a tree map. This *will* make it moderately difficult to work with, but if * you're determined, it's possible. You'll end up with results like these: * * // First of all, you must, of course, have a response to work on. * let mut response: TreeMap<~str, Json> * = diffbot::call(..., "article",...).unwrap(); * * // Get the title of the article * let title = match response.pop(&~"title").unwrap() { * json::String(s) => s, * _ => unreachable!(), * }; * * // Get the URL of each image * let image_urls: ~[Url] = match response.pop(&~"images").unwrap() { * json::List(images) => images.move_iter().map(|image| match image { * json::Object(~mut o) => { * match o.pop(&~"url").unwrap() { * json::String(ref s) => from_str(s), * _ => unreachable!(), * } * }, * _ => unreachable!(), * }), * _ => unreachable!(), * }.collect(); * * (Yep, I'll freely admit that these are clumsier than they might be in another * language, which might allow something like this: * * let response =...; * * let title = response.title; * let image_urls = [from_str(image.url) for image in response.images]; * * In time we may get strongly typed interfaces which would be much nicer, but * for now, you'd need to do that yourself. It can be done with the tools in * ``extra::serialize``, by the way.) */ #[crate_id = "diffbot#1.0"]; #[crate_type = "dylib"]; #[crate_type = "rlib"]; #[doc(html_logo_url = "diffy-d.png", html_favicon_url = "http://www.diffbot.com/favicon.ico")]; extern mod extra = "extra#0.10-pre"; extern mod http = "http#0.1-pre"; use std::io::net::tcp::TcpStream; use extra::json; use extra::url::Url; use http::client::RequestWriter; use http::method::{Get, Post}; use http::headers::content_type::MediaType; /// A convenience type which simply keeps track of a developer token and version /// number. /// /// There is no necessity to use this type; you can call ``call()`` directly /// should you so desire. 
#[deriving(Eq, Clone)] pub struct Diffbot { /// The developer's token token: ~str, /// The API version number version: uint, } // Basic methods impl Diffbot { /// Construct a new ``Diffbot`` instance from the passed parameters. pub fn new(token: ~str, version: uint) -> Diffbot { Diffbot { token: token, version: version, } } /// Make a call to any Diffbot API with the stored token and API version. /// /// See the ``call()`` function for an explanation of the parameters. pub fn
(&self, url: &Url, api: &str, fields: &[&str]) -> Result<json::Object, Error> { call(url, self.token, api, fields, self.version) } /// Prepare a request to any Diffbot API with the stored token and API version. /// /// See the ``call()`` function for an explanation of the parameters. pub fn prepare_request(&self, url: &Url, api: &str, fields: &[&str]) -> Request { prepare_request(url, self.token, api, fields, self.version) } } /// An in-progress Diffbot API call. pub struct Request { priv request: RequestWriter<TcpStream>, } impl Request { /// Set the value for Diffbot to send as the ``User-Agent`` header when /// making your request. pub fn user_agent(&mut self, user_agent: ~str) { self.request.headers.extensions.insert(~"X-Forwarded-User-Agent", user_agent); } /// Set the value for Diffbot to send as the ``Referer`` header when /// making your request. pub fn referer(&mut self, referer: ~str) { self.request.headers.extensions.insert(~"X-Forwarded-Referer", referer); } /// Set the value for Diffbot to send as the ``Cookie`` header when /// making your request. pub fn cookie(&mut self, cookie: ~str) { self.request.headers.extensions.insert(~"X-Forwarded-Cookie", cookie); } /// Set Diffbot's timeout, in milliseconds. The default is five seconds. pub fn timeout(&mut self, milliseconds: u64) { self.request.url.query.push((~"timeout", milliseconds.to_str())); } /// Execute the request and get the results. pub fn call(self) -> Result<json::Object, Error> { let mut response = match self.request.read_response() { Ok(response) => response, Err(_request) => return Err(IoError), // Request failed }; let json = match json::from_reader(&mut response as &mut Reader) { Ok(json) => json, Err(error) => return Err(JsonError(error)), // It... wasn't JSON!? }; // Now let's see if this is an API error or not. // API errors are of the form {"error":"Invalid API.","errorCode":500} match json { json::Object(~mut o) => { match o.pop(&~"errorCode") { Some(json::Number(num)) => { let num = num as uint; let msg = match o.pop(&~"error") .expect("JSON had errorCode but not error") { json::String(s) => s, uh_oh => fail!("error was {} instead of a string", uh_oh.to_str()), }; Err(ApiError(msg, num)) }, Some(uh_oh) => fail!("errorCode was {} instead of a number", uh_oh.to_str()), None => Ok(o), } }, // All API responses must be objects. // If it's not, there's something screwy going on. _ => fail!("API return value wasn't a JSON object"), } } /// Execute the request as a POST request, sending it through with the given /// text/html entity body. /// /// This has the effect that Diffbot will skip requesting the URL and will /// instead take the passed body as the HTML it is to check. This is mainly /// useful for non-public websites. 
pub fn post_body(mut self, body: &[u8]) -> Result<json::Object, Error> { self.request.method = Post; self.request.headers.content_type = Some(MediaType(~"text", ~"html", ~[])); self.request.headers.content_length = Some(body.len()); // Calling write_headers is an extra and unnecessary safety guard which // will cause the task to fail if the request has already started to be // sent (which would render the three statements above ineffectual) self.request.write_headers(); self.request.write(body); self.call() } } /// Error code: "unauthorized token" pub static UNAUTHORIZED_TOKEN: uint = 401; /// Error code: "requested page not found" pub static REQUESTED_PAGE_NOT_FOUND: uint = 404; /// Error code: "your token has exceeded the allowed number of calls, or has /// otherwise been throttled for API abuse." pub static TOKEN_EXCEEDED_OR_THROTTLED: uint = 429; /// Error code: "error processing the page. Specific information will be /// returned in the JSON response." pub static ERROR_PROCESSING: uint = 500; /// Something went wrong with the Diffbot API call. #[deriving(Eq)] pub enum Error { /// An error code returned by the Diffbot API, with message and code. /// Refer to http://www.diffbot.com/dev/docs/error/ for an explanation of /// the error codes. /// /// When comparing the error code, you should use these constants: /// /// - ``UNAUTHORIZED_TOKEN``: "unauthorized token" /// - ``REQUESTED_PAGE_NOT_FOUND``: "requested page not found" /// - ``TOKEN_EXCEEDED_OR_THROTTLED``: "your token has exceeded the allowed /// number of calls, or has otherwise been throttled for API abuse." /// - ``ERROR_PROCESSING``: "error processing the page. Specific information /// will be returned in the JSON response." ApiError(~str, uint), /// The JSON was not valid. This is one of those ones that *should* never /// happen; you know... /// /// Actually, I can perceive that it might happen if a returned document /// included invalid UTF-8, but this case has not been tested. JsonError(json::Error), /// An I/O error occurred and the condition was trapped somewhere (by you). IoError, } impl ToStr for Error { fn to_str(&self) -> ~str { match *self { ApiError(ref msg, code) => format!("API error {}: {}", code, *msg), JsonError(ref error) => format!("JSON error: {}", error.to_str()), IoError => format!("I/O error (already handled)"), } } } /// Make a simple Diffbot API call. /// /// For more complex requests, use ``Diffbot`` or ``prepare_request()``. /// /// Arguments /// ========= /// /// - ``url`` is the URL that you wish Diffbot to operate upon. If this is a /// publicly-inaccessible URL, you should use ``post_body()`` on a prepared /// request instead of ``call()``. /// /// - ``token`` is the developer's token. /// /// - ``api`` is the name of the API endpoint, e.g. "article", "product". /// /// - ``fields`` is the set of fields you want the API call to return; it /// follows the form specified by the Diffbot API and so should have values /// like "*", "meta", "querystring", "images(*)". /// /// - ``version`` is the Diffbot API version number. pub fn call(url: &Url, token: &str, api: &str, fields: &[&str], version: uint) -> Result<json::Object, Error> { prepare_request(url, token, api, fields, version).call() } /// Prepare, but do not send, a request. /// /// This allows you to use some of the more advanced features of the API like /// setting certain headers for Diffbot to use, or uploading a private document /// for it.
pub fn prepare_request(url: &Url, token: &str, api: &str, fields: &[&str], version: uint) -> Request { // First of all we must calculate the GET parameters. let mut query = ~[(~"token", token.to_owned()), (~"url", url.to_str())]; if fields.len() > 0 { query.push((~"fields", fields.connect(","))); } // Now that we've got that, we can figure out the complete URL. let url = Url::new(~"http", // scheme None, // user ~"api.diffbot.com", // host None, // port format!("/v{}/{}", version, api), // path query, // query None); // fragment // And with that, we can now make the request. Whee! Request { request: RequestWriter::new(Get, url) } }
call
identifier_name
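Putting the pieces of this last record together, a typical prepared-request flow against the Diffbot client would look roughly like the sketch below. It is written in the same pre-1.0 Rust dialect as the record (so it will not compile on a modern toolchain), and the token, URL, and field list are placeholders, not values taken from the record:

```rust
// Hypothetical usage of the client above; token and URL are placeholders.
let diffbot = Diffbot::new(~"YOUR_TOKEN", 2);
let url: Url = from_str("http://example.com/article").unwrap();

// prepare_request() returns a Request whose forwarded headers and timeout
// can be tweaked before the call is actually made.
let mut request = diffbot.prepare_request(&url, "article", &["title", "images(*)"]);
request.user_agent(~"my-crawler/0.1");
request.timeout(10000); // milliseconds; the Diffbot-side default is five seconds

match request.call() {
    Ok(object) => { /* walk the json::Object tree map for fields */ },
    Err(error) => println!("Diffbot call failed: {}", error.to_str()),
}
```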