Dataset schema (one row per fill-in-the-middle sample):
file_name: large_string (lengths 4 to 69)
prefix: large_string (lengths 0 to 26.7k)
suffix: large_string (lengths 0 to 24.8k)
middle: large_string (lengths 0 to 2.12k)
fim_type: large_string (4 classes)
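For orientation, each row below stores one fill-in-the-middle sample: a `prefix`, a `suffix`, the ground-truth `middle`, and a `fim_type` label describing how the span was cut. A minimal sketch of how a row reassembles into the original file, assuming the conventional prefix + middle + suffix layout (the `reconstruct` helper is hypothetical, not part of the dataset):

```rust
/// Reassembles the original source from one dataset row. Hypothetical helper,
/// assuming the conventional FIM layout where `middle` fills the gap between
/// `prefix` and `suffix`.
fn reconstruct(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut out = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    out.push_str(prefix);
    out.push_str(middle);
    out.push_str(suffix);
    out
}
```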
day_14.rs
//! This is my solution for [Advent of Code - Day 14](https://adventofcode.com/2020/day/14) - //! _Docking Data_ //! //! This was themed around bitwise operations. The challenge was mostly parsing the puzzle //! description into the bitwise operations needed. This was the first time I needed an Either //! implementation rather than just using an enum, as I needed to be able to store the current Mask //! in a variable that is explicitly a Mask rather than an Instruction that could be either a Mask //! or a Mem. use std::fs; use regex::Regex; use im::{HashMap, HashSet}; use either::Either; use either::Either::*; /// The entry point for running the solutions with the 'real' puzzle input. /// /// - The puzzle input is expected to be at `<project_root>/res/day-14-input` /// - It is expected this will be called by [`super::main()`] when the user elects to run day 14. pub fn run() { let contents = fs::read_to_string("res/day-14-input").expect("Failed to read file"); let memory = run_program_v1(contents.as_str()); let sum = sum_memory(memory); println!("The sum of memory values after running the program v1 is: {}", sum); let memory = run_program_v2(contents.as_str()); let sum = sum_memory(memory); println!("The sum of memory values after running the program v2 is: {}", sum); } /// Represents an input line that overwrites the current bitmask, see [`parse_line`]. #[derive(Debug, Eq, PartialEq)] struct Mask { mask: usize, data: usize } /// Represents an input line that updates the current memory values, see [`parse_line`]. #[derive(Debug, Eq, PartialEq)] struct Mem { address: usize, value: usize } /// Parse a line from the puzzle input into structured data /// /// A line will be of one of the two following formats: /// * `mask = 000000000000000000000000000000X1001X` /// * `mem[8] = 11` /// /// ## Masks /// For both parts of the puzzle the mask has two uses: where the character is a `0` or `1` it /// should be treated as raw data that will in some way override other input, and `X` will be used as /// the mask. It is easier to store this as two bitmaps, one for the data and one for the mask, as /// these are used separately. /// /// ## Memory Updates /// Whilst the two parts use the mask to modify where/what actually gets written, `mem[8] = 11` /// should be interpreted as address = 8, value = 11. /// /// # Examples from Tests /// ``` /// assert_eq!( /// Left(Mask { /// mask: 0b111111111111111111111111111111111111, /// data: 0b000000000000000000000000000000000000, /// }), /// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") /// ); /// assert_eq!( /// Left(Mask { /// mask: 0b111111111111111111111111111110111101, /// data: 0b000000000000000000000000000001000000, /// }), /// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X") /// ); /// /// assert_eq!( /// Right(Mem { address: 8, value: 11 }), /// parse_line("mem[8] = 11") /// ); /// assert_eq!( /// Right(Mem { address: 7, value: 101 }), /// parse_line("mem[7] = 101") /// ); /// assert_eq!( /// Right(Mem { address: 8, value: 0 }), /// parse_line("mem[8] = 0") /// ); /// ``` fn parse_line(line: &str) -> Either<Mask, Mem> { let mut parts = line.split(" = "); let inst = parts.next().expect("Invalid line"); let value = parts.next().expect("Invalid line"); if inst == "mask"
else { let re = Regex::new(r"^mem\[(\d+)]$").unwrap(); match re.captures(inst) { Some(cap) => Right(Mem { address: cap.get(1).unwrap().as_str().parse::<usize>().unwrap(), value: value.parse::<usize>().unwrap(), }), None => panic!("Invalid line") } } } /// Takes the string input and returns the memory state after it has been interpreted using the /// part 1 protocol /// /// > The current bitmask is applied to values immediately before they are written to memory: a 0 or /// > 1 overwrites the corresponding bit in the value, while an X leaves the bit in the value /// > unchanged. /// /// # Example from Tests /// ``` /// let mut expected: HashMap<usize, usize> = HashMap::new(); /// /// let program_1 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11"; /// /// expected.insert(8, 73); /// assert_eq!(expected, run_program_v1(program_1)); /// /// let program_2 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X /// mem[8] = 11 /// mem[7] = 101 /// mem[8] = 0"; /// /// expected.insert(7, 101); /// expected.insert(8, 64); /// let memory = run_program_v1(program_2); /// /// assert_eq!(expected, memory); /// /// assert_eq!(165usize, sum_memory(memory)); /// ``` fn run_program_v1(program: &str) -> HashMap<usize, usize> { let mut memory = HashMap::new(); let mut current_mask = Mask { mask: 0, data: 0 }; for line in program.lines() { match parse_line(line) { Left(Mask { mask, data }) => current_mask = Mask { mask, data }, Right(Mem { address, value }) => { memory.insert( address, value & current_mask.mask | current_mask.data, ); } } } memory } /// Takes the string input and returns the memory state after it has been interpreted using the /// part 2 protocol. /// /// > Immediately before a value is written to memory, each bit in the bitmask modifies the /// > corresponding bit of the destination memory address in the following way: /// > - If the bitmask bit is 0, the corresponding memory address bit is unchanged. /// > - If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1. /// > - If the bitmask bit is X, the corresponding memory address bit is floating. /// > /// > A floating bit is not connected to anything and instead fluctuates unpredictably. In practice, /// > this means the floating bits will take on all possible values, potentially causing many memory /// > addresses to be written all at once! /// /// The set of addresses a mask will write to is given by [`explode_addresses`] /// /// # Example from Tests /// ``` /// let program = "mask = 000000000000000000000000000000X1001X /// mem[42] = 100 /// mask = 00000000000000000000000000000000X0XX /// mem[26] = 1"; /// /// let memory = run_program_v2(program); /// assert_eq!(208usize, sum_memory(memory)); /// ``` fn run_program_v2(program: &str) -> HashMap<usize, usize> { let mut memory = HashMap::new(); let mut current_mask = Mask { mask: 0, data: 0 }; for line in program.lines() { match parse_line(line) { Left(Mask { mask, data }) => current_mask = Mask { mask, data }, Right(Mem { address, value }) => for address in explode_addresses(&current_mask, address) { memory.insert(address, value); }, } } memory } /// Because floating bits can take on any value, this returns all the addresses that a given mask /// applied to the input address refers to. /// /// 1. The base address is the address where all the `X` values in the mask are `0`. Additionally, /// bits where the mask data is 1 should be 1 for all addresses in the final output, i.e. /// `(input | mask.data) & !mask.mask` /// 2. 
Iterate through the bits, and where the mask is `X` add an additional address to each of the /// existing combinations for the address where that bit is `1` rather than `0`, so the set /// doubles in size each time we encounter an `X`. With some boilerplate, as the existing set /// can't be appended to while it's being iterated. /// /// # Examples from Tests /// ``` /// let expected: HashSet<usize> = vec!(26usize, 27usize, 58usize, 59usize).into_iter().collect(); /// assert_eq!( /// expected, /// explode_addresses( /// &Mask { /// mask: 0b000000000000000000000000000000100001, /// data: 0b000000000000000000000000000000010010, /// }, /// 42, /// ) /// ); /// /// let expected: HashSet<usize> = /// vec!(16usize, 17usize, 18usize, 19usize, 24usize, 25usize, 26usize, 27usize) /// .into_iter().collect(); /// assert_eq!( /// expected, /// explode_addresses( /// &parse_line("mask = 00000000000000000000000000000000X0XX") /// .expect_left("Failed to parse as mask"), /// 26, /// ) /// ); /// ``` fn explode_addresses(mask: &Mask, input: usize) -> HashSet<usize> { let mut addresses = HashSet::new(); addresses.insert((input | mask.data) & !mask.mask); for i in 0..36 { if (1 << i) & mask.mask != 0 { let mut new_addresses = HashSet::new(); for &address in addresses.iter() { new_addresses.insert(address | (1 << i)); } for &new_address in new_addresses.iter() { addresses.insert(new_address); } } } addresses } /// Sum a memory snapshot /// /// Both puzzle parts finally sum all the memory registers into a single number as the expected /// answer. Extracted into a function to avoid repetition. fn sum_memory(memory: HashMap<usize, usize>) -> usize { memory.iter().map(|(_, v)| *v).sum() } #[cfg(test)] mod tests { use day_14::{parse_line, Mask, Mem, run_program_v1, sum_memory, explode_addresses, run_program_v2}; use either::Either::*; use im::{HashMap, HashSet}; //noinspection SpellCheckingInspection #[test] fn can_parse() { assert_eq!( Left(Mask { mask: 0b111111111111111111111111111111111111, data: 0b000000000000000000000000000000000000, }), parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") ); assert_eq!( Left(Mask { mask: 0b111111111111111111111111111110111101, data: 0b000000000000000000000000000001000000, }), parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X") ); assert_eq!( Right(Mem { address: 8, value: 11 }), parse_line("mem[8] = 11") ); assert_eq!( Right(Mem { address: 7, value: 101 }), parse_line("mem[7] = 101") ); assert_eq!( Right(Mem { address: 8, value: 0 }), parse_line("mem[8] = 0") ); } //noinspection SpellCheckingInspection #[test] fn can_run_program_v1() { let mut expected: HashMap<usize, usize> = HashMap::new(); let program_1 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11"; expected.insert(8, 73); assert_eq!(expected, run_program_v1(program_1)); let program_2 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X mem[8] = 11 mem[7] = 101 mem[8] = 0"; expected.insert(7, 101); expected.insert(8, 64); let memory = run_program_v1(program_2); assert_eq!(expected, memory); assert_eq!(165usize, sum_memory(memory)); } #[test] fn can_explode_addresses() { let expected: HashSet<usize> = vec!(26usize, 27usize, 58usize, 59usize).into_iter().collect(); assert_eq!( expected, explode_addresses( &Mask { mask: 0b000000000000000000000000000000100001, data: 0b000000000000000000000000000000010010, }, 42, ) ); let expected: HashSet<usize> = vec!(16usize, 17usize, 18usize, 19usize, 24usize, 25usize, 26usize, 27usize) .into_iter().collect(); assert_eq!( expected, explode_addresses( &parse_line("mask = 
00000000000000000000000000000000X0XX") .expect_left("Failed to parse as mask"), 26, ) ); } #[test] fn can_run_program_v2() { let program = "mask = 000000000000000000000000000000X1001X mem[42] = 100 mask = 00000000000000000000000000000000X0XX mem[26] = 1"; let memory = run_program_v2(program); assert_eq!(208usize, sum_memory(memory)); } }
{ let (mask, data) = value.chars().fold( (0usize, 0usize), |(mask, data), char| ( mask << 1 | if char == 'X' { 1 } else { 0 }, data << 1 | if char == '1' { 1 } else { 0 } ), ); Left(Mask { mask, data }) }
conditional_block
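The v1 protocol described in the doc comments above boils down to a single bitwise expression: `X` positions in the mask (stored as 1 bits in `mask`) keep the value's bit, while literal positions are forced to the bit stored in `data`. A minimal standalone sketch of that application, reusing the mask/data encoding from `parse_line` and the 11 → 73 example from the doc tests (the `apply_v1` name is hypothetical):

```rust
/// Applies a part-1 mask: keep value bits where the mask has an `X`
/// (a 1 bit in `mask`), force the literal bit from `data` elsewhere.
fn apply_v1(value: usize, mask: usize, data: usize) -> usize {
    // `&` binds tighter than `|` in Rust, so this is (value & mask) | data.
    value & mask | data
}

fn main() {
    // mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X applied to 11 yields 73,
    // matching the doc-test example above.
    let mask = 0b111111111111111111111111111110111101usize;
    let data = 0b000000000000000000000000000001000000usize;
    assert_eq!(73, apply_v1(11, mask, data));
}
```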
schedule.rs
use crate::{ borrow::{Exclusive, RefMut}, command::CommandBuffer, resource::ResourceTypeId, storage::ComponentTypeId, world::World, }; use bit_set::BitSet; use itertools::izip; use std::iter::repeat; use std::{ collections::{HashMap, HashSet}, sync::atomic::{AtomicUsize, Ordering}, }; #[cfg(feature = "par-iter")] use rayon::prelude::*; /// Empty trait which defines a `System` as schedulable by the dispatcher - this requires that the /// type is both `Send` and `Sync`. /// /// This is automatically implemented for all types that implement `Runnable` which meet the requirements. pub trait Schedulable: Runnable + Send + Sync {} impl<T> Schedulable for T where T: Runnable + Send + Sync {} /// Describes which archetypes a system declares access to. pub enum ArchetypeAccess { /// All archetypes. All, /// Some archetypes. Some(BitSet), } impl ArchetypeAccess { pub fn is_disjoint(&self, other: &ArchetypeAccess) -> bool { match self { Self::All => false, Self::Some(mine) => match other { Self::All => false, Self::Some(theirs) => mine.is_disjoint(theirs), }, } } } /// Trait describing a schedulable type. This is implemented by `System` pub trait Runnable { fn name(&self) -> &str; fn reads(&self) -> (&[ResourceTypeId], &[ComponentTypeId]); fn writes(&self) -> (&[ResourceTypeId], &[ComponentTypeId]); fn prepare(&mut self, world: &World); fn accesses_archetypes(&self) -> &ArchetypeAccess; fn run(&self, world: &World); fn dispose(self: Box<Self>, world: &mut World); fn command_buffer_mut(&self) -> RefMut<Exclusive, CommandBuffer>; } /// Stages represent discrete steps of a game's loop, such as "start", "update", "draw", "end", etc. /// Stages have a defined execution order. /// /// Systems run within a stage, and commit any buffered changes to the ecs at the end of a stage /// (which may or may not be the stage within which they run, but cannot be an earlier stage). trait Stage: Copy + PartialOrd + Ord + PartialEq + Eq {} /// Executes all systems that are to be run within a single given stage. pub struct StageExecutor<'a> { systems: &'a mut [Box<dyn Schedulable>], #[cfg(feature = "par-iter")] pool: &'a rayon::ThreadPool, #[cfg(feature = "par-iter")] static_dependants: Vec<Vec<usize>>, #[cfg(feature = "par-iter")] dynamic_dependants: Vec<Vec<usize>>, #[cfg(feature = "par-iter")] static_dependency_counts: Vec<AtomicUsize>, #[cfg(feature = "par-iter")] awaiting: Vec<AtomicUsize>, } impl<'a> StageExecutor<'a> { #[cfg(not(feature = "par-iter"))] pub fn new(systems: &'a mut [Box<dyn Schedulable>]) -> Self { Self { systems } } /// Constructs a new executor for all systems to be run in a single stage. /// /// Systems are provided in the order in which side-effects (e.g. writes to resources or entities) /// are to be observed. #[cfg(feature = "par-iter")] #[allow(clippy::cognitive_complexity)] // TODO: we should break this up pub fn new(systems: &'a mut [Box<dyn Schedulable>], pool: &'a rayon::ThreadPool) -> Self { if systems.len() > 1 { let mut static_dependency_counts = Vec::with_capacity(systems.len()); let mut static_dependants: Vec<Vec<_>> = repeat(Vec::with_capacity(64)).take(systems.len()).collect(); let mut dynamic_dependants: Vec<Vec<_>> = repeat(Vec::with_capacity(64)).take(systems.len()).collect(); let mut resource_last_mutated = HashMap::<ResourceTypeId, usize>::with_capacity(64); let mut resource_last_read = HashMap::<ResourceTypeId, usize>::with_capacity(64);
let mut component_mutated = HashMap::<ComponentTypeId, Vec<usize>>::with_capacity(64); for (i, system) in systems.iter().enumerate() { log::debug!("Building dependency: {}", system.name()); let (read_res, read_comp) = system.reads(); let (write_res, write_comp) = system.writes(); // find resource access dependencies let mut dependencies = HashSet::with_capacity(64); for res in read_res { log::trace!("Read resource: {:?}", res); if let Some(n) = resource_last_mutated.get(res) { dependencies.insert(*n); } resource_last_read.insert(*res, i); } for res in write_res { log::trace!("Write resource: {:?}", res); // Writes have to be exclusive, so we are dependent on reads too if let Some(n) = resource_last_read.get(res) { log::trace!("Added dep: {:?}", n); dependencies.insert(*n); } if let Some(n) = resource_last_mutated.get(res) { log::trace!("Added dep: {:?}", n); dependencies.insert(*n); } resource_last_mutated.insert(*res, i); } static_dependency_counts.push(AtomicUsize::from(dependencies.len())); log::debug!("dependencies: {:?}", dependencies); for dep in dependencies { log::debug!("static_dependants.push: {:?}", dep); static_dependants[dep].push(i); } // find component access dependencies let mut comp_dependencies = HashSet::new(); for comp in read_comp { if let Some(ns) = component_mutated.get(comp) { for n in ns { comp_dependencies.insert(*n); } } } for comp in write_comp { if let Some(ns) = component_mutated.get(comp) { for n in ns { comp_dependencies.insert(*n); } } component_mutated .entry(*comp) .or_insert_with(Vec::new) .push(i); } log::debug!("comp_dependencies: {:?}", &comp_dependencies); for dep in comp_dependencies { dynamic_dependants[dep].push(i); } } if log::log_enabled!(log::Level::Debug) { log::debug!("static_dependants: {:?}", static_dependants); log::debug!("dynamic_dependants: {:?}", dynamic_dependants); } let mut awaiting = Vec::with_capacity(systems.len()); systems .iter() .for_each(|_| awaiting.push(AtomicUsize::new(0))); Self { pool, awaiting, static_dependants, dynamic_dependants, static_dependency_counts, systems, } } else { Self { pool, awaiting: Vec::with_capacity(0), static_dependants: Vec::with_capacity(0), dynamic_dependants: Vec::with_capacity(0), static_dependency_counts: Vec::with_capacity(0), systems, } } } /// This is a linear executor which just runs the systems in their given order. /// /// Only enabled when par-iter is disabled. #[cfg(not(feature = "par-iter"))] pub fn execute(&mut self, world: &mut World) { self.systems.iter_mut().for_each(|system| { system.run(world); }); // Flush the command buffers of all the systems self.systems.iter().for_each(|system| { system.command_buffer_mut().write(world); }); } /// Executes this stage. Execution is recursively conducted in a draining fashion. Systems are /// ordered based on 1. their resource access, and then 2. their insertion order. Systems are /// executed in the pool provided at construction, and this function does not return until all /// systems in this stage have completed. 
#[cfg(feature = "par-iter")] pub fn execute(&mut self, world: &mut World) { log::trace!("execute"); rayon::join( || {}, || { match self.systems.len() { 1 => { log::trace!("Single system, just run it"); self.systems[0].run(world); } _ => { log::trace!("Begin pool execution"); let systems = &mut self.systems; let static_dependency_counts = &self.static_dependency_counts; let awaiting = &mut self.awaiting; // prepare all systems - archetype filters are pre-executed here systems.par_iter_mut().for_each(|sys| sys.prepare(world)); // determine dynamic dependencies izip!( systems.iter(), self.static_dependants.iter_mut(), self.dynamic_dependants.iter_mut() ) .par_bridge() .for_each(|(sys, static_dep, dyn_dep)| { let archetypes = sys.accesses_archetypes(); for i in (0..dyn_dep.len()).rev() { let dep = dyn_dep[i]; let other = &systems[dep]; // if the archetype sets intersect, // then we can move the dynamic dependant into the static dependants set if !other.accesses_archetypes().is_disjoint(archetypes) { static_dep.push(dep); dyn_dep.swap_remove(i); static_dependency_counts[dep].fetch_add(1, Ordering::Relaxed); } } }); // initialize dependency tracking for (i, count) in static_dependency_counts.iter().enumerate() { awaiting[i].store(count.load(Ordering::Relaxed), Ordering::Relaxed); } log::trace!("Initialized awaiting: {:?}", awaiting); let awaiting = &self.awaiting; // execute all systems with no outstanding dependencies (0..systems.len()) .into_par_iter() .filter(|i| awaiting[*i].load(Ordering::SeqCst) == 0) .for_each(|i| { self.run_recursive(i, world); }); } } }, ); // Flush the command buffers of all the systems self.systems.iter().for_each(|system| { system.command_buffer_mut().write(world); }); } /// Recursively execute through the generated dependency cascade and exhaust it. #[cfg(feature = "par-iter")] fn run_recursive(&self, i: usize, world: &World) { log::trace!("run_recursive: {}", i); self.systems[i].run(world); self.static_dependants[i].par_iter().for_each(|dep| { match self.awaiting[*dep].compare_exchange( 1, std::usize::MAX, Ordering::Relaxed, Ordering::Relaxed, ) { Ok(_) => { self.run_recursive(*dep, world); } Err(_) => { self.awaiting[*dep].fetch_sub(1, Ordering::Relaxed); } } }); } }
random_line_split
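The scheduler above tracks, per system, an atomic count of unfinished dependencies in `awaiting`; as predecessors finish, `run_recursive` decrements each dependant's counter, and the `compare_exchange(1, MAX)` lets exactly one finishing predecessor claim the right to run the dependant. A minimal sketch of that countdown in isolation, with a hypothetical `signal` helper standing in for the body of the `par_iter` closure:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Whichever caller observes a count of 1 wins the compare_exchange and is
// responsible for running the dependant; everyone else just decrements.
fn signal(awaiting: &AtomicUsize) -> bool {
    match awaiting.compare_exchange(1, usize::MAX, Ordering::Relaxed, Ordering::Relaxed) {
        Ok(_) => true, // last outstanding dependency: run the dependant now
        Err(_) => {
            awaiting.fetch_sub(1, Ordering::Relaxed); // still waiting on others
            false
        }
    }
}

fn main() {
    let awaiting = AtomicUsize::new(2); // a system with two predecessors
    assert!(!signal(&awaiting)); // first predecessor done, one left
    assert!(signal(&awaiting));  // second predecessor done, dependant may run
}
```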
schedule.rs
use crate::{ borrow::{Exclusive, RefMut}, command::CommandBuffer, resource::ResourceTypeId, storage::ComponentTypeId, world::World, }; use bit_set::BitSet; use itertools::izip; use std::iter::repeat; use std::{ collections::{HashMap, HashSet}, sync::atomic::{AtomicUsize, Ordering}, }; #[cfg(feature = "par-iter")] use rayon::prelude::*; /// Empty trait which defines a `System` as schedulable by the dispatcher - this requires that the /// type is both `Send` and `Sync`. /// /// This is automatically implemented for all types that implement `Runnable` which meet the requirements. pub trait Schedulable: Runnable + Send + Sync {} impl<T> Schedulable for T where T: Runnable + Send + Sync {} /// Describes which archetypes a system declares access to. pub enum ArchetypeAccess { /// All archetypes. All, /// Some archetypes. Some(BitSet), } impl ArchetypeAccess { pub fn is_disjoint(&self, other: &ArchetypeAccess) -> bool { match self { Self::All => false, Self::Some(mine) => match other { Self::All => false, Self::Some(theirs) => mine.is_disjoint(theirs), }, } } } /// Trait describing a schedulable type. This is implemented by `System` pub trait Runnable { fn name(&self) -> &str; fn reads(&self) -> (&[ResourceTypeId], &[ComponentTypeId]); fn writes(&self) -> (&[ResourceTypeId], &[ComponentTypeId]); fn prepare(&mut self, world: &World); fn accesses_archetypes(&self) -> &ArchetypeAccess; fn run(&self, world: &World); fn dispose(self: Box<Self>, world: &mut World); fn command_buffer_mut(&self) -> RefMut<Exclusive, CommandBuffer>; } /// Stages represent discrete steps of a game's loop, such as "start", "update", "draw", "end", etc. /// Stages have a defined execution order. /// /// Systems run within a stage, and commit any buffered changes to the ecs at the end of a stage /// (which may or may not be the stage within which they run, but cannot be an earlier stage). trait Stage: Copy + PartialOrd + Ord + PartialEq + Eq {} /// Executes all systems that are to be run within a single given stage. pub struct StageExecutor<'a> { systems: &'a mut [Box<dyn Schedulable>], #[cfg(feature = "par-iter")] pool: &'a rayon::ThreadPool, #[cfg(feature = "par-iter")] static_dependants: Vec<Vec<usize>>, #[cfg(feature = "par-iter")] dynamic_dependants: Vec<Vec<usize>>, #[cfg(feature = "par-iter")] static_dependency_counts: Vec<AtomicUsize>, #[cfg(feature = "par-iter")] awaiting: Vec<AtomicUsize>, } impl<'a> StageExecutor<'a> { #[cfg(not(feature = "par-iter"))] pub fn new(systems: &'a mut [Box<dyn Schedulable>]) -> Self { Self { systems } } /// Constructs a new executor for all systems to be run in a single stage. /// /// Systems are provided in the order in which side-effects (e.g. writes to resources or entities) /// are to be observed. 
#[cfg(feature = "par-iter")] #[allow(clippy::cognitive_complexity)] // TODO: we should break this up pub fn new(systems: &'a mut [Box<dyn Schedulable>], pool: &'a rayon::ThreadPool) -> Self { if systems.len() > 1 { let mut static_dependency_counts = Vec::with_capacity(systems.len()); let mut static_dependants: Vec<Vec<_>> = repeat(Vec::with_capacity(64)).take(systems.len()).collect(); let mut dynamic_dependants: Vec<Vec<_>> = repeat(Vec::with_capacity(64)).take(systems.len()).collect(); let mut resource_last_mutated = HashMap::<ResourceTypeId, usize>::with_capacity(64); let mut resource_last_read = HashMap::<ResourceTypeId, usize>::with_capacity(64); let mut component_mutated = HashMap::<ComponentTypeId, Vec<usize>>::with_capacity(64); for (i, system) in systems.iter().enumerate() { log::debug!("Building dependency: {}", system.name()); let (read_res, read_comp) = system.reads(); let (write_res, write_comp) = system.writes(); // find resource access dependencies let mut dependencies = HashSet::with_capacity(64); for res in read_res { log::trace!("Read resource: {:?}", res); if let Some(n) = resource_last_mutated.get(res) { dependencies.insert(*n); } resource_last_read.insert(*res, i); } for res in write_res { log::trace!("Write resource: {:?}", res); // Writes have to be exclusive, so we are dependent on reads too if let Some(n) = resource_last_read.get(res) { log::trace!("Added dep: {:?}", n); dependencies.insert(*n); } if let Some(n) = resource_last_mutated.get(res) { log::trace!("Added dep: {:?}", n); dependencies.insert(*n); } resource_last_mutated.insert(*res, i); } static_dependency_counts.push(AtomicUsize::from(dependencies.len())); log::debug!("dependencies: {:?}", dependencies); for dep in dependencies { log::debug!("static_dependants.push: {:?}", dep); static_dependants[dep].push(i); } // find component access dependencies let mut comp_dependencies = HashSet::new(); for comp in read_comp { if let Some(ns) = component_mutated.get(comp) { for n in ns { comp_dependencies.insert(*n); } } } for comp in write_comp { if let Some(ns) = component_mutated.get(comp) { for n in ns { comp_dependencies.insert(*n); } } component_mutated .entry(*comp) .or_insert_with(Vec::new) .push(i); } log::debug!("comp_dependencies: {:?}", &comp_dependencies); for dep in comp_dependencies { dynamic_dependants[dep].push(i); } } if log::log_enabled!(log::Level::Debug) { log::debug!("static_dependants: {:?}", static_dependants); log::debug!("dynamic_dependants: {:?}", dynamic_dependants); } let mut awaiting = Vec::with_capacity(systems.len()); systems .iter() .for_each(|_| awaiting.push(AtomicUsize::new(0))); Self { pool, awaiting, static_dependants, dynamic_dependants, static_dependency_counts, systems, } } else { Self { pool, awaiting: Vec::with_capacity(0), static_dependants: Vec::with_capacity(0), dynamic_dependants: Vec::with_capacity(0), static_dependency_counts: Vec::with_capacity(0), systems, } } } /// This is a linear executor which just runs the systems in their given order. /// /// Only enabled when par-iter is disabled. #[cfg(not(feature = "par-iter"))] pub fn
(&mut self, world: &mut World) { self.systems.iter_mut().for_each(|system| { system.run(world); }); // Flush the command buffers of all the systems self.systems.iter().for_each(|system| { system.command_buffer_mut().write(world); }); } /// Executes this stage. Execution is recursively conducted in a draining fashion. Systems are /// ordered based on 1. their resource access, and then 2. their insertion order. Systems are /// executed in the pool provided at construction, and this function does not return until all /// systems in this stage have completed. #[cfg(feature = "par-iter")] pub fn execute(&mut self, world: &mut World) { log::trace!("execute"); rayon::join( || {}, || { match self.systems.len() { 1 => { log::trace!("Single system, just run it"); self.systems[0].run(world); } _ => { log::trace!("Begin pool execution"); let systems = &mut self.systems; let static_dependency_counts = &self.static_dependency_counts; let awaiting = &mut self.awaiting; // prepare all systems - archetype filters are pre-executed here systems.par_iter_mut().for_each(|sys| sys.prepare(world)); // determine dynamic dependencies izip!( systems.iter(), self.static_dependants.iter_mut(), self.dynamic_dependants.iter_mut() ) .par_bridge() .for_each(|(sys, static_dep, dyn_dep)| { let archetypes = sys.accesses_archetypes(); for i in (0..dyn_dep.len()).rev() { let dep = dyn_dep[i]; let other = &systems[dep]; // if the archetype sets intersect, // then we can move the dynamic dependant into the static dependants set if !other.accesses_archetypes().is_disjoint(archetypes) { static_dep.push(dep); dyn_dep.swap_remove(i); static_dependency_counts[dep].fetch_add(1, Ordering::Relaxed); } } }); // initialize dependency tracking for (i, count) in static_dependency_counts.iter().enumerate() { awaiting[i].store(count.load(Ordering::Relaxed), Ordering::Relaxed); } log::trace!("Initialized awaiting: {:?}", awaiting); let awaiting = &self.awaiting; // execute all systems with no outstanding dependencies (0..systems.len()) .into_par_iter() .filter(|i| awaiting[*i].load(Ordering::SeqCst) == 0) .for_each(|i| { self.run_recursive(i, world); }); } } }, ); // Flush the command buffers of all the systems self.systems.iter().for_each(|system| { system.command_buffer_mut().write(world); }); } /// Recursively execute through the generated dependency cascade and exhaust it. #[cfg(feature = "par-iter")] fn run_recursive(&self, i: usize, world: &World) { log::trace!("run_recursive: {}", i); self.systems[i].run(world); self.static_dependants[i].par_iter().for_each(|dep| { match self.awaiting[*dep].compare_exchange( 1, std::usize::MAX, Ordering::Relaxed, Ordering::Relaxed, ) { Ok(_) => { self.run_recursive(*dep, world); } Err(_) => { self.awaiting[*dep].fetch_sub(1, Ordering::Relaxed); } } }); } }
execute
identifier_name
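Whether a dynamic dependant gets promoted to a static dependency hinges on `ArchetypeAccess::is_disjoint`: two systems that touch the same component only conflict at runtime if their archetype `BitSet`s intersect. A minimal sketch of that test with the `bit_set` crate, assuming archetypes are identified by small indices:

```rust
use bit_set::BitSet;

fn main() {
    // Archetypes touched by system A.
    let mut a = BitSet::new();
    a.insert(0);
    a.insert(3);

    // Archetypes touched by system B.
    let mut b = BitSet::new();
    b.insert(1);
    b.insert(2);

    // Disjoint sets: the dynamic dependency can be dropped for this frame.
    assert!(a.is_disjoint(&b));

    b.insert(3);
    // Overlap on archetype 3: promote the edge to a static dependency.
    assert!(!a.is_disjoint(&b));
}
```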
edge.rs
use delaunator::{Point, Triangulation}; use itertools::Itertools; use std::collections::HashMap; #[derive(Clone, Copy)] pub struct Edge { pub vertex1: usize, pub vertex2: usize, pub edge_type1: EdgeType, pub edge_type2: EdgeType, pub active: bool, pub length: f64, } #[derive(PartialEq, Copy, Clone)] pub enum EdgeType { Long, Short, Medium, Empty, } impl Edge { pub fn other(&self, index: usize) -> usize { if self.vertex1 == index { return self.vertex2; } self.vertex1 } pub fn edge_type(&self, perspective: usize) -> EdgeType { if self.vertex1 == perspective { return self.edge_type1; } self.edge_type2 } pub fn new(v1: usize, v2: usize, t1: EdgeType, t2: EdgeType, length: f64) -> Edge { Edge { vertex1: v1, vertex2: v2, edge_type1: t1, edge_type2: t2, active: true, length, } } } impl PartialEq for Edge { fn eq(&self, other: &Self) -> bool { (self.vertex1 == other.vertex1 && self.vertex2 == other.vertex2) || (self.vertex1 == other.vertex2 && self.vertex2 == other.vertex1) } } #[derive(Clone)] pub struct Graph { pub verticies: Vec<Vertex>, pub active_edges: Vec<Edge>, pub mean_std_deviation: f64, } pub trait FindLabel { fn find_label_for(&self, vertex: &Vertex) -> Option<usize>; fn find_label_for_index(&self, index: usize) -> Option<usize>; } pub trait Reasign { fn reasign(&mut self, from: usize, to: usize); } impl Graph { fn edge_is_active(&self, e: usize) -> bool { self.active_edges[e].active } pub fn is_short(&self, from: &Vertex, to: &Vertex) -> bool { let length = distance(from, to); length < from.local_mean - self.mean_std_deviation } pub fn is_long(&self, from: &Vertex, to: &Vertex) -> bool { let length = distance(from, to); length > from.local_mean + self.mean_std_deviation } pub fn calculate_type(&self, from: &Vertex, to: &Vertex) -> EdgeType { if self.is_long(from, to) { return EdgeType::Long; } if self.is_short(from, to) { return EdgeType::Short; } EdgeType::Medium } pub fn filter_edges(&self) -> Graph { let mut result = self.clone(); for edge in result.active_edges.iter_mut() { if edge.edge_type1 != EdgeType::Medium && edge.edge_type2 != EdgeType::Medium { edge.active = false; } } result } fn build_connected_component(&mut self, vertex_index: usize, label: usize)
pub fn calculate_connected_components(&mut self) { let mut cc_index = 1; while let Some(v) = self .verticies .iter_mut() .position(|x| !x.edges.is_empty() && x.label == 0) { self.build_connected_component(v, cc_index); cc_index += 1; } let groups = self.calculate_cc_sizes(); for (label, size) in groups { if size == 1 { for v in 0..self.verticies.len() { if self.verticies[v].label == label { self.verticies[v].label = 0; break; } } } } } fn calculate_cc_sizes(&self) -> HashMap<usize, usize> { let mut cc_sizes: HashMap<usize, usize> = HashMap::new(); for vertex in &self.verticies { *cc_sizes.entry(vertex.label).or_insert(0) += 1; } cc_sizes } fn reassign( &mut self, vertex_index: usize, label: usize, cc_sizes: &mut HashMap<usize, usize>, ) { if self.verticies[vertex_index].label != label { *cc_sizes .get_mut(&self.verticies[vertex_index].label) .unwrap() -= 1; *cc_sizes.get_mut(&label).unwrap() += 1; let vertex = &mut self.verticies[vertex_index]; vertex.label = label; for e in 0..vertex.edges.len() { let edge = self.verticies[vertex_index].edges[e]; let other = self.active_edges[edge].other(vertex_index); if self.active_edges[edge].edge_type(vertex_index) == EdgeType::Short && self.verticies[other].label == label { self.active_edges[edge].active = true; } if self.verticies[other].label != label { self.active_edges[edge].active = false; } } } } pub fn restore_edges(&mut self) { struct LabelReference { size: usize, label: usize, edge_index: usize, } let mut cc_sizes = self.calculate_cc_sizes(); let mut reassign_map: HashMap<usize, usize> = HashMap::new(); for i in 0..self.verticies.len() { let short_edges: Vec<&Edge> = self.verticies[i] .edges .iter() .filter(|e| self.active_edges[**e].edge_type(i) == EdgeType::Short) .map(|x| &self.active_edges[*x]) .collect(); let label = self.verticies[i].label; let mut possible_labels: Vec<LabelReference> = vec![]; for (i, e) in short_edges.iter().enumerate() { let other_label = self.verticies[e.other(i)].label; if other_label != 0 && label != other_label { let other_size = cc_sizes[&other_label]; if matches!( possible_labels.iter_mut().find(|x| x.label == other_label), None ) { possible_labels.push(LabelReference { size: other_size, label: other_label, edge_index: i, }) } } } if let Some(best_label) = possible_labels.iter().max_by_key(|x| x.size) { if best_label.label != label { *reassign_map.entry(i).or_insert(0) = best_label.label; } } } for (vertex, label) in reassign_map { self.reassign(vertex, label, &mut cc_sizes); } for i in 0..self.verticies.len() { for &edge in self.verticies[i].edges.iter() { if self.active_edges[edge].edge_type(i) == EdgeType::Short && self.verticies[self.active_edges[edge].other(i)].label == self.verticies[i].label { self.active_edges[edge].active = true; } } } } fn recalculate_k_neighbourhood(&mut self, vertex_index: usize) { let mut edge_count: usize = 0; let mut edge_sum = 0.0; for &edge1 in self.verticies[vertex_index].edges.iter() { if self.edge_is_active(edge1) { let other = self.active_edges[edge1].other(vertex_index); for &edge2 in self.verticies[other].edges.iter() { if self.edge_is_active(edge2) { edge_sum += self.active_edges[edge2].length; edge_count += 1; } } } } self.verticies[vertex_index].local_mean = edge_sum / edge_count as f64; self.verticies[vertex_index].local_std_dev = local_std_deviation(vertex_index, &self.active_edges, &self.verticies) } pub fn recalculate_mean_with_k_neighbourhood(&mut self) { for v in 0..self.verticies.len() { self.recalculate_k_neighbourhood(v); self.verticies[v].label = 0; } 
self.mean_std_deviation = self.mean_std_deviation(); for v in 0..self.verticies.len() { for e in 0..self.verticies[v].edges.len() { let other = self.active_edges[self.verticies[v].edges[e]].other(v); for e2 in 0..self.verticies[other].edges.len() { let is_long = self.active_edges[self.verticies[other].edges[e2]].length > self.verticies[v].local_mean + self.mean_std_deviation; if is_long { self.active_edges[self.verticies[other].edges[e2]].active = false; self.mean_std_deviation = self.mean_std_deviation(); self.verticies[v].local_std_dev = local_std_deviation(v, &self.active_edges, &self.verticies); } } } } } fn mean_std_deviation(&self) -> f64 { self.verticies .iter() .fold(0.0, |acc, v| acc + v.local_std_dev) / self.verticies.len() as f64 } } #[derive(Clone, Debug)] pub struct Vertex { index: usize, pub point: Point, local_mean: f64, local_std_dev: f64, edges: Vec<usize>, pub label: usize, } impl Vertex { fn new(index: usize, point: Point, edges: Vec<usize>) -> Vertex { Vertex { index, point, local_mean: 0.0, local_std_dev: 0.0, edges, label: 0, } } } pub trait ToGraph { fn to_graph(&self, points: &[Point]) -> Graph; } impl ToGraph for Triangulation { fn to_graph(&self, points: &[Point]) -> Graph { let all_edges = all_edges(self, points); let mut verticies: Vec<Vertex> = vec![]; for (i, p) in points.iter().enumerate() { let vertex = Vertex::new(i, Point { x: p.x, y: p.y }, neighborhood(i, &all_edges)); verticies.push(vertex); } for v in verticies.iter_mut() { v.local_mean = local_mean(&all_edges, &v.edges) } for i in 0..points.len() { verticies[i].local_std_dev = local_std_deviation(i, &all_edges, &verticies); } let mut result = Graph { verticies, mean_std_deviation: 0.0, active_edges: all_edges, }; result.mean_std_deviation = result.mean_std_deviation(); for i in 0..result.active_edges.len() { result.active_edges[i].edge_type1 = result.calculate_type( &result.verticies[result.active_edges[i].vertex1], &result.verticies[result.active_edges[i].vertex2], ); result.active_edges[i].edge_type2 = result.calculate_type( &result.verticies[result.active_edges[i].vertex2], &result.verticies[result.active_edges[i].vertex1], ); } result } } fn distance(p1: &Vertex, p2: &Vertex) -> f64 { ((p1.point.x - p2.point.x).powi(2) + (p1.point.y - p2.point.y).powi(2)).sqrt() } fn distance_point(p1: &Point, p2: &Point) -> f64 { ((p1.x - p2.x).powi(2) + (p1.y - p2.y).powi(2)).sqrt() } pub fn all_edges(graph: &Triangulation, points: &[Point]) -> Vec<Edge> { let mut result: Vec<Edge> = vec![]; for t in graph.triangles.iter().batching(|it| match it.next() { None => None, Some(x) => match it.next() { None => None, Some(y) => match it.next() { None => None, Some(z) => Some((*x, *y, *z)), }, }, }) { let e1 = Edge::new( t.0, t.1, EdgeType::Empty, EdgeType::Empty, distance_point(&points[t.0], &points[t.1]), ); let e2 = Edge::new( t.1, t.2, EdgeType::Empty, EdgeType::Empty, distance_point(&points[t.1], &points[t.2]), ); let e3 = Edge::new( t.2, t.0, EdgeType::Empty, EdgeType::Empty, distance_point(&points[t.2], &points[t.0]), ); if !result.contains(&e1) { result.push(e1); } if !result.contains(&e2) { result.push(e2); } if !result.contains(&e3) { result.push(e3); } } result } pub fn edges(point_index: usize, all_edges: &[Edge]) -> Vec<usize> { let mut result: Vec<usize> = vec![]; for (i, edge) in all_edges.iter().enumerate() { if edge.vertex1 == point_index || edge.vertex2 == point_index { result.push(i); } } result } fn neighborhood(point_index: usize, all_edges: &[Edge]) -> Vec<usize> { edges(point_index, all_edges) } pub 
fn local_mean(edges: &[Edge], edge_indicies: &[usize]) -> f64 { let mut result = 0.0; for index in edge_indicies { result += edges[*index].length; } result / (edge_indicies.len() as f64) } fn local_std_deviation(point_index: usize, edges: &[Edge], points: &[Vertex]) -> f64 { let mut result = 0.0; let current_vertex = &points[point_index]; for edge in current_vertex .edges .iter() .map(|x| &edges[*x]) .filter(|x| x.active) { result += (current_vertex.local_mean - edge.length).powi(2) / (current_vertex .edges .iter() .map(|x| &edges[*x]) .filter(|x| x.active) .count() as f64); } result.sqrt() }
{ if self.verticies[vertex_index].label != label { self.verticies[vertex_index].label = label; for i in 0..self.verticies[vertex_index].edges.len() { let edge_index = self.verticies[vertex_index].edges[i]; if self.edge_is_active(edge_index) && self.verticies[self.active_edges[edge_index].other(vertex_index)].label == 0 { self.build_connected_component( self.active_edges[edge_index].other(vertex_index), label, ); } } } }
identifier_body
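The `build_connected_component` body shown as the middle above is a recursive flood fill: label the vertex, then recurse into every unlabelled neighbour reachable over an active edge. A minimal sketch of the same idea over a plain adjacency list, with the `Edge`/`Vertex` machinery stripped away (the `flood` helper is hypothetical):

```rust
/// Recursively labels everything reachable from `start` with `label`,
/// treating a label of 0 as "not yet visited".
fn flood(adj: &[Vec<usize>], labels: &mut [usize], start: usize, label: usize) {
    if labels[start] == label {
        return; // already part of this component
    }
    labels[start] = label;
    for &next in &adj[start] {
        if labels[next] == 0 {
            flood(adj, labels, next, label);
        }
    }
}

fn main() {
    // Two components: {0, 1, 2} and {3, 4}.
    let adj = vec![vec![1], vec![0, 2], vec![1], vec![4], vec![3]];
    let mut labels = vec![0; adj.len()];
    flood(&adj, &mut labels, 0, 1);
    flood(&adj, &mut labels, 3, 2);
    assert_eq!(labels, vec![1, 1, 1, 2, 2]);
}
```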
edge.rs
use delaunator::{Point, Triangulation}; use itertools::Itertools; use std::collections::HashMap; #[derive(Clone, Copy)] pub struct Edge { pub vertex1: usize, pub vertex2: usize, pub edge_type1: EdgeType, pub edge_type2: EdgeType, pub active: bool, pub length: f64, } #[derive(PartialEq, Copy, Clone)] pub enum EdgeType { Long, Short, Medium, Empty, } impl Edge { pub fn other(&self, index: usize) -> usize { if self.vertex1 == index { return self.vertex2; } self.vertex1 } pub fn edge_type(&self, perspective: usize) -> EdgeType { if self.vertex1 == perspective { return self.edge_type1; } self.edge_type2 } pub fn new(v1: usize, v2: usize, t1: EdgeType, t2: EdgeType, length: f64) -> Edge { Edge { vertex1: v1, vertex2: v2, edge_type1: t1, edge_type2: t2, active: true, length, } } } impl PartialEq for Edge { fn eq(&self, other: &Self) -> bool { (self.vertex1 == other.vertex1 && self.vertex2 == other.vertex2) || (self.vertex1 == other.vertex2 && self.vertex2 == other.vertex1) } } #[derive(Clone)] pub struct Graph { pub verticies: Vec<Vertex>, pub active_edges: Vec<Edge>, pub mean_std_deviation: f64, } pub trait FindLabel { fn find_label_for(&self, vertex: &Vertex) -> Option<usize>; fn find_label_for_index(&self, index: usize) -> Option<usize>; } pub trait Reasign { fn reasign(&mut self, from: usize, to: usize); } impl Graph { fn edge_is_active(&self, e: usize) -> bool { self.active_edges[e].active } pub fn is_short(&self, from: &Vertex, to: &Vertex) -> bool { let length = distance(from, to); length < from.local_mean - self.mean_std_deviation } pub fn is_long(&self, from: &Vertex, to: &Vertex) -> bool { let length = distance(from, to); length > from.local_mean + self.mean_std_deviation } pub fn calculate_type(&self, from: &Vertex, to: &Vertex) -> EdgeType { if self.is_long(from, to) { return EdgeType::Long; } if self.is_short(from, to) { return EdgeType::Short; } EdgeType::Medium } pub fn filter_edges(&self) -> Graph { let mut result = self.clone(); for edge in result.active_edges.iter_mut() { if edge.edge_type1 != EdgeType::Medium && edge.edge_type2 != EdgeType::Medium { edge.active = false; } } result } fn build_connected_component(&mut self, vertex_index: usize, label: usize) { if self.verticies[vertex_index].label != label { self.verticies[vertex_index].label = label; for i in 0..self.verticies[vertex_index].edges.len() { let edge_index = self.verticies[vertex_index].edges[i]; if self.edge_is_active(edge_index) && self.verticies[self.active_edges[edge_index].other(vertex_index)].label == 0 { self.build_connected_component( self.active_edges[edge_index].other(vertex_index), label, ); } } } } pub fn calculate_connected_components(&mut self) { let mut cc_index = 1; while let Some(v) = self .verticies .iter_mut() .position(|x| !x.edges.is_empty() && x.label == 0) { self.build_connected_component(v, cc_index); cc_index += 1; } let groups = self.calculate_cc_sizes(); for (label, size) in groups { if size == 1 { for v in 0..self.verticies.len() { if self.verticies[v].label == label { self.verticies[v].label = 0; break; } } } } } fn calculate_cc_sizes(&self) -> HashMap<usize, usize> { let mut cc_sizes: HashMap<usize, usize> = HashMap::new(); for vertex in &self.verticies { *cc_sizes.entry(vertex.label).or_insert(0) += 1; } cc_sizes } fn reassign( &mut self, vertex_index: usize, label: usize, cc_sizes: &mut HashMap<usize, usize>, ) { if self.verticies[vertex_index].label != label { *cc_sizes .get_mut(&self.verticies[vertex_index].label) .unwrap() -= 1; *cc_sizes.get_mut(&label).unwrap() += 1; let vertex = &mut 
self.verticies[vertex_index]; vertex.label = label; for e in 0..vertex.edges.len() { let edge = self.verticies[vertex_index].edges[e]; let other = self.active_edges[edge].other(vertex_index); if self.active_edges[edge].edge_type(vertex_index) == EdgeType::Short && self.verticies[other].label == label {
} } } } pub fn restore_edges(&mut self) { struct LabelReference { size: usize, label: usize, edge_index: usize, } let mut cc_sizes = self.calculate_cc_sizes(); let mut reassign_map: HashMap<usize, usize> = HashMap::new(); for i in 0..self.verticies.len() { let short_edges: Vec<&Edge> = self.verticies[i] .edges .iter() .filter(|e| self.active_edges[**e].edge_type(i) == EdgeType::Short) .map(|x| &self.active_edges[*x]) .collect(); let label = self.verticies[i].label; let mut possible_labels: Vec<LabelReference> = vec![]; for (i, e) in short_edges.iter().enumerate() { let other_label = self.verticies[e.other(i)].label; if other_label != 0 && label != other_label { let other_size = cc_sizes[&other_label]; if matches!( possible_labels.iter_mut().find(|x| x.label == other_label), None ) { possible_labels.push(LabelReference { size: other_size, label: other_label, edge_index: i, }) } } } if let Some(best_label) = possible_labels.iter().max_by_key(|x| x.size) { if best_label.label != label { *reassign_map.entry(i).or_insert(0) = best_label.label; } } } for (vertex, label) in reassign_map { self.reassign(vertex, label, &mut cc_sizes); } for i in 0..self.verticies.len() { for &edge in self.verticies[i].edges.iter() { if self.active_edges[edge].edge_type(i) == EdgeType::Short && self.verticies[self.active_edges[edge].other(i)].label == self.verticies[i].label { self.active_edges[edge].active = true; } } } } fn recalculate_k_neighbourhood(&mut self, vertex_index: usize) { let mut edge_count: usize = 0; let mut edge_sum = 0.0; for &edge1 in self.verticies[vertex_index].edges.iter() { if self.edge_is_active(edge1) { let other = self.active_edges[edge1].other(vertex_index); for &edge2 in self.verticies[other].edges.iter() { if self.edge_is_active(edge2) { edge_sum += self.active_edges[edge2].length; edge_count += 1; } } } } self.verticies[vertex_index].local_mean = edge_sum / edge_count as f64; self.verticies[vertex_index].local_std_dev = local_std_deviation(vertex_index, &self.active_edges, &self.verticies) } pub fn recalculate_mean_with_k_neighbourhood(&mut self) { for v in 0..self.verticies.len() { self.recalculate_k_neighbourhood(v); self.verticies[v].label = 0; } self.mean_std_deviation = self.mean_std_deviation(); for v in 0..self.verticies.len() { for e in 0..self.verticies[v].edges.len() { let other = self.active_edges[self.verticies[v].edges[e]].other(v); for e2 in 0..self.verticies[other].edges.len() { let is_long = self.active_edges[self.verticies[other].edges[e2]].length > self.verticies[v].local_mean + self.mean_std_deviation; if is_long { self.active_edges[self.verticies[other].edges[e2]].active = false; self.mean_std_deviation = self.mean_std_deviation(); self.verticies[v].local_std_dev = local_std_deviation(v, &self.active_edges, &self.verticies); } } } } } fn mean_std_deviation(&self) -> f64 { self.verticies .iter() .fold(0.0, |acc, v| acc + v.local_std_dev) / self.verticies.len() as f64 } } #[derive(Clone, Debug)] pub struct Vertex { index: usize, pub point: Point, local_mean: f64, local_std_dev: f64, edges: Vec<usize>, pub label: usize, } impl Vertex { fn new(index: usize, point: Point, edges: Vec<usize>) -> Vertex { Vertex { index, point, local_mean: 0.0, local_std_dev: 0.0, edges, label: 0, } } } pub trait ToGraph { fn to_graph(&self, points: &[Point]) -> Graph; } impl ToGraph for Triangulation { fn to_graph(&self, points: &[Point]) -> Graph { let all_edges = all_edges(self, points); let mut verticies: Vec<Vertex> = vec![]; for (i, p) in points.iter().enumerate() { let vertex = 
Vertex::new(i, Point { x: p.x, y: p.y }, neighborhood(i, &all_edges)); verticies.push(vertex); } for v in verticies.iter_mut() { v.local_mean = local_mean(&all_edges, &v.edges) } for i in 0..points.len() { verticies[i].local_std_dev = local_std_deviation(i, &all_edges, &verticies); } let mut result = Graph { verticies, mean_std_deviation: 0.0, active_edges: all_edges, }; result.mean_std_deviation = result.mean_std_deviation(); for i in 0..result.active_edges.len() { result.active_edges[i].edge_type1 = result.calculate_type( &result.verticies[result.active_edges[i].vertex1], &result.verticies[result.active_edges[i].vertex2], ); result.active_edges[i].edge_type2 = result.calculate_type( &result.verticies[result.active_edges[i].vertex2], &result.verticies[result.active_edges[i].vertex1], ); } result } } fn distance(p1: &Vertex, p2: &Vertex) -> f64 { ((p1.point.x - p2.point.x).powi(2) + (p1.point.y - p2.point.y).powi(2)).sqrt() } fn distance_point(p1: &Point, p2: &Point) -> f64 { ((p1.x - p2.x).powi(2) + (p1.y - p2.y).powi(2)).sqrt() } pub fn all_edges(graph: &Triangulation, points: &[Point]) -> Vec<Edge> { let mut result: Vec<Edge> = vec![]; for t in graph.triangles.iter().batching(|it| match it.next() { None => None, Some(x) => match it.next() { None => None, Some(y) => match it.next() { None => None, Some(z) => Some((*x, *y, *z)), }, }, }) { let e1 = Edge::new( t.0, t.1, EdgeType::Empty, EdgeType::Empty, distance_point(&points[t.0], &points[t.1]), ); let e2 = Edge::new( t.1, t.2, EdgeType::Empty, EdgeType::Empty, distance_point(&points[t.1], &points[t.2]), ); let e3 = Edge::new( t.2, t.0, EdgeType::Empty, EdgeType::Empty, distance_point(&points[t.2], &points[t.0]), ); if !result.contains(&e1) { result.push(e1); } if !result.contains(&e2) { result.push(e2); } if !result.contains(&e3) { result.push(e3); } } result } pub fn edges(point_index: usize, all_edges: &[Edge]) -> Vec<usize> { let mut result: Vec<usize> = vec![]; for (i, edge) in all_edges.iter().enumerate() { if edge.vertex1 == point_index || edge.vertex2 == point_index { result.push(i); } } result } fn neighborhood(point_index: usize, all_edges: &[Edge]) -> Vec<usize> { edges(point_index, all_edges) } pub fn local_mean(edges: &[Edge], edge_indicies: &[usize]) -> f64 { let mut result = 0.0; for index in edge_indicies { result += edges[*index].length; } result / (edge_indicies.len() as f64) } fn local_std_deviation(point_index: usize, edges: &[Edge], points: &[Vertex]) -> f64 { let mut result = 0.0; let current_vertex = &points[point_index]; for edge in current_vertex .edges .iter() .map(|x| &edges[*x]) .filter(|x| x.active) { result += (current_vertex.local_mean - edge.length).powi(2) / (current_vertex .edges .iter() .map(|x| &edges[*x]) .filter(|x| x.active) .count() as f64); } result.sqrt() }
self.active_edges[edge].active = true; } if self.verticies[other].label != label { self.active_edges[edge].active = false;
random_line_split
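`calculate_type` classifies each edge from the perspective of one endpoint: longer than that vertex's local mean plus the graph-wide mean standard deviation is `Long`, shorter than the mean minus it is `Short`, and everything else is `Medium`. A minimal standalone sketch of that three-way split (the `EdgeKind` and `classify` names are hypothetical):

```rust
#[derive(Debug, PartialEq)]
enum EdgeKind {
    Long,
    Short,
    Medium,
}

/// `local_mean` is the mean incident-edge length at the viewing vertex;
/// `spread` is the graph-wide mean standard deviation.
fn classify(length: f64, local_mean: f64, spread: f64) -> EdgeKind {
    if length > local_mean + spread {
        EdgeKind::Long
    } else if length < local_mean - spread {
        EdgeKind::Short
    } else {
        EdgeKind::Medium
    }
}

fn main() {
    assert_eq!(EdgeKind::Long, classify(10.0, 5.0, 2.0));
    assert_eq!(EdgeKind::Short, classify(1.0, 5.0, 2.0));
    assert_eq!(EdgeKind::Medium, classify(6.0, 5.0, 2.0));
}
```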
edge.rs
use delaunator::{Point, Triangulation}; use itertools::Itertools; use std::collections::HashMap; #[derive(Clone, Copy)] pub struct Edge { pub vertex1: usize, pub vertex2: usize, pub edge_type1: EdgeType, pub edge_type2: EdgeType, pub active: bool, pub length: f64, } #[derive(PartialEq, Copy, Clone)] pub enum EdgeType { Long, Short, Medium, Empty, } impl Edge { pub fn other(&self, index: usize) -> usize { if self.vertex1 == index { return self.vertex2; } self.vertex1 } pub fn edge_type(&self, perspective: usize) -> EdgeType { if self.vertex1 == perspective { return self.edge_type1; } self.edge_type2 } pub fn new(v1: usize, v2: usize, t1: EdgeType, t2: EdgeType, length: f64) -> Edge { Edge { vertex1: v1, vertex2: v2, edge_type1: t1, edge_type2: t2, active: true, length, } } } impl PartialEq for Edge { fn eq(&self, other: &Self) -> bool { (self.vertex1 == other.vertex1 && self.vertex2 == other.vertex2) || (self.vertex1 == other.vertex2 && self.vertex2 == other.vertex1) } } #[derive(Clone)] pub struct Graph { pub verticies: Vec<Vertex>, pub active_edges: Vec<Edge>, pub mean_std_deviation: f64, } pub trait FindLabel { fn find_label_for(&self, vertex: &Vertex) -> Option<usize>; fn find_label_for_index(&self, index: usize) -> Option<usize>; } pub trait Reasign { fn reasign(&mut self, from: usize, to: usize); } impl Graph { fn edge_is_active(&self, e: usize) -> bool { self.active_edges[e].active } pub fn is_short(&self, from: &Vertex, to: &Vertex) -> bool { let length = distance(from, to); length < from.local_mean - self.mean_std_deviation } pub fn is_long(&self, from: &Vertex, to: &Vertex) -> bool { let length = distance(from, to); length > from.local_mean + self.mean_std_deviation } pub fn calculate_type(&self, from: &Vertex, to: &Vertex) -> EdgeType { if self.is_long(from, to) { return EdgeType::Long; } if self.is_short(from, to) { return EdgeType::Short; } EdgeType::Medium } pub fn filter_edges(&self) -> Graph { let mut result = self.clone(); for edge in result.active_edges.iter_mut() { if edge.edge_type1 != EdgeType::Medium && edge.edge_type2 != EdgeType::Medium { edge.active = false; } } result } fn build_connected_component(&mut self, vertex_index: usize, label: usize) { if self.verticies[vertex_index].label != label
} pub fn calculate_connected_components(&mut self) { let mut cc_index = 1; while let Some(v) = self .verticies .iter_mut() .position(|x| !x.edges.is_empty() && x.label == 0) { self.build_connected_component(v, cc_index); cc_index += 1; } let groups = self.calculate_cc_sizes(); for (label, size) in groups { if size == 1 { for v in 0..self.verticies.len() { if self.verticies[v].label == label { self.verticies[v].label = 0; break; } } } } } fn calculate_cc_sizes(&self) -> HashMap<usize, usize> { let mut cc_sizes: HashMap<usize, usize> = HashMap::new(); for vertex in &self.verticies { *cc_sizes.entry(vertex.label).or_insert(0) += 1; } cc_sizes } fn reassign( &mut self, vertex_index: usize, label: usize, cc_sizes: &mut HashMap<usize, usize>, ) { if self.verticies[vertex_index].label != label { *cc_sizes .get_mut(&self.verticies[vertex_index].label) .unwrap() -= 1; *cc_sizes.get_mut(&label).unwrap() += 1; let vertex = &mut self.verticies[vertex_index]; vertex.label = label; for e in 0..vertex.edges.len() { let edge = self.verticies[vertex_index].edges[e]; let other = self.active_edges[edge].other(vertex_index); if self.active_edges[edge].edge_type(vertex_index) == EdgeType::Short && self.verticies[other].label == label { self.active_edges[edge].active = true; } if self.verticies[other].label != label { self.active_edges[edge].active = false; } } } } pub fn restore_edges(&mut self) { struct LabelReference { size: usize, label: usize, edge_index: usize, } let mut cc_sizes = self.calculate_cc_sizes(); let mut reassign_map: HashMap<usize, usize> = HashMap::new(); for i in 0..self.verticies.len() { let short_edges: Vec<&Edge> = self.verticies[i] .edges .iter() .filter(|e| self.active_edges[**e].edge_type(i) == EdgeType::Short) .map(|x| &self.active_edges[*x]) .collect(); let label = self.verticies[i].label; let mut possible_labels: Vec<LabelReference> = vec![]; for (i, e) in short_edges.iter().enumerate() { let other_label = self.verticies[e.other(i)].label; if other_label != 0 && label != other_label { let other_size = cc_sizes[&other_label]; if matches!( possible_labels.iter_mut().find(|x| x.label == other_label), None ) { possible_labels.push(LabelReference { size: other_size, label: other_label, edge_index: i, }) } } } if let Some(best_label) = possible_labels.iter().max_by_key(|x| x.size) { if best_label.label != label { *reassign_map.entry(i).or_insert(0) = best_label.label; } } } for (vertex, label) in reassign_map { self.reassign(vertex, label, &mut cc_sizes); } for i in 0..self.verticies.len() { for &edge in self.verticies[i].edges.iter() { if self.active_edges[edge].edge_type(i) == EdgeType::Short && self.verticies[self.active_edges[edge].other(i)].label == self.verticies[i].label { self.active_edges[edge].active = true; } } } } fn recalculate_k_neighbourhood(&mut self, vertex_index: usize) { let mut edge_count: usize = 0; let mut edge_sum = 0.0; for &edge1 in self.verticies[vertex_index].edges.iter() { if self.edge_is_active(edge1) { let other = self.active_edges[edge1].other(vertex_index); for &edge2 in self.verticies[other].edges.iter() { if self.edge_is_active(edge2) { edge_sum += self.active_edges[edge2].length; edge_count += 1; } } } } self.verticies[vertex_index].local_mean = edge_sum / edge_count as f64; self.verticies[vertex_index].local_std_dev = local_std_deviation(vertex_index, &self.active_edges, &self.verticies) } pub fn recalculate_mean_with_k_neighbourhood(&mut self) { for v in 0..self.verticies.len() { self.recalculate_k_neighbourhood(v); self.verticies[v].label = 0; } 
self.mean_std_deviation = self.mean_std_deviation(); for v in 0..self.verticies.len() { for e in 0..self.verticies[v].edges.len() { let other = self.active_edges[self.verticies[v].edges[e]].other(v); for e2 in 0..self.verticies[other].edges.len() { let is_long = self.active_edges[self.verticies[other].edges[e2]].length > self.verticies[v].local_mean + self.mean_std_deviation; if is_long { self.active_edges[self.verticies[other].edges[e2]].active = false; self.mean_std_deviation = self.mean_std_deviation(); self.verticies[v].local_std_dev = local_std_deviation(v, &self.active_edges, &self.verticies); } } } } } fn mean_std_deviation(&self) -> f64 { self.verticies .iter() .fold(0.0, |acc, v| acc + v.local_std_dev) / self.verticies.len() as f64 } } #[derive(Clone, Debug)] pub struct Vertex { index: usize, pub point: Point, local_mean: f64, local_std_dev: f64, edges: Vec<usize>, pub label: usize, } impl Vertex { fn new(index: usize, point: Point, edges: Vec<usize>) -> Vertex { Vertex { index, point, local_mean: 0.0, local_std_dev: 0.0, edges, label: 0, } } } pub trait ToGraph { fn to_graph(&self, points: &[Point]) -> Graph; } impl ToGraph for Triangulation { fn to_graph(&self, points: &[Point]) -> Graph { let all_edges = all_edges(self, points); let mut verticies: Vec<Vertex> = vec![]; for (i, p) in points.iter().enumerate() { let vertex = Vertex::new(i, Point { x: p.x, y: p.y }, neighborhood(i, &all_edges)); verticies.push(vertex); } for v in verticies.iter_mut() { v.local_mean = local_mean(&all_edges, &v.edges) } for i in 0..points.len() { verticies[i].local_std_dev = local_std_deviation(i, &all_edges, &verticies); } let mut result = Graph { verticies, mean_std_deviation: 0.0, active_edges: all_edges, }; result.mean_std_deviation = result.mean_std_deviation(); for i in 0..result.active_edges.len() { result.active_edges[i].edge_type1 = result.calculate_type( &result.verticies[result.active_edges[i].vertex1], &result.verticies[result.active_edges[i].vertex2], ); result.active_edges[i].edge_type2 = result.calculate_type( &result.verticies[result.active_edges[i].vertex2], &result.verticies[result.active_edges[i].vertex1], ); } result } } fn distance(p1: &Vertex, p2: &Vertex) -> f64 { ((p1.point.x - p2.point.x).powi(2) + (p1.point.y - p2.point.y).powi(2)).sqrt() } fn distance_point(p1: &Point, p2: &Point) -> f64 { ((p1.x - p2.x).powi(2) + (p1.y - p2.y).powi(2)).sqrt() } pub fn all_edges(graph: &Triangulation, points: &[Point]) -> Vec<Edge> { let mut result: Vec<Edge> = vec![]; for t in graph.triangles.iter().batching(|it| match it.next() { None => None, Some(x) => match it.next() { None => None, Some(y) => match it.next() { None => None, Some(z) => Some((*x, *y, *z)), }, }, }) { let e1 = Edge::new( t.0, t.1, EdgeType::Empty, EdgeType::Empty, distance_point(&points[t.0], &points[t.1]), ); let e2 = Edge::new( t.1, t.2, EdgeType::Empty, EdgeType::Empty, distance_point(&points[t.1], &points[t.2]), ); let e3 = Edge::new( t.2, t.0, EdgeType::Empty, EdgeType::Empty, distance_point(&points[t.2], &points[t.0]), ); if !result.contains(&e1) { result.push(e1); } if !result.contains(&e2) { result.push(e2); } if !result.contains(&e3) { result.push(e3); } } result } pub fn edges(point_index: usize, all_edges: &[Edge]) -> Vec<usize> { let mut result: Vec<usize> = vec![]; for (i, edge) in all_edges.iter().enumerate() { if edge.vertex1 == point_index || edge.vertex2 == point_index { result.push(i); } } result } fn neighborhood(point_index: usize, all_edges: &[Edge]) -> Vec<usize> { edges(point_index, all_edges) } pub 
fn local_mean(edges: &[Edge], edge_indicies: &[usize]) -> f64 {
    // Mean length of the edges incident to a single vertex.
    let mut result = 0.0;
    for index in edge_indicies {
        result += edges[*index].length;
    }
    result / (edge_indicies.len() as f64)
}

fn local_std_deviation(point_index: usize, edges: &[Edge], points: &[Vertex]) -> f64 {
    let mut result = 0.0;
    let current_vertex = &points[point_index];
    // Count the active incident edges once, rather than re-filtering the
    // whole neighbourhood on every loop iteration.
    let active_count = current_vertex
        .edges
        .iter()
        .map(|x| &edges[*x])
        .filter(|x| x.active)
        .count() as f64;
    for edge in current_vertex
        .edges
        .iter()
        .map(|x| &edges[*x])
        .filter(|x| x.active)
    {
        result += (current_vertex.local_mean - edge.length).powi(2) / active_count;
    }
    result.sqrt()
}
{ self.verticies[vertex_index].label = label; for i in 0..self.verticies[vertex_index].edges.len() { let edge_index = self.verticies[vertex_index].edges[i]; if self.edge_is_active(edge_index) && self.verticies[self.active_edges[edge_index].other(vertex_index)].label == 0 { self.build_connected_component( self.active_edges[edge_index].other(vertex_index), label, ); } } }
conditional_block
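The `conditional_block` above is the body of `build_connected_component`: a recursive flood fill that stamps a label on a vertex and recurses into still-unlabelled neighbours over active edges. A minimal standalone sketch of the same idea over a plain adjacency list (`adj` and `labels` are hypothetical names, not the `Graph` type above):

```rust
fn flood_fill(adj: &[Vec<usize>], labels: &mut [usize], v: usize, label: usize) {
    labels[v] = label;
    for &n in &adj[v] {
        if labels[n] == 0 {
            flood_fill(adj, labels, n, label);
        }
    }
}

fn main() {
    // Two components: {0, 1} and {2}.
    let adj = vec![vec![1], vec![0], vec![]];
    let mut labels = vec![0; adj.len()];
    let mut next = 1;
    for v in 0..adj.len() {
        if labels[v] == 0 {
            flood_fill(&adj, &mut labels, v, next);
            next += 1;
        }
    }
    assert_eq!(labels, vec![1, 1, 2]);
}
```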
edge.rs
use delaunator::{Point, Triangulation}; use itertools::Itertools; use std::collections::HashMap; #[derive(Clone, Copy)] pub struct Edge { pub vertex1: usize, pub vertex2: usize, pub edge_type1: EdgeType, pub edge_type2: EdgeType, pub active: bool, pub length: f64, } #[derive(PartialEq, Copy, Clone)] pub enum EdgeType { Long, Short, Medium, Empty, } impl Edge { pub fn other(&self, index: usize) -> usize { if self.vertex1 == index { return self.vertex2; } self.vertex1 } pub fn edge_type(&self, perspective: usize) -> EdgeType { if self.vertex1 == perspective { return self.edge_type1; } self.edge_type2 } pub fn new(v1: usize, v2: usize, t1: EdgeType, t2: EdgeType, length: f64) -> Edge { Edge { vertex1: v1, vertex2: v2, edge_type1: t1, edge_type2: t2, active: true, length, } } } impl PartialEq for Edge { fn eq(&self, other: &Self) -> bool { (self.vertex1 == other.vertex1 && self.vertex2 == other.vertex2) || (self.vertex1 == other.vertex2 && self.vertex2 == other.vertex1) } } #[derive(Clone)] pub struct Graph { pub verticies: Vec<Vertex>, pub active_edges: Vec<Edge>, pub mean_std_deviation: f64, } pub trait FindLabel { fn find_label_for(&self, vertex: &Vertex) -> Option<usize>; fn find_label_for_index(&self, index: usize) -> Option<usize>; } pub trait Reasign { fn reasign(&mut self, from: usize, to: usize); } impl Graph { fn edge_is_active(&self, e: usize) -> bool { self.active_edges[e].active } pub fn is_short(&self, from: &Vertex, to: &Vertex) -> bool { let length = distance(from, to); length < from.local_mean - self.mean_std_deviation } pub fn is_long(&self, from: &Vertex, to: &Vertex) -> bool { let length = distance(from, to); length > from.local_mean + self.mean_std_deviation } pub fn calculate_type(&self, from: &Vertex, to: &Vertex) -> EdgeType { if self.is_long(from, to) { return EdgeType::Long; } if self.is_short(from, to) { return EdgeType::Short; } EdgeType::Medium } pub fn filter_edges(&self) -> Graph { let mut result = self.clone(); for edge in result.active_edges.iter_mut() { if edge.edge_type1!= EdgeType::Medium && edge.edge_type2!= EdgeType::Medium { edge.active = false; } } result } fn build_connected_component(&mut self, vertex_index: usize, label: usize) { if self.verticies[vertex_index].label!= label { self.verticies[vertex_index].label = label; for i in 0..self.verticies[vertex_index].edges.len() { let edge_index = self.verticies[vertex_index].edges[i]; if self.edge_is_active(edge_index) && self.verticies[self.active_edges[edge_index].other(vertex_index)].label == 0 { self.build_connected_component( self.active_edges[edge_index].other(vertex_index), label, ); } } } } pub fn calculate_connected_components(&mut self) { let mut cc_index = 1; while let Some(v) = self .verticies .iter_mut() .position(|x|!x.edges.is_empty() && x.label == 0) { self.build_connected_component(v, cc_index); cc_index += 1; } let groups = self.calculate_cc_sizes(); for (label, size) in groups { if size == 1 { for v in 0..self.verticies.len() { if self.verticies[v].label == label { self.verticies[v].label = 0; break; } } } } } fn calculate_cc_sizes(&self) -> HashMap<usize, usize> { let mut cc_sizes: HashMap<usize, usize> = HashMap::new(); for vertex in &self.verticies { *cc_sizes.entry(vertex.label).or_insert(0) += 1; } cc_sizes } fn reassign( &mut self, vertex_index: usize, label: usize, cc_sizes: &mut HashMap<usize, usize>, ) { if self.verticies[vertex_index].label!= label { *cc_sizes .get_mut(&self.verticies[vertex_index].label) .unwrap() -= 1; *cc_sizes.get_mut(&label).unwrap() += 1; let vertex = &mut 
self.verticies[vertex_index]; vertex.label = label; for e in 0..vertex.edges.len() { let edge = self.verticies[vertex_index].edges[e]; let other = self.active_edges[edge].other(vertex_index); if self.active_edges[edge].edge_type(vertex_index) == EdgeType::Short && self.verticies[other].label == label { self.active_edges[edge].active = true; } if self.verticies[other].label!= label { self.active_edges[edge].active = false; } } } } pub fn restore_edges(&mut self) { struct
{ size: usize, label: usize, edge_index: usize, }; let mut cc_sizes = self.calculate_cc_sizes(); let mut reassign_map: HashMap<usize, usize> = HashMap::new(); for i in 0..self.verticies.len() { let short_edges: Vec<&Edge> = self.verticies[i] .edges .iter() .filter(|e| self.active_edges[**e].edge_type(i) == EdgeType::Short) .map(|x| &self.active_edges[*x]) .collect(); let label = self.verticies[i].label; let mut possible_labels: Vec<LabelReference> = vec![]; for (i, e) in short_edges.iter().enumerate() { let other_label = self.verticies[e.other(i)].label; if other_label!= 0 && label!= other_label { let other_size = cc_sizes[&other_label]; if matches!( possible_labels.iter_mut().find(|x| x.label == other_label), None ) { possible_labels.push(LabelReference { size: other_size, label: other_label, edge_index: i, }) } } } if let Some(best_label) = possible_labels.iter().max_by_key(|x| x.size) { if best_label.label!= label { *reassign_map.entry(i).or_insert(0) = best_label.label; } } } for (vertex, label) in reassign_map { self.reassign(vertex, label, &mut cc_sizes); } for i in 0..self.verticies.len() { for &edge in self.verticies[i].edges.iter() { if self.active_edges[edge].edge_type(i) == EdgeType::Short && self.verticies[self.active_edges[edge].other(i)].label == self.verticies[i].label { self.active_edges[edge].active = true; } } } } fn recalculate_k_neighbourhood(&mut self, vertex_index: usize) { let mut edge_count: usize = 0; let mut edge_sum = 0.0; for &edge1 in self.verticies[vertex_index].edges.iter() { if self.edge_is_active(edge1) { let other = self.active_edges[edge1].other(vertex_index); for &edge2 in self.verticies[other].edges.iter() { if self.edge_is_active(edge2) { edge_sum += self.active_edges[edge2].length; edge_count += 1; } } } } self.verticies[vertex_index].local_mean = edge_sum / edge_count as f64; self.verticies[vertex_index].local_std_dev = local_std_deviation(vertex_index, &self.active_edges, &self.verticies) } pub fn recalculate_mean_with_k_neighbourhood(&mut self) { for v in 0..self.verticies.len() { self.recalculate_k_neighbourhood(v); self.verticies[v].label = 0; } self.mean_std_deviation = self.mean_std_deviation(); for v in 0..self.verticies.len() { for e in 0..self.verticies[v].edges.len() { let other = self.active_edges[self.verticies[v].edges[e]].other(v); for e2 in 0..self.verticies[other].edges.len() { let is_long = self.active_edges[self.verticies[other].edges[e2]].length > self.verticies[v].local_mean + self.mean_std_deviation; if is_long { self.active_edges[self.verticies[other].edges[e2]].active = false; self.mean_std_deviation = self.mean_std_deviation(); self.verticies[v].local_std_dev = local_std_deviation(v, &self.active_edges, &self.verticies); } } } } } fn mean_std_deviation(&self) -> f64 { self.verticies .iter() .fold(0.0, |acc, v| acc + v.local_std_dev) / self.verticies.len() as f64 } } #[derive(Clone, Debug)] pub struct Vertex { index: usize, pub point: Point, local_mean: f64, local_std_dev: f64, edges: Vec<usize>, pub label: usize, } impl Vertex { fn new(index: usize, point: Point, edges: Vec<usize>) -> Vertex { Vertex { index, point, local_mean: 0.0, local_std_dev: 0.0, edges, label: 0, } } } pub trait ToGraph { fn to_graph(&self, points: &[Point]) -> Graph; } impl ToGraph for Triangulation { fn to_graph(&self, points: &[Point]) -> Graph { let all_edges = all_edges(self, points); let mut verticies: Vec<Vertex> = vec![]; for (i, p) in points.iter().enumerate() { let vertex = Vertex::new(i, Point { x: p.x, y: p.y }, neighborhood(i, &all_edges)); 
verticies.push(vertex); } for v in verticies.iter_mut() { v.local_mean = local_mean(&all_edges, &v.edges) } for i in 0..points.len() { verticies[i].local_std_dev = local_std_deviation(i, &all_edges, &verticies); } let mut result = Graph { verticies, mean_std_deviation: 0.0, active_edges: all_edges, }; result.mean_std_deviation = result.mean_std_deviation(); for i in 0..result.active_edges.len() { result.active_edges[i].edge_type1 = result.calculate_type( &result.verticies[result.active_edges[i].vertex1], &result.verticies[result.active_edges[i].vertex2], ); result.active_edges[i].edge_type2 = result.calculate_type( &result.verticies[result.active_edges[i].vertex2], &result.verticies[result.active_edges[i].vertex1], ); } result } } fn distance(p1: &Vertex, p2: &Vertex) -> f64 { ((p1.point.x - p2.point.x).powi(2) + (p1.point.y - p2.point.y).powi(2)).sqrt() } fn distance_point(p1: &Point, p2: &Point) -> f64 { ((p1.x - p2.x).powi(2) + (p1.y - p2.y).powi(2)).sqrt() } pub fn all_edges(graph: &Triangulation, points: &[Point]) -> Vec<Edge> { let mut result: Vec<Edge> = vec![]; for t in graph.triangles.iter().batching(|it| match it.next() { None => None, Some(x) => match it.next() { None => None, Some(y) => match it.next() { None => None, Some(z) => Some((*x, *y, *z)), }, }, }) { let e1 = Edge::new( t.0, t.1, EdgeType::Empty, EdgeType::Empty, distance_point(&points[t.0], &points[t.1]), ); let e2 = Edge::new( t.1, t.2, EdgeType::Empty, EdgeType::Empty, distance_point(&points[t.1], &points[t.2]), ); let e3 = Edge::new( t.2, t.0, EdgeType::Empty, EdgeType::Empty, distance_point(&points[t.2], &points[t.0]), ); if!result.contains(&e1) { result.push(e1); } if!result.contains(&e2) { result.push(e2); } if!result.contains(&e3) { result.push(e3); } } result } pub fn edges(point_index: usize, all_edges: &[Edge]) -> Vec<usize> { let mut result: Vec<usize> = vec![]; for (i, edge) in all_edges.iter().enumerate() { if edge.vertex1 == point_index || edge.vertex2 == point_index { result.push(i); } } result } fn neighborhood(point_index: usize, all_edges: &[Edge]) -> Vec<usize> { edges(point_index, all_edges) } pub fn local_mean(edges: &[Edge], edge_indicies: &[usize]) -> f64 { let mut result = 0.0; for index in edge_indicies { result += edges[*index].length; } result / (edge_indicies.len() as f64) } fn local_std_deviation(point_index: usize, edges: &[Edge], points: &[Vertex]) -> f64 { let mut result = 0.0; let current_vertex = &points[point_index]; for edge in current_vertex .edges .iter() .map(|x| &edges[*x]) .filter(|x| x.active) { result += (current_vertex.local_mean - edge.length).powi(2) / (current_vertex .edges .iter() .map(|x| &edges[*x]) .filter(|x| x.active) .count() as f64); } result.sqrt() }
LabelReference
identifier_name
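The completed identifier is the `LabelReference` helper struct declared inside `restore_edges`. Worth noting from the same file: `Edge` treats its endpoints as unordered, so equality ignores orientation and `other(v)` returns the opposite endpoint. A self-contained sketch of that convention (`UEdge` is an illustrative type, not the crate's):

```rust
#[derive(Debug)]
struct UEdge(usize, usize);

impl PartialEq for UEdge {
    fn eq(&self, other: &Self) -> bool {
        (self.0 == other.0 && self.1 == other.1)
            || (self.0 == other.1 && self.1 == other.0)
    }
}

impl UEdge {
    /// The endpoint opposite `v` (mirrors `Edge::other`).
    fn other(&self, v: usize) -> usize {
        if self.0 == v { self.1 } else { self.0 }
    }
}

fn main() {
    assert_eq!(UEdge(1, 2), UEdge(2, 1)); // orientation is ignored
    assert_eq!(UEdge(1, 2).other(1), 2);
}
```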
garmin_util.rs
use anyhow::{format_err, Error}; use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine}; use fitparser::Value; use flate2::{read::GzEncoder, Compression}; use futures::{Stream, TryStreamExt}; use log::{debug, error}; use num_traits::pow::Pow; use postgres_query::{query, Error as PqError}; use rand::{ distributions::{Alphanumeric, Distribution, Uniform}, thread_rng, Rng, }; use smallvec::SmallVec; use stack_string::{format_sstr, StackString}; use std::{ collections::HashSet, convert::TryInto, fs::{remove_file, File}, future::Future, io::{BufRead, BufReader, Read}, path::{Path, PathBuf}, }; use subprocess::{Exec, Redirection}; use time::{format_description::well_known::Rfc3339, macros::date, Date, Month, OffsetDateTime}; use time_tz::{timezones::db::UTC, OffsetDateTimeExt}; use tokio::time::{sleep, Duration}; use crate::common::pgpool::PgPool; pub const METERS_PER_MILE: f64 = 1609.344; pub const MARATHON_DISTANCE_M: i32 = 42195; pub const MARATHON_DISTANCE_MI: f64 = MARATHON_DISTANCE_M as f64 / METERS_PER_MILE; pub const MONTH_NAMES: [&str; 12] = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", ]; pub const WEEKDAY_NAMES: [&str; 7] = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]; /// # Errors /// Return error if parsing time string fails pub fn convert_time_string(time_str: &str) -> Result<f64, Error> { let entries: SmallVec<[&str; 3]> = time_str.split(':').take(3).collect(); let (h, m, s): (i32, i32, f64) = match entries.first() { Some(h) => match entries.get(1) { Some(m) => match entries.get(2) { Some(s) => (h.parse()?, m.parse()?, s.parse()?), None => (h.parse()?, m.parse()?, 0.), }, None => (h.parse()?, 0, 0.), }, None => (0, 0, 0.), }; Ok(s + 60.0 * (f64::from(m) + 60.0 * f64::from(h))) } /// # Errors /// Return error if parsing time string fails pub fn convert_xml_local_time_to_utc(xml_local_time: &str) -> Result<OffsetDateTime, Error> { OffsetDateTime::parse(xml_local_time, &Rfc3339) .map(|x| x.to_timezone(UTC)) .map_err(Into::into) } /// # Errors /// Return error if running `md5sum` fails pub fn get_md5sum(filename: &Path) -> Result<StackString, Error> { if!Path::new("/usr/bin/md5sum").exists() { return Err(format_err!( "md5sum not installed (or not present at /usr/bin/md5sum)" )); } let command = format_sstr!("md5sum {}", filename.to_string_lossy()); let stream = Exec::shell(command).stream_stdout()?; let reader = BufReader::new(stream); if let Some(line) = reader.lines().next() { if let Some(entry) = line?.split_whitespace().next() { return Ok(entry.into()); } } Ok("".into()) } /// # Errors /// Return error if second is negative pub fn print_h_m_s(second: f64, do_hours: bool) -> Result<StackString, Error> { let hours = (second / 3600.0) as i32; let minutes = (second / 60.0) as i32 - hours * 60; let seconds = second as i32 - minutes * 60 - hours * 3600; if (hours > 0) | ((hours == 0) & do_hours) { Ok(format_sstr!("{hours:02}:{minutes:02}:{seconds:02}")) } else if hours == 0 { Ok(format_sstr!("{minutes:02}:{seconds:02}")) } else { Err(format_err!("Negative result!")) } }
#[must_use] pub fn days_in_year(year: i32) -> i64 { (Date::from_calendar_date(year + 1, Month::January, 1).unwrap_or(date!(1969 - 01 - 01)) - Date::from_calendar_date(year, Month::January, 1).unwrap_or(date!(1969 - 01 - 01))) .whole_days() } #[must_use] pub fn days_in_month(year: i32, month: u32) -> i64 { let mut y1 = year; let mut m1 = month + 1; if m1 == 13 { y1 += 1; m1 = 1; } let month: Month = (month as u8).try_into().unwrap_or(Month::January); let m1: Month = (m1 as u8).try_into().unwrap_or(Month::January); (Date::from_calendar_date(y1, m1, 1).unwrap_or(date!(1969 - 01 - 01)) - Date::from_calendar_date(year, month, 1).unwrap_or(date!(1969 - 01 - 01))) .whole_days() } #[must_use] pub fn expected_calories(weight: f64, pace_min_per_mile: f64, distance: f64) -> f64 { let cal_per_mi = weight * (0.0395 + 0.003_27 * (60. / pace_min_per_mile) + 0.000_455 * (60. / pace_min_per_mile).pow(2.0) + 0.000_801 * ((weight / 154.0) * 0.425 / weight * (60. / pace_min_per_mile).pow(3.0)) * 60. / (60. / pace_min_per_mile)); cal_per_mi * distance } #[must_use] pub fn titlecase(input: &str) -> StackString { if input.is_empty() { "".into() } else { let firstchar = input[0..1].to_uppercase(); format_sstr!("{firstchar}{s}", s = &input[1..]) } } #[must_use] pub fn generate_random_string(nchar: usize) -> StackString { let mut rng = thread_rng(); Alphanumeric .sample_iter(&mut rng) .take(nchar) .map(Into::into) .collect() } #[must_use] pub fn get_file_list(path: &Path) -> Vec<PathBuf> { match path.read_dir() { Ok(it) => it .filter_map(|dir_line| match dir_line { Ok(entry) => Some(entry.path()), Err(_) => None, }) .collect(), Err(err) => { debug!("{}", err); Vec::new() } } } /// # Errors /// Return error if closure fails pub async fn exponential_retry<T, U, F>(f: T) -> Result<U, Error> where T: Fn() -> F, F: Future<Output = Result<U, Error>>, { let mut timeout: f64 = 1.0; let range = Uniform::from(0..1000); loop { match f().await { Ok(resp) => return Ok(resp), Err(err) => { sleep(Duration::from_millis((timeout * 1000.0) as u64)).await; timeout *= 4.0 * f64::from(range.sample(&mut thread_rng())) / 1000.0; if timeout >= 64.0 { return Err(err); } } } } } fn extract_zip(filename: &Path, ziptmpdir: &Path) -> Result<(), Error> { if!Path::new("/usr/bin/unzip").exists() { return Err(format_err!( "md5sum not installed (or not present at /usr/bin/unzip" )); } let command = format_sstr!( "unzip {} -d {}", filename.to_string_lossy(), ziptmpdir.to_string_lossy() ); let mut process = Exec::shell(command).stdout(Redirection::Pipe).popen()?; let exit_status = process.wait()?; if!exit_status.success() { if let Some(mut f) = process.stdout.as_ref() { let mut buf = String::new(); f.read_to_string(&mut buf)?; error!("{}", buf); } return Err(format_err!("Failed with exit status {exit_status:?}")); } Ok(()) } /// # Errors /// Return error if unzip fails pub fn extract_zip_from_garmin_connect( filename: &Path, ziptmpdir: &Path, ) -> Result<PathBuf, Error> { extract_zip(filename, ziptmpdir)?; let new_filename = filename .file_stem() .ok_or_else(|| format_err!("Bad filename {}", filename.to_string_lossy()))?; let new_filename = format_sstr!("{}_ACTIVITY.fit", new_filename.to_string_lossy()); let new_filename = ziptmpdir.join(new_filename); if!new_filename.exists() { return Err(format_err!("Activity file not found")); } remove_file(filename)?; Ok(new_filename) } /// # Errors /// Return error if unzip fails pub fn extract_zip_from_garmin_connect_multiple( filename: &Path, ziptmpdir: &Path, ) -> Result<Vec<PathBuf>, Error> { 
extract_zip(filename, ziptmpdir)?; if!Path::new("/usr/bin/unzip").exists() { return Err(format_err!( "unzip not installed (or not present at /usr/bin/unzip" )); } let mut files = Vec::new(); for entry in ziptmpdir.read_dir()? { let entry = entry?; files.push(entry.path()); } if!files.is_empty() { remove_file(filename)?; } Ok(files) } /// # Errors /// Return error if: /// * input file does not exist /// * opening it fails /// * creating the output file fails /// * writing to the file fails pub fn gzip_file<T, U>(input_filename: T, output_filename: U) -> Result<(), Error> where T: AsRef<Path>, U: AsRef<Path>, { let input_filename = input_filename.as_ref(); let output_filename = output_filename.as_ref(); if!input_filename.exists() { return Err(format_err!("File {input_filename:?} does not exist")); } std::io::copy( &mut GzEncoder::new(File::open(input_filename)?, Compression::fast()), &mut File::create(output_filename)?, )?; Ok(()) } #[must_use] pub fn get_f64(value: &Value) -> Option<f64> { match value { Value::Timestamp(val) => Some(val.unix_timestamp() as f64), Value::Byte(val) | Value::Enum(val) | Value::UInt8(val) | Value::UInt8z(val) => { Some(f64::from(*val)) } Value::SInt8(val) => Some(f64::from(*val)), Value::SInt16(val) => Some(f64::from(*val)), Value::UInt16(val) | Value::UInt16z(val) => Some(f64::from(*val)), Value::SInt32(val) => Some(f64::from(*val)), Value::UInt32(val) | Value::UInt32z(val) => Some(f64::from(*val)), Value::SInt64(val) => Some(*val as f64), Value::UInt64(val) | Value::UInt64z(val) => Some(*val as f64), Value::Float32(val) => Some(f64::from(*val)), Value::Float64(val) => Some(*val), _ => None, } } #[must_use] pub fn get_i64(value: &Value) -> Option<i64> { match value { Value::Timestamp(val) => Some(val.unix_timestamp()), Value::Byte(val) | Value::Enum(val) | Value::UInt8(val) | Value::UInt8z(val) => { Some(i64::from(*val)) } Value::SInt8(val) => Some(i64::from(*val)), Value::SInt16(val) => Some(i64::from(*val)), Value::UInt16(val) | Value::UInt16z(val) => Some(i64::from(*val)), Value::SInt32(val) => Some(i64::from(*val)), Value::UInt32(val) | Value::UInt32z(val) => Some(i64::from(*val)), Value::SInt64(val) => Some(*val), Value::UInt64(val) | Value::UInt64z(val) => Some(*val as i64), Value::Float32(val) => Some(*val as i64), Value::Float64(val) => Some(*val as i64), _ => None, } } #[inline] #[must_use] pub fn get_degrees_from_semicircles(s: f64) -> f64 { s * 180.0 / (2_147_483_648.0) } /// # Errors /// Return error if db query fails pub async fn get_authorized_users(pool: &PgPool) -> Result<HashSet<StackString>, Error> { let query = query!("SELECT email FROM authorized_users"); let conn = pool.get().await?; query .query_streaming(&conn) .await? 
.and_then(|row| async move { let email: StackString = row.try_get(0).map_err(PqError::BeginTransaction)?; Ok(email) }) .try_collect() .await .map_err(Into::into) } /// # Errors /// Return error if db query fails pub async fn get_list_of_telegram_userids( pool: &PgPool, ) -> Result<impl Stream<Item = Result<i64, PqError>>, Error> { let query = query!( " SELECT distinct telegram_userid FROM authorized_users WHERE telegram_userid IS NOT NULL " ); let conn = pool.get().await?; query .query_streaming(&conn) .await .map(|stream| { stream.and_then(|row| async move { let telegram_id: i64 = row .try_get("telegram_userid") .map_err(PqError::BeginTransaction)?; Ok(telegram_id) }) }) .map_err(Into::into) } #[must_use] pub fn get_random_string() -> StackString { let random_bytes: SmallVec<[u8; 16]> = (0..16).map(|_| thread_rng().gen::<u8>()).collect(); URL_SAFE_NO_PAD.encode(&random_bytes).into() }
random_line_split
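`convert_time_string` in this record accepts `"H"`, `"H:M"`, or `"H:M:S"` and folds the pieces into total seconds. A condensed sketch of the same fold without the `SmallVec` and `anyhow` plumbing (`time_to_seconds` is a hypothetical name):

```rust
fn time_to_seconds(s: &str) -> Option<f64> {
    let mut it = s.split(':');
    let h: f64 = it.next()?.parse().ok()?;
    let m: f64 = it.next().map_or(Some(0.0), |p| p.parse().ok())?;
    let sec: f64 = it.next().map_or(Some(0.0), |p| p.parse().ok())?;
    Some(sec + 60.0 * (m + 60.0 * h))
}

fn main() {
    assert_eq!(time_to_seconds("1:30:15"), Some(5415.0));
    assert_eq!(time_to_seconds("45"), Some(162_000.0)); // bare hours
    assert_eq!(time_to_seconds("x"), None);
}
```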
garmin_util.rs
use anyhow::{format_err, Error}; use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine}; use fitparser::Value; use flate2::{read::GzEncoder, Compression}; use futures::{Stream, TryStreamExt}; use log::{debug, error}; use num_traits::pow::Pow; use postgres_query::{query, Error as PqError}; use rand::{ distributions::{Alphanumeric, Distribution, Uniform}, thread_rng, Rng, }; use smallvec::SmallVec; use stack_string::{format_sstr, StackString}; use std::{ collections::HashSet, convert::TryInto, fs::{remove_file, File}, future::Future, io::{BufRead, BufReader, Read}, path::{Path, PathBuf}, }; use subprocess::{Exec, Redirection}; use time::{format_description::well_known::Rfc3339, macros::date, Date, Month, OffsetDateTime}; use time_tz::{timezones::db::UTC, OffsetDateTimeExt}; use tokio::time::{sleep, Duration}; use crate::common::pgpool::PgPool; pub const METERS_PER_MILE: f64 = 1609.344; pub const MARATHON_DISTANCE_M: i32 = 42195; pub const MARATHON_DISTANCE_MI: f64 = MARATHON_DISTANCE_M as f64 / METERS_PER_MILE; pub const MONTH_NAMES: [&str; 12] = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", ]; pub const WEEKDAY_NAMES: [&str; 7] = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]; /// # Errors /// Return error if parsing time string fails pub fn convert_time_string(time_str: &str) -> Result<f64, Error> { let entries: SmallVec<[&str; 3]> = time_str.split(':').take(3).collect(); let (h, m, s): (i32, i32, f64) = match entries.first() { Some(h) => match entries.get(1) { Some(m) => match entries.get(2) { Some(s) => (h.parse()?, m.parse()?, s.parse()?), None => (h.parse()?, m.parse()?, 0.), }, None => (h.parse()?, 0, 0.), }, None => (0, 0, 0.), }; Ok(s + 60.0 * (f64::from(m) + 60.0 * f64::from(h))) } /// # Errors /// Return error if parsing time string fails pub fn convert_xml_local_time_to_utc(xml_local_time: &str) -> Result<OffsetDateTime, Error> { OffsetDateTime::parse(xml_local_time, &Rfc3339) .map(|x| x.to_timezone(UTC)) .map_err(Into::into) } /// # Errors /// Return error if running `md5sum` fails pub fn get_md5sum(filename: &Path) -> Result<StackString, Error> { if!Path::new("/usr/bin/md5sum").exists() { return Err(format_err!( "md5sum not installed (or not present at /usr/bin/md5sum)" )); } let command = format_sstr!("md5sum {}", filename.to_string_lossy()); let stream = Exec::shell(command).stream_stdout()?; let reader = BufReader::new(stream); if let Some(line) = reader.lines().next() { if let Some(entry) = line?.split_whitespace().next() { return Ok(entry.into()); } } Ok("".into()) } /// # Errors /// Return error if second is negative pub fn print_h_m_s(second: f64, do_hours: bool) -> Result<StackString, Error> { let hours = (second / 3600.0) as i32; let minutes = (second / 60.0) as i32 - hours * 60; let seconds = second as i32 - minutes * 60 - hours * 3600; if (hours > 0) | ((hours == 0) & do_hours) { Ok(format_sstr!("{hours:02}:{minutes:02}:{seconds:02}")) } else if hours == 0 { Ok(format_sstr!("{minutes:02}:{seconds:02}")) } else { Err(format_err!("Negative result!")) } } #[must_use] pub fn days_in_year(year: i32) -> i64 { (Date::from_calendar_date(year + 1, Month::January, 1).unwrap_or(date!(1969 - 01 - 01)) - Date::from_calendar_date(year, Month::January, 1).unwrap_or(date!(1969 - 01 - 01))) .whole_days() } #[must_use] pub fn days_in_month(year: i32, month: u32) -> i64
#[must_use] pub fn expected_calories(weight: f64, pace_min_per_mile: f64, distance: f64) -> f64 { let cal_per_mi = weight * (0.0395 + 0.003_27 * (60. / pace_min_per_mile) + 0.000_455 * (60. / pace_min_per_mile).pow(2.0) + 0.000_801 * ((weight / 154.0) * 0.425 / weight * (60. / pace_min_per_mile).pow(3.0)) * 60. / (60. / pace_min_per_mile)); cal_per_mi * distance } #[must_use] pub fn titlecase(input: &str) -> StackString { if input.is_empty() { "".into() } else { let firstchar = input[0..1].to_uppercase(); format_sstr!("{firstchar}{s}", s = &input[1..]) } } #[must_use] pub fn generate_random_string(nchar: usize) -> StackString { let mut rng = thread_rng(); Alphanumeric .sample_iter(&mut rng) .take(nchar) .map(Into::into) .collect() } #[must_use] pub fn get_file_list(path: &Path) -> Vec<PathBuf> { match path.read_dir() { Ok(it) => it .filter_map(|dir_line| match dir_line { Ok(entry) => Some(entry.path()), Err(_) => None, }) .collect(), Err(err) => { debug!("{}", err); Vec::new() } } } /// # Errors /// Return error if closure fails pub async fn exponential_retry<T, U, F>(f: T) -> Result<U, Error> where T: Fn() -> F, F: Future<Output = Result<U, Error>>, { let mut timeout: f64 = 1.0; let range = Uniform::from(0..1000); loop { match f().await { Ok(resp) => return Ok(resp), Err(err) => { sleep(Duration::from_millis((timeout * 1000.0) as u64)).await; timeout *= 4.0 * f64::from(range.sample(&mut thread_rng())) / 1000.0; if timeout >= 64.0 { return Err(err); } } } } } fn extract_zip(filename: &Path, ziptmpdir: &Path) -> Result<(), Error> { if!Path::new("/usr/bin/unzip").exists() { return Err(format_err!( "md5sum not installed (or not present at /usr/bin/unzip" )); } let command = format_sstr!( "unzip {} -d {}", filename.to_string_lossy(), ziptmpdir.to_string_lossy() ); let mut process = Exec::shell(command).stdout(Redirection::Pipe).popen()?; let exit_status = process.wait()?; if!exit_status.success() { if let Some(mut f) = process.stdout.as_ref() { let mut buf = String::new(); f.read_to_string(&mut buf)?; error!("{}", buf); } return Err(format_err!("Failed with exit status {exit_status:?}")); } Ok(()) } /// # Errors /// Return error if unzip fails pub fn extract_zip_from_garmin_connect( filename: &Path, ziptmpdir: &Path, ) -> Result<PathBuf, Error> { extract_zip(filename, ziptmpdir)?; let new_filename = filename .file_stem() .ok_or_else(|| format_err!("Bad filename {}", filename.to_string_lossy()))?; let new_filename = format_sstr!("{}_ACTIVITY.fit", new_filename.to_string_lossy()); let new_filename = ziptmpdir.join(new_filename); if!new_filename.exists() { return Err(format_err!("Activity file not found")); } remove_file(filename)?; Ok(new_filename) } /// # Errors /// Return error if unzip fails pub fn extract_zip_from_garmin_connect_multiple( filename: &Path, ziptmpdir: &Path, ) -> Result<Vec<PathBuf>, Error> { extract_zip(filename, ziptmpdir)?; if!Path::new("/usr/bin/unzip").exists() { return Err(format_err!( "unzip not installed (or not present at /usr/bin/unzip" )); } let mut files = Vec::new(); for entry in ziptmpdir.read_dir()? 
{ let entry = entry?; files.push(entry.path()); } if!files.is_empty() { remove_file(filename)?; } Ok(files) } /// # Errors /// Return error if: /// * input file does not exist /// * opening it fails /// * creating the output file fails /// * writing to the file fails pub fn gzip_file<T, U>(input_filename: T, output_filename: U) -> Result<(), Error> where T: AsRef<Path>, U: AsRef<Path>, { let input_filename = input_filename.as_ref(); let output_filename = output_filename.as_ref(); if!input_filename.exists() { return Err(format_err!("File {input_filename:?} does not exist")); } std::io::copy( &mut GzEncoder::new(File::open(input_filename)?, Compression::fast()), &mut File::create(output_filename)?, )?; Ok(()) } #[must_use] pub fn get_f64(value: &Value) -> Option<f64> { match value { Value::Timestamp(val) => Some(val.unix_timestamp() as f64), Value::Byte(val) | Value::Enum(val) | Value::UInt8(val) | Value::UInt8z(val) => { Some(f64::from(*val)) } Value::SInt8(val) => Some(f64::from(*val)), Value::SInt16(val) => Some(f64::from(*val)), Value::UInt16(val) | Value::UInt16z(val) => Some(f64::from(*val)), Value::SInt32(val) => Some(f64::from(*val)), Value::UInt32(val) | Value::UInt32z(val) => Some(f64::from(*val)), Value::SInt64(val) => Some(*val as f64), Value::UInt64(val) | Value::UInt64z(val) => Some(*val as f64), Value::Float32(val) => Some(f64::from(*val)), Value::Float64(val) => Some(*val), _ => None, } } #[must_use] pub fn get_i64(value: &Value) -> Option<i64> { match value { Value::Timestamp(val) => Some(val.unix_timestamp()), Value::Byte(val) | Value::Enum(val) | Value::UInt8(val) | Value::UInt8z(val) => { Some(i64::from(*val)) } Value::SInt8(val) => Some(i64::from(*val)), Value::SInt16(val) => Some(i64::from(*val)), Value::UInt16(val) | Value::UInt16z(val) => Some(i64::from(*val)), Value::SInt32(val) => Some(i64::from(*val)), Value::UInt32(val) | Value::UInt32z(val) => Some(i64::from(*val)), Value::SInt64(val) => Some(*val), Value::UInt64(val) | Value::UInt64z(val) => Some(*val as i64), Value::Float32(val) => Some(*val as i64), Value::Float64(val) => Some(*val as i64), _ => None, } } #[inline] #[must_use] pub fn get_degrees_from_semicircles(s: f64) -> f64 { s * 180.0 / (2_147_483_648.0) } /// # Errors /// Return error if db query fails pub async fn get_authorized_users(pool: &PgPool) -> Result<HashSet<StackString>, Error> { let query = query!("SELECT email FROM authorized_users"); let conn = pool.get().await?; query .query_streaming(&conn) .await? .and_then(|row| async move { let email: StackString = row.try_get(0).map_err(PqError::BeginTransaction)?; Ok(email) }) .try_collect() .await .map_err(Into::into) } /// # Errors /// Return error if db query fails pub async fn get_list_of_telegram_userids( pool: &PgPool, ) -> Result<impl Stream<Item = Result<i64, PqError>>, Error> { let query = query!( " SELECT distinct telegram_userid FROM authorized_users WHERE telegram_userid IS NOT NULL " ); let conn = pool.get().await?; query .query_streaming(&conn) .await .map(|stream| { stream.and_then(|row| async move { let telegram_id: i64 = row .try_get("telegram_userid") .map_err(PqError::BeginTransaction)?; Ok(telegram_id) }) }) .map_err(Into::into) } #[must_use] pub fn get_random_string() -> StackString { let random_bytes: SmallVec<[u8; 16]> = (0..16).map(|_| thread_rng().gen::<u8>()).collect(); URL_SAFE_NO_PAD.encode(&random_bytes).into() }
{ let mut y1 = year; let mut m1 = month + 1; if m1 == 13 { y1 += 1; m1 = 1; } let month: Month = (month as u8).try_into().unwrap_or(Month::January); let m1: Month = (m1 as u8).try_into().unwrap_or(Month::January); (Date::from_calendar_date(y1, m1, 1).unwrap_or(date!(1969 - 01 - 01)) - Date::from_calendar_date(year, month, 1).unwrap_or(date!(1969 - 01 - 01))) .whole_days() }
identifier_body
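The filled-in `identifier_body` computes `days_in_month` as the difference between the first of the month and the first of the following month, rolling December over into January of the next year. The same result via an explicit leap-year rule, as a cross-check sketch:

```rust
fn days_in_month(year: i32, month: u32) -> u32 {
    match month {
        1 | 3 | 5 | 7 | 8 | 10 | 12 => 31,
        4 | 6 | 9 | 11 => 30,
        // Gregorian leap rule: divisible by 4, except centuries not
        // divisible by 400.
        2 if (year % 4 == 0 && year % 100 != 0) || year % 400 == 0 => 29,
        2 => 28,
        _ => 0, // invalid month
    }
}

fn main() {
    assert_eq!(days_in_month(2020, 2), 29);
    assert_eq!(days_in_month(2021, 2), 28);
    assert_eq!(days_in_month(2021, 12), 31);
}
```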
garmin_util.rs
use anyhow::{format_err, Error}; use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine}; use fitparser::Value; use flate2::{read::GzEncoder, Compression}; use futures::{Stream, TryStreamExt}; use log::{debug, error}; use num_traits::pow::Pow; use postgres_query::{query, Error as PqError}; use rand::{ distributions::{Alphanumeric, Distribution, Uniform}, thread_rng, Rng, }; use smallvec::SmallVec; use stack_string::{format_sstr, StackString}; use std::{ collections::HashSet, convert::TryInto, fs::{remove_file, File}, future::Future, io::{BufRead, BufReader, Read}, path::{Path, PathBuf}, }; use subprocess::{Exec, Redirection}; use time::{format_description::well_known::Rfc3339, macros::date, Date, Month, OffsetDateTime}; use time_tz::{timezones::db::UTC, OffsetDateTimeExt}; use tokio::time::{sleep, Duration}; use crate::common::pgpool::PgPool; pub const METERS_PER_MILE: f64 = 1609.344; pub const MARATHON_DISTANCE_M: i32 = 42195; pub const MARATHON_DISTANCE_MI: f64 = MARATHON_DISTANCE_M as f64 / METERS_PER_MILE; pub const MONTH_NAMES: [&str; 12] = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", ]; pub const WEEKDAY_NAMES: [&str; 7] = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]; /// # Errors /// Return error if parsing time string fails pub fn convert_time_string(time_str: &str) -> Result<f64, Error> { let entries: SmallVec<[&str; 3]> = time_str.split(':').take(3).collect(); let (h, m, s): (i32, i32, f64) = match entries.first() { Some(h) => match entries.get(1) { Some(m) => match entries.get(2) { Some(s) => (h.parse()?, m.parse()?, s.parse()?), None => (h.parse()?, m.parse()?, 0.), }, None => (h.parse()?, 0, 0.), }, None => (0, 0, 0.), }; Ok(s + 60.0 * (f64::from(m) + 60.0 * f64::from(h))) } /// # Errors /// Return error if parsing time string fails pub fn convert_xml_local_time_to_utc(xml_local_time: &str) -> Result<OffsetDateTime, Error> { OffsetDateTime::parse(xml_local_time, &Rfc3339) .map(|x| x.to_timezone(UTC)) .map_err(Into::into) } /// # Errors /// Return error if running `md5sum` fails pub fn get_md5sum(filename: &Path) -> Result<StackString, Error> { if!Path::new("/usr/bin/md5sum").exists() { return Err(format_err!( "md5sum not installed (or not present at /usr/bin/md5sum)" )); } let command = format_sstr!("md5sum {}", filename.to_string_lossy()); let stream = Exec::shell(command).stream_stdout()?; let reader = BufReader::new(stream); if let Some(line) = reader.lines().next() { if let Some(entry) = line?.split_whitespace().next() { return Ok(entry.into()); } } Ok("".into()) } /// # Errors /// Return error if second is negative pub fn print_h_m_s(second: f64, do_hours: bool) -> Result<StackString, Error> { let hours = (second / 3600.0) as i32; let minutes = (second / 60.0) as i32 - hours * 60; let seconds = second as i32 - minutes * 60 - hours * 3600; if (hours > 0) | ((hours == 0) & do_hours) { Ok(format_sstr!("{hours:02}:{minutes:02}:{seconds:02}")) } else if hours == 0 { Ok(format_sstr!("{minutes:02}:{seconds:02}")) } else { Err(format_err!("Negative result!")) } } #[must_use] pub fn days_in_year(year: i32) -> i64 { (Date::from_calendar_date(year + 1, Month::January, 1).unwrap_or(date!(1969 - 01 - 01)) - Date::from_calendar_date(year, Month::January, 1).unwrap_or(date!(1969 - 01 - 01))) .whole_days() } #[must_use] pub fn days_in_month(year: i32, month: u32) -> i64 { let mut y1 = year; let mut m1 = month + 1; if m1 == 13 { y1 += 1; m1 = 1; } let month: Month = (month as u8).try_into().unwrap_or(Month::January); let m1: Month = (m1 
as u8).try_into().unwrap_or(Month::January); (Date::from_calendar_date(y1, m1, 1).unwrap_or(date!(1969 - 01 - 01)) - Date::from_calendar_date(year, month, 1).unwrap_or(date!(1969 - 01 - 01))) .whole_days() } #[must_use] pub fn expected_calories(weight: f64, pace_min_per_mile: f64, distance: f64) -> f64 { let cal_per_mi = weight * (0.0395 + 0.003_27 * (60. / pace_min_per_mile) + 0.000_455 * (60. / pace_min_per_mile).pow(2.0) + 0.000_801 * ((weight / 154.0) * 0.425 / weight * (60. / pace_min_per_mile).pow(3.0)) * 60. / (60. / pace_min_per_mile)); cal_per_mi * distance } #[must_use] pub fn titlecase(input: &str) -> StackString { if input.is_empty() { "".into() } else { let firstchar = input[0..1].to_uppercase(); format_sstr!("{firstchar}{s}", s = &input[1..]) } } #[must_use] pub fn generate_random_string(nchar: usize) -> StackString { let mut rng = thread_rng(); Alphanumeric .sample_iter(&mut rng) .take(nchar) .map(Into::into) .collect() } #[must_use] pub fn get_file_list(path: &Path) -> Vec<PathBuf> { match path.read_dir() { Ok(it) => it .filter_map(|dir_line| match dir_line { Ok(entry) => Some(entry.path()), Err(_) => None, }) .collect(), Err(err) => { debug!("{}", err); Vec::new() } } } /// # Errors /// Return error if closure fails pub async fn
<T, U, F>(f: T) -> Result<U, Error> where T: Fn() -> F, F: Future<Output = Result<U, Error>>, { let mut timeout: f64 = 1.0; let range = Uniform::from(0..1000); loop { match f().await { Ok(resp) => return Ok(resp), Err(err) => { sleep(Duration::from_millis((timeout * 1000.0) as u64)).await; timeout *= 4.0 * f64::from(range.sample(&mut thread_rng())) / 1000.0; if timeout >= 64.0 { return Err(err); } } } } } fn extract_zip(filename: &Path, ziptmpdir: &Path) -> Result<(), Error> { if!Path::new("/usr/bin/unzip").exists() { return Err(format_err!( "md5sum not installed (or not present at /usr/bin/unzip" )); } let command = format_sstr!( "unzip {} -d {}", filename.to_string_lossy(), ziptmpdir.to_string_lossy() ); let mut process = Exec::shell(command).stdout(Redirection::Pipe).popen()?; let exit_status = process.wait()?; if!exit_status.success() { if let Some(mut f) = process.stdout.as_ref() { let mut buf = String::new(); f.read_to_string(&mut buf)?; error!("{}", buf); } return Err(format_err!("Failed with exit status {exit_status:?}")); } Ok(()) } /// # Errors /// Return error if unzip fails pub fn extract_zip_from_garmin_connect( filename: &Path, ziptmpdir: &Path, ) -> Result<PathBuf, Error> { extract_zip(filename, ziptmpdir)?; let new_filename = filename .file_stem() .ok_or_else(|| format_err!("Bad filename {}", filename.to_string_lossy()))?; let new_filename = format_sstr!("{}_ACTIVITY.fit", new_filename.to_string_lossy()); let new_filename = ziptmpdir.join(new_filename); if!new_filename.exists() { return Err(format_err!("Activity file not found")); } remove_file(filename)?; Ok(new_filename) } /// # Errors /// Return error if unzip fails pub fn extract_zip_from_garmin_connect_multiple( filename: &Path, ziptmpdir: &Path, ) -> Result<Vec<PathBuf>, Error> { extract_zip(filename, ziptmpdir)?; if!Path::new("/usr/bin/unzip").exists() { return Err(format_err!( "unzip not installed (or not present at /usr/bin/unzip" )); } let mut files = Vec::new(); for entry in ziptmpdir.read_dir()? 
{ let entry = entry?; files.push(entry.path()); } if!files.is_empty() { remove_file(filename)?; } Ok(files) } /// # Errors /// Return error if: /// * input file does not exist /// * opening it fails /// * creating the output file fails /// * writing to the file fails pub fn gzip_file<T, U>(input_filename: T, output_filename: U) -> Result<(), Error> where T: AsRef<Path>, U: AsRef<Path>, { let input_filename = input_filename.as_ref(); let output_filename = output_filename.as_ref(); if!input_filename.exists() { return Err(format_err!("File {input_filename:?} does not exist")); } std::io::copy( &mut GzEncoder::new(File::open(input_filename)?, Compression::fast()), &mut File::create(output_filename)?, )?; Ok(()) } #[must_use] pub fn get_f64(value: &Value) -> Option<f64> { match value { Value::Timestamp(val) => Some(val.unix_timestamp() as f64), Value::Byte(val) | Value::Enum(val) | Value::UInt8(val) | Value::UInt8z(val) => { Some(f64::from(*val)) } Value::SInt8(val) => Some(f64::from(*val)), Value::SInt16(val) => Some(f64::from(*val)), Value::UInt16(val) | Value::UInt16z(val) => Some(f64::from(*val)), Value::SInt32(val) => Some(f64::from(*val)), Value::UInt32(val) | Value::UInt32z(val) => Some(f64::from(*val)), Value::SInt64(val) => Some(*val as f64), Value::UInt64(val) | Value::UInt64z(val) => Some(*val as f64), Value::Float32(val) => Some(f64::from(*val)), Value::Float64(val) => Some(*val), _ => None, } } #[must_use] pub fn get_i64(value: &Value) -> Option<i64> { match value { Value::Timestamp(val) => Some(val.unix_timestamp()), Value::Byte(val) | Value::Enum(val) | Value::UInt8(val) | Value::UInt8z(val) => { Some(i64::from(*val)) } Value::SInt8(val) => Some(i64::from(*val)), Value::SInt16(val) => Some(i64::from(*val)), Value::UInt16(val) | Value::UInt16z(val) => Some(i64::from(*val)), Value::SInt32(val) => Some(i64::from(*val)), Value::UInt32(val) | Value::UInt32z(val) => Some(i64::from(*val)), Value::SInt64(val) => Some(*val), Value::UInt64(val) | Value::UInt64z(val) => Some(*val as i64), Value::Float32(val) => Some(*val as i64), Value::Float64(val) => Some(*val as i64), _ => None, } } #[inline] #[must_use] pub fn get_degrees_from_semicircles(s: f64) -> f64 { s * 180.0 / (2_147_483_648.0) } /// # Errors /// Return error if db query fails pub async fn get_authorized_users(pool: &PgPool) -> Result<HashSet<StackString>, Error> { let query = query!("SELECT email FROM authorized_users"); let conn = pool.get().await?; query .query_streaming(&conn) .await? .and_then(|row| async move { let email: StackString = row.try_get(0).map_err(PqError::BeginTransaction)?; Ok(email) }) .try_collect() .await .map_err(Into::into) } /// # Errors /// Return error if db query fails pub async fn get_list_of_telegram_userids( pool: &PgPool, ) -> Result<impl Stream<Item = Result<i64, PqError>>, Error> { let query = query!( " SELECT distinct telegram_userid FROM authorized_users WHERE telegram_userid IS NOT NULL " ); let conn = pool.get().await?; query .query_streaming(&conn) .await .map(|stream| { stream.and_then(|row| async move { let telegram_id: i64 = row .try_get("telegram_userid") .map_err(PqError::BeginTransaction)?; Ok(telegram_id) }) }) .map_err(Into::into) } #[must_use] pub fn get_random_string() -> StackString { let random_bytes: SmallVec<[u8; 16]> = (0..16).map(|_| thread_rng().gen::<u8>()).collect(); URL_SAFE_NO_PAD.encode(&random_bytes).into() }
exponential_retry
identifier_name
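The completed identifier is `exponential_retry`: after each failed attempt it sleeps for the current timeout, multiplies the timeout by a random factor in [0, 4) (so it doubles on average), and gives up once the timeout reaches 64 seconds. A dependency-free synchronous sketch of that policy (`retry` and `rand_unit` are stand-in names; a real implementation would use a proper RNG such as the `rand` crate, as the source does):

```rust
use std::{thread::sleep, time::Duration};

fn retry<T, E>(mut f: impl FnMut() -> Result<T, E>) -> Result<T, E> {
    let mut timeout = 1.0_f64;
    loop {
        match f() {
            Ok(v) => return Ok(v),
            Err(e) => {
                sleep(Duration::from_millis((timeout * 1000.0) as u64));
                // Jittered growth: doubles on average; note that, as in the
                // source, a sample of zero stops the delay from growing.
                timeout *= 4.0 * rand_unit();
                if timeout >= 64.0 {
                    return Err(e);
                }
            }
        }
    }
}

// Stand-in for a real RNG so the sketch stays dependency-free.
fn rand_unit() -> f64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    f64::from(
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .subsec_nanos()
            % 1000,
    ) / 1000.0
}

fn main() {
    let mut attempts = 0;
    let outcome = retry(|| {
        attempts += 1;
        if attempts < 3 { Err("not yet") } else { Ok(attempts) }
    });
    println!("{outcome:?}"); // prints Ok(3)
}
```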
garmin_util.rs
use anyhow::{format_err, Error}; use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine}; use fitparser::Value; use flate2::{read::GzEncoder, Compression}; use futures::{Stream, TryStreamExt}; use log::{debug, error}; use num_traits::pow::Pow; use postgres_query::{query, Error as PqError}; use rand::{ distributions::{Alphanumeric, Distribution, Uniform}, thread_rng, Rng, }; use smallvec::SmallVec; use stack_string::{format_sstr, StackString}; use std::{ collections::HashSet, convert::TryInto, fs::{remove_file, File}, future::Future, io::{BufRead, BufReader, Read}, path::{Path, PathBuf}, }; use subprocess::{Exec, Redirection}; use time::{format_description::well_known::Rfc3339, macros::date, Date, Month, OffsetDateTime}; use time_tz::{timezones::db::UTC, OffsetDateTimeExt}; use tokio::time::{sleep, Duration}; use crate::common::pgpool::PgPool; pub const METERS_PER_MILE: f64 = 1609.344; pub const MARATHON_DISTANCE_M: i32 = 42195; pub const MARATHON_DISTANCE_MI: f64 = MARATHON_DISTANCE_M as f64 / METERS_PER_MILE; pub const MONTH_NAMES: [&str; 12] = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", ]; pub const WEEKDAY_NAMES: [&str; 7] = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]; /// # Errors /// Return error if parsing time string fails pub fn convert_time_string(time_str: &str) -> Result<f64, Error> { let entries: SmallVec<[&str; 3]> = time_str.split(':').take(3).collect(); let (h, m, s): (i32, i32, f64) = match entries.first() { Some(h) => match entries.get(1) { Some(m) => match entries.get(2) { Some(s) => (h.parse()?, m.parse()?, s.parse()?), None => (h.parse()?, m.parse()?, 0.), }, None => (h.parse()?, 0, 0.), }, None => (0, 0, 0.), }; Ok(s + 60.0 * (f64::from(m) + 60.0 * f64::from(h))) } /// # Errors /// Return error if parsing time string fails pub fn convert_xml_local_time_to_utc(xml_local_time: &str) -> Result<OffsetDateTime, Error> { OffsetDateTime::parse(xml_local_time, &Rfc3339) .map(|x| x.to_timezone(UTC)) .map_err(Into::into) } /// # Errors /// Return error if running `md5sum` fails pub fn get_md5sum(filename: &Path) -> Result<StackString, Error> { if!Path::new("/usr/bin/md5sum").exists() { return Err(format_err!( "md5sum not installed (or not present at /usr/bin/md5sum)" )); } let command = format_sstr!("md5sum {}", filename.to_string_lossy()); let stream = Exec::shell(command).stream_stdout()?; let reader = BufReader::new(stream); if let Some(line) = reader.lines().next() { if let Some(entry) = line?.split_whitespace().next() { return Ok(entry.into()); } } Ok("".into()) } /// # Errors /// Return error if second is negative pub fn print_h_m_s(second: f64, do_hours: bool) -> Result<StackString, Error> { let hours = (second / 3600.0) as i32; let minutes = (second / 60.0) as i32 - hours * 60; let seconds = second as i32 - minutes * 60 - hours * 3600; if (hours > 0) | ((hours == 0) & do_hours) { Ok(format_sstr!("{hours:02}:{minutes:02}:{seconds:02}")) } else if hours == 0 { Ok(format_sstr!("{minutes:02}:{seconds:02}")) } else { Err(format_err!("Negative result!")) } } #[must_use] pub fn days_in_year(year: i32) -> i64 { (Date::from_calendar_date(year + 1, Month::January, 1).unwrap_or(date!(1969 - 01 - 01)) - Date::from_calendar_date(year, Month::January, 1).unwrap_or(date!(1969 - 01 - 01))) .whole_days() } #[must_use] pub fn days_in_month(year: i32, month: u32) -> i64 { let mut y1 = year; let mut m1 = month + 1; if m1 == 13 { y1 += 1; m1 = 1; } let month: Month = (month as u8).try_into().unwrap_or(Month::January); let m1: Month = (m1 
as u8).try_into().unwrap_or(Month::January); (Date::from_calendar_date(y1, m1, 1).unwrap_or(date!(1969 - 01 - 01)) - Date::from_calendar_date(year, month, 1).unwrap_or(date!(1969 - 01 - 01))) .whole_days() } #[must_use] pub fn expected_calories(weight: f64, pace_min_per_mile: f64, distance: f64) -> f64 { let cal_per_mi = weight * (0.0395 + 0.003_27 * (60. / pace_min_per_mile) + 0.000_455 * (60. / pace_min_per_mile).pow(2.0) + 0.000_801 * ((weight / 154.0) * 0.425 / weight * (60. / pace_min_per_mile).pow(3.0)) * 60. / (60. / pace_min_per_mile)); cal_per_mi * distance } #[must_use] pub fn titlecase(input: &str) -> StackString { if input.is_empty() { "".into() } else { let firstchar = input[0..1].to_uppercase(); format_sstr!("{firstchar}{s}", s = &input[1..]) } } #[must_use] pub fn generate_random_string(nchar: usize) -> StackString { let mut rng = thread_rng(); Alphanumeric .sample_iter(&mut rng) .take(nchar) .map(Into::into) .collect() } #[must_use] pub fn get_file_list(path: &Path) -> Vec<PathBuf> { match path.read_dir() { Ok(it) => it .filter_map(|dir_line| match dir_line { Ok(entry) => Some(entry.path()), Err(_) => None, }) .collect(), Err(err) => { debug!("{}", err); Vec::new() } } } /// # Errors /// Return error if closure fails pub async fn exponential_retry<T, U, F>(f: T) -> Result<U, Error> where T: Fn() -> F, F: Future<Output = Result<U, Error>>, { let mut timeout: f64 = 1.0; let range = Uniform::from(0..1000); loop { match f().await { Ok(resp) => return Ok(resp), Err(err) => { sleep(Duration::from_millis((timeout * 1000.0) as u64)).await; timeout *= 4.0 * f64::from(range.sample(&mut thread_rng())) / 1000.0; if timeout >= 64.0 { return Err(err); } } } } } fn extract_zip(filename: &Path, ziptmpdir: &Path) -> Result<(), Error> { if!Path::new("/usr/bin/unzip").exists() { return Err(format_err!( "md5sum not installed (or not present at /usr/bin/unzip" )); } let command = format_sstr!( "unzip {} -d {}", filename.to_string_lossy(), ziptmpdir.to_string_lossy() ); let mut process = Exec::shell(command).stdout(Redirection::Pipe).popen()?; let exit_status = process.wait()?; if!exit_status.success()
Ok(()) } /// # Errors /// Return error if unzip fails pub fn extract_zip_from_garmin_connect( filename: &Path, ziptmpdir: &Path, ) -> Result<PathBuf, Error> { extract_zip(filename, ziptmpdir)?; let new_filename = filename .file_stem() .ok_or_else(|| format_err!("Bad filename {}", filename.to_string_lossy()))?; let new_filename = format_sstr!("{}_ACTIVITY.fit", new_filename.to_string_lossy()); let new_filename = ziptmpdir.join(new_filename); if!new_filename.exists() { return Err(format_err!("Activity file not found")); } remove_file(filename)?; Ok(new_filename) } /// # Errors /// Return error if unzip fails pub fn extract_zip_from_garmin_connect_multiple( filename: &Path, ziptmpdir: &Path, ) -> Result<Vec<PathBuf>, Error> { extract_zip(filename, ziptmpdir)?; if!Path::new("/usr/bin/unzip").exists() { return Err(format_err!( "unzip not installed (or not present at /usr/bin/unzip" )); } let mut files = Vec::new(); for entry in ziptmpdir.read_dir()? { let entry = entry?; files.push(entry.path()); } if!files.is_empty() { remove_file(filename)?; } Ok(files) } /// # Errors /// Return error if: /// * input file does not exist /// * opening it fails /// * creating the output file fails /// * writing to the file fails pub fn gzip_file<T, U>(input_filename: T, output_filename: U) -> Result<(), Error> where T: AsRef<Path>, U: AsRef<Path>, { let input_filename = input_filename.as_ref(); let output_filename = output_filename.as_ref(); if!input_filename.exists() { return Err(format_err!("File {input_filename:?} does not exist")); } std::io::copy( &mut GzEncoder::new(File::open(input_filename)?, Compression::fast()), &mut File::create(output_filename)?, )?; Ok(()) } #[must_use] pub fn get_f64(value: &Value) -> Option<f64> { match value { Value::Timestamp(val) => Some(val.unix_timestamp() as f64), Value::Byte(val) | Value::Enum(val) | Value::UInt8(val) | Value::UInt8z(val) => { Some(f64::from(*val)) } Value::SInt8(val) => Some(f64::from(*val)), Value::SInt16(val) => Some(f64::from(*val)), Value::UInt16(val) | Value::UInt16z(val) => Some(f64::from(*val)), Value::SInt32(val) => Some(f64::from(*val)), Value::UInt32(val) | Value::UInt32z(val) => Some(f64::from(*val)), Value::SInt64(val) => Some(*val as f64), Value::UInt64(val) | Value::UInt64z(val) => Some(*val as f64), Value::Float32(val) => Some(f64::from(*val)), Value::Float64(val) => Some(*val), _ => None, } } #[must_use] pub fn get_i64(value: &Value) -> Option<i64> { match value { Value::Timestamp(val) => Some(val.unix_timestamp()), Value::Byte(val) | Value::Enum(val) | Value::UInt8(val) | Value::UInt8z(val) => { Some(i64::from(*val)) } Value::SInt8(val) => Some(i64::from(*val)), Value::SInt16(val) => Some(i64::from(*val)), Value::UInt16(val) | Value::UInt16z(val) => Some(i64::from(*val)), Value::SInt32(val) => Some(i64::from(*val)), Value::UInt32(val) | Value::UInt32z(val) => Some(i64::from(*val)), Value::SInt64(val) => Some(*val), Value::UInt64(val) | Value::UInt64z(val) => Some(*val as i64), Value::Float32(val) => Some(*val as i64), Value::Float64(val) => Some(*val as i64), _ => None, } } #[inline] #[must_use] pub fn get_degrees_from_semicircles(s: f64) -> f64 { s * 180.0 / (2_147_483_648.0) } /// # Errors /// Return error if db query fails pub async fn get_authorized_users(pool: &PgPool) -> Result<HashSet<StackString>, Error> { let query = query!("SELECT email FROM authorized_users"); let conn = pool.get().await?; query .query_streaming(&conn) .await? 
.and_then(|row| async move { let email: StackString = row.try_get(0).map_err(PqError::BeginTransaction)?; Ok(email) }) .try_collect() .await .map_err(Into::into) } /// # Errors /// Return error if db query fails pub async fn get_list_of_telegram_userids( pool: &PgPool, ) -> Result<impl Stream<Item = Result<i64, PqError>>, Error> { let query = query!( " SELECT distinct telegram_userid FROM authorized_users WHERE telegram_userid IS NOT NULL " ); let conn = pool.get().await?; query .query_streaming(&conn) .await .map(|stream| { stream.and_then(|row| async move { let telegram_id: i64 = row .try_get("telegram_userid") .map_err(PqError::BeginTransaction)?; Ok(telegram_id) }) }) .map_err(Into::into) } #[must_use] pub fn get_random_string() -> StackString { let random_bytes: SmallVec<[u8; 16]> = (0..16).map(|_| thread_rng().gen::<u8>()).collect(); URL_SAFE_NO_PAD.encode(&random_bytes).into() }
{ if let Some(mut f) = process.stdout.as_ref() { let mut buf = String::new(); f.read_to_string(&mut buf)?; error!("{}", buf); } return Err(format_err!("Failed with exit status {exit_status:?}")); }
conditional_block
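This `conditional_block` is the failure branch of `extract_zip`: when the exit status is non-zero it drains the child's piped stdout into the error log before bailing. (Note the function's "not installed" message says md5sum where unzip is clearly meant.) A std-only sketch of the same capture-on-failure pattern using `std::process::Command` instead of the `subprocess` crate:

```rust
use std::process::Command;

fn run_unzip(archive: &str, dest: &str) -> Result<(), String> {
    let out = Command::new("unzip")
        .args([archive, "-d", dest])
        .output()
        .map_err(|e| e.to_string())?;
    if !out.status.success() {
        // Surface whatever the tool printed before it failed, as the
        // original branch does with the piped stdout.
        eprintln!("{}", String::from_utf8_lossy(&out.stdout));
        return Err(format!("Failed with exit status {:?}", out.status));
    }
    Ok(())
}

fn main() {
    if let Err(e) = run_unzip("activity.zip", "/tmp/zip") {
        eprintln!("{e}");
    }
}
```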
intrinsicck.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use metadata::csearch; use middle::def::DefFn; use middle::subst::{Subst, Substs, EnumeratedItems}; use middle::ty::{TransmuteRestriction, ctxt, ty_bare_fn}; use middle::ty::{self, Ty}; use util::ppaux::Repr; use syntax::abi::RustIntrinsic; use syntax::ast::DefId; use syntax::ast; use syntax::ast_map::NodeForeignItem; use syntax::codemap::Span; use syntax::parse::token; use syntax::visit::Visitor; use syntax::visit; pub fn check_crate(tcx: &ctxt) { let mut visitor = IntrinsicCheckingVisitor { tcx: tcx, param_envs: Vec::new(), dummy_sized_ty: tcx.types.isize, dummy_unsized_ty: ty::mk_vec(tcx, tcx.types.isize, None), }; visit::walk_crate(&mut visitor, tcx.map.krate()); } struct IntrinsicCheckingVisitor<'a, 'tcx: 'a> { tcx: &'a ctxt<'tcx>, // As we traverse the AST, we keep a stack of the parameter // environments for each function we encounter. When we find a // call to `transmute`, we can check it in the context of the top // of the stack (which ought not to be empty). param_envs: Vec<ty::ParameterEnvironment<'a,'tcx>>, // Dummy sized/unsized types that use to substitute for type // parameters in order to estimate how big a type will be for any // possible instantiation of the type parameters in scope. See // `check_transmute` for more details. dummy_sized_ty: Ty<'tcx>, dummy_unsized_ty: Ty<'tcx>, } impl<'a, 'tcx> IntrinsicCheckingVisitor<'a, 'tcx> { fn
(&self, def_id: DefId) -> bool { let intrinsic = match ty::lookup_item_type(self.tcx, def_id).ty.sty { ty::ty_bare_fn(_, ref bfty) => bfty.abi == RustIntrinsic, _ => return false }; if def_id.krate == ast::LOCAL_CRATE { match self.tcx.map.get(def_id.node) { NodeForeignItem(ref item) if intrinsic => { token::get_ident(item.ident) == token::intern_and_get_ident("transmute") } _ => false, } } else { match csearch::get_item_path(self.tcx, def_id).last() { Some(ref last) if intrinsic => { token::get_name(last.name()) == token::intern_and_get_ident("transmute") } _ => false, } } } fn check_transmute(&self, span: Span, from: Ty<'tcx>, to: Ty<'tcx>, id: ast::NodeId) { // Find the parameter environment for the most recent function that // we entered. let param_env = match self.param_envs.last() { Some(p) => p, None => { self.tcx.sess.span_bug( span, "transmute encountered outside of any fn"); } }; // Simple case: no type parameters involved. if !ty::type_has_params(from) &&!ty::type_has_self(from) && !ty::type_has_params(to) &&!ty::type_has_self(to) { let restriction = TransmuteRestriction { span: span, original_from: from, original_to: to, substituted_from: from, substituted_to: to, id: id, }; self.push_transmute_restriction(restriction); return; } // The rules around type parameters are a bit subtle. We are // checking these rules before monomorphization, so there may // be unsubstituted type parameters present in the // types. Obviously we cannot create LLVM types for those. // However, if a type parameter appears only indirectly (i.e., // through a pointer), it does not necessarily affect the // size, so that should be allowed. The only catch is that we // DO want to be careful around unsized type parameters, since // fat pointers have a different size than a thin pointer, and // hence `&T` and `&U` have different sizes if `T : Sized` but // `U : Sized` does not hold. // // However, it's not as simple as checking whether `T : // Sized`, because even if `T : Sized` does not hold, that // just means that `T` *may* not be sized. After all, even a // type parameter `T:?Sized` could be bound to a sized // type. (Issue #20116) // // To handle this, we first check for "interior" type // parameters, which are always illegal. If there are none of // those, then we know that the only way that all type // parameters `T` are referenced indirectly, e.g. via a // pointer type like `&T`. In that case, we only care whether // `T` is sized or not, because that influences whether `&T` // is a thin or fat pointer. // // One could imagine establishing a sophisticated constraint // system to ensure that the transmute is legal, but instead // we do something brutally dumb. We just substitute dummy // sized or unsized types for every type parameter in scope, // exhaustively checking all possible combinations. Here are some examples: // // ``` // fn foo<T, U>() { // // T=int, U=int // } // // fn bar<T:?Sized, U>() { // // T=int, U=int // // T=[int], U=int // } // // fn baz<T:?Sized, U:?Sized>() { // // T=int, U=int // // T=[int], U=int // // T=int, U=[int] // // T=[int], U=[int] // } // ``` // // In all cases, we keep the original unsubstituted types // around for error reporting. 
        let from_tc = ty::type_contents(self.tcx, from);
        let to_tc = ty::type_contents(self.tcx, to);
        if from_tc.interior_param() || to_tc.interior_param() {
            span_err!(self.tcx.sess, span, E0139,
                      "cannot transmute to or from a type that contains \
                       type parameters in its interior");
            return;
        }

        let mut substs = param_env.free_substs.clone();
        self.with_each_combination(
            span,
            param_env,
            param_env.free_substs.types.iter_enumerated(),
            &mut substs,
            &mut |substs| {
                let restriction = TransmuteRestriction {
                    span: span,
                    original_from: from,
                    original_to: to,
                    substituted_from: from.subst(self.tcx, substs),
                    substituted_to: to.subst(self.tcx, substs),
                    id: id,
                };
                self.push_transmute_restriction(restriction);
            });
    }

    fn with_each_combination(&self,
                             span: Span,
                             param_env: &ty::ParameterEnvironment<'a, 'tcx>,
                             mut types_in_scope: EnumeratedItems<Ty<'tcx>>,
                             substs: &mut Substs<'tcx>,
                             callback: &mut FnMut(&Substs<'tcx>)) {
        // This function invokes `callback` many times with different
        // substitutions that replace all the parameters in scope with
        // either `int` or `[int]`, depending on whether the type
        // parameter is known to be sized. See the big comment above for
        // an explanation of why this is a reasonable thing to do.
        match types_in_scope.next() {
            None => {
                debug!("with_each_combination(substs={})",
                       substs.repr(self.tcx));
                callback(substs);
            }
            Some((space, index, &param_ty)) => {
                debug!("with_each_combination: space={:?}, index={}, param_ty={}",
                       space, index, param_ty.repr(self.tcx));

                if !ty::type_is_sized(param_env, span, param_ty) {
                    debug!("with_each_combination: param_ty is not known to be sized");
                    substs.types.get_mut_slice(space)[index] = self.dummy_unsized_ty;
                    self.with_each_combination(span, param_env, types_in_scope.clone(),
                                               substs, callback);
                }

                substs.types.get_mut_slice(space)[index] = self.dummy_sized_ty;
                self.with_each_combination(span, param_env, types_in_scope,
                                           substs, callback);
            }
        }
    }

    fn push_transmute_restriction(&self, restriction: TransmuteRestriction<'tcx>) {
        debug!("Pushing transmute restriction: {}", restriction.repr(self.tcx));
        self.tcx.transmute_restrictions.borrow_mut().push(restriction);
    }
}

impl<'a, 'tcx, 'v> Visitor<'v> for IntrinsicCheckingVisitor<'a, 'tcx> {
    fn visit_fn(&mut self, fk: visit::FnKind<'v>, fd: &'v ast::FnDecl,
                b: &'v ast::Block, s: Span, id: ast::NodeId) {
        match fk {
            visit::FkItemFn(..) | visit::FkMethod(..) => {
                let param_env = ty::ParameterEnvironment::for_item(self.tcx, id);
                self.param_envs.push(param_env);
                visit::walk_fn(self, fk, fd, b, s);
                self.param_envs.pop();
            }
            visit::FkFnBlock(..) => {
                visit::walk_fn(self, fk, fd, b, s);
            }
        }
    }

    fn visit_expr(&mut self, expr: &ast::Expr) {
        if let ast::ExprPath(..) = expr.node {
            match ty::resolve_expr(self.tcx, expr) {
                DefFn(did, _) if self.def_id_is_transmute(did) => {
                    let typ = ty::node_id_to_type(self.tcx, expr.id);
                    match typ.sty {
                        ty_bare_fn(_, ref bare_fn_ty) if bare_fn_ty.abi == RustIntrinsic => {
                            if let ty::FnConverging(to) = bare_fn_ty.sig.0.output {
                                let from = bare_fn_ty.sig.0.inputs[0];
                                self.check_transmute(expr.span, from, to, expr.id);
                            }
                        }
                        _ => {
                            self.tcx
                                .sess
                                .span_bug(expr.span, "transmute wasn't a bare fn?!");
                        }
                    }
                }
                _ => {}
            }
        }
        visit::walk_expr(self, expr);
    }
}

impl<'tcx> Repr<'tcx> for TransmuteRestriction<'tcx> {
    fn repr(&self, tcx: &ty::ctxt<'tcx>) -> String {
        format!("TransmuteRestriction(id={}, original=({},{}), substituted=({},{}))",
                self.id,
                self.original_from.repr(tcx),
                self.original_to.repr(tcx),
                self.substituted_from.repr(tcx),
                self.substituted_to.repr(tcx))
    }
}
def_id_is_transmute
identifier_name
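The big comment in the row above describes an exhaustive sized/unsized substitution. Here is a minimal standalone sketch of that enumeration, outside of rustc's internals: `Dummy`, `maybe_unsized`, and the function below are invented stand-ins for `Substs` and `ty::type_is_sized`, so this illustrates the shape of the algorithm rather than the compiler's actual API.

```rust
#[derive(Clone, Copy, Debug)]
enum Dummy {
    Sized,   // stands in for `isize`
    Unsized, // stands in for `[isize]`
}

/// Recursively assign a dummy type to each remaining parameter and invoke
/// `callback` once per complete assignment. A parameter known to be sized
/// only ever receives `Dummy::Sized`; a `T: ?Sized` parameter is tried both
/// ways, doubling the number of combinations.
fn with_each_combination(
    maybe_unsized: &[bool],
    chosen: &mut Vec<Dummy>,
    callback: &mut dyn FnMut(&[Dummy]),
) {
    match maybe_unsized.split_first() {
        None => callback(chosen),
        Some((&flexible, rest)) => {
            if flexible {
                chosen.push(Dummy::Unsized);
                with_each_combination(rest, chosen, callback);
                chosen.pop();
            }
            chosen.push(Dummy::Sized);
            with_each_combination(rest, chosen, callback);
            chosen.pop();
        }
    }
}

fn main() {
    // Mirrors `fn baz<T: ?Sized, U: ?Sized>()` from the comment above:
    // two flexible parameters yield four combinations.
    let mut count = 0;
    with_each_combination(&[true, true], &mut Vec::new(), &mut |combo| {
        println!("{:?}", combo);
        count += 1;
    });
    assert_eq!(count, 4);
}
```

As in the real `with_each_combination`, each maybe-unsized parameter doubles the search space, so `n` flexible parameters produce `2^n` callback invocations.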
intrinsicck.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use metadata::csearch; use middle::def::DefFn; use middle::subst::{Subst, Substs, EnumeratedItems}; use middle::ty::{TransmuteRestriction, ctxt, ty_bare_fn}; use middle::ty::{self, Ty}; use util::ppaux::Repr; use syntax::abi::RustIntrinsic; use syntax::ast::DefId; use syntax::ast; use syntax::ast_map::NodeForeignItem; use syntax::codemap::Span; use syntax::parse::token; use syntax::visit::Visitor; use syntax::visit; pub fn check_crate(tcx: &ctxt) { let mut visitor = IntrinsicCheckingVisitor { tcx: tcx, param_envs: Vec::new(), dummy_sized_ty: tcx.types.isize, dummy_unsized_ty: ty::mk_vec(tcx, tcx.types.isize, None), }; visit::walk_crate(&mut visitor, tcx.map.krate()); } struct IntrinsicCheckingVisitor<'a, 'tcx: 'a> { tcx: &'a ctxt<'tcx>, // As we traverse the AST, we keep a stack of the parameter // environments for each function we encounter. When we find a // call to `transmute`, we can check it in the context of the top // of the stack (which ought not to be empty). param_envs: Vec<ty::ParameterEnvironment<'a,'tcx>>, // Dummy sized/unsized types that use to substitute for type // parameters in order to estimate how big a type will be for any // possible instantiation of the type parameters in scope. See // `check_transmute` for more details. dummy_sized_ty: Ty<'tcx>, dummy_unsized_ty: Ty<'tcx>, } impl<'a, 'tcx> IntrinsicCheckingVisitor<'a, 'tcx> { fn def_id_is_transmute(&self, def_id: DefId) -> bool { let intrinsic = match ty::lookup_item_type(self.tcx, def_id).ty.sty { ty::ty_bare_fn(_, ref bfty) => bfty.abi == RustIntrinsic, _ => return false }; if def_id.krate == ast::LOCAL_CRATE { match self.tcx.map.get(def_id.node) { NodeForeignItem(ref item) if intrinsic => { token::get_ident(item.ident) == token::intern_and_get_ident("transmute") } _ => false, } } else { match csearch::get_item_path(self.tcx, def_id).last() { Some(ref last) if intrinsic => { token::get_name(last.name()) == token::intern_and_get_ident("transmute") } _ => false, } } } fn check_transmute(&self, span: Span, from: Ty<'tcx>, to: Ty<'tcx>, id: ast::NodeId) { // Find the parameter environment for the most recent function that // we entered. let param_env = match self.param_envs.last() { Some(p) => p, None => { self.tcx.sess.span_bug( span, "transmute encountered outside of any fn"); } }; // Simple case: no type parameters involved. if !ty::type_has_params(from) &&!ty::type_has_self(from) && !ty::type_has_params(to) &&!ty::type_has_self(to) { let restriction = TransmuteRestriction { span: span, original_from: from, original_to: to, substituted_from: from, substituted_to: to, id: id, }; self.push_transmute_restriction(restriction); return; } // The rules around type parameters are a bit subtle. We are // checking these rules before monomorphization, so there may // be unsubstituted type parameters present in the // types. Obviously we cannot create LLVM types for those. // However, if a type parameter appears only indirectly (i.e., // through a pointer), it does not necessarily affect the // size, so that should be allowed. 
The only catch is that we // DO want to be careful around unsized type parameters, since // fat pointers have a different size than a thin pointer, and // hence `&T` and `&U` have different sizes if `T : Sized` but // `U : Sized` does not hold. // // However, it's not as simple as checking whether `T : // Sized`, because even if `T : Sized` does not hold, that // just means that `T` *may* not be sized. After all, even a // type parameter `T:?Sized` could be bound to a sized // type. (Issue #20116) // // To handle this, we first check for "interior" type // parameters, which are always illegal. If there are none of // those, then we know that the only way that all type // parameters `T` are referenced indirectly, e.g. via a // pointer type like `&T`. In that case, we only care whether // `T` is sized or not, because that influences whether `&T` // is a thin or fat pointer. // // One could imagine establishing a sophisticated constraint // system to ensure that the transmute is legal, but instead // we do something brutally dumb. We just substitute dummy // sized or unsized types for every type parameter in scope, // exhaustively checking all possible combinations. Here are some examples: // // ``` // fn foo<T, U>() { // // T=int, U=int // } // // fn bar<T:?Sized, U>() { // // T=int, U=int // // T=[int], U=int // } // // fn baz<T:?Sized, U:?Sized>() { // // T=int, U=int // // T=[int], U=int // // T=int, U=[int] // // T=[int], U=[int] // } // ``` // // In all cases, we keep the original unsubstituted types // around for error reporting. let from_tc = ty::type_contents(self.tcx, from); let to_tc = ty::type_contents(self.tcx, to); if from_tc.interior_param() || to_tc.interior_param() { span_err!(self.tcx.sess, span, E0139, "cannot transmute to or from a type that contains \ type parameters in its interior"); return; } let mut substs = param_env.free_substs.clone(); self.with_each_combination( span, param_env, param_env.free_substs.types.iter_enumerated(), &mut substs, &mut |substs| { let restriction = TransmuteRestriction { span: span, original_from: from, original_to: to, substituted_from: from.subst(self.tcx, substs), substituted_to: to.subst(self.tcx, substs), id: id, }; self.push_transmute_restriction(restriction); }); } fn with_each_combination(&self, span: Span, param_env: &ty::ParameterEnvironment<'a,'tcx>, mut types_in_scope: EnumeratedItems<Ty<'tcx>>, substs: &mut Substs<'tcx>, callback: &mut FnMut(&Substs<'tcx>)) { // This parameter invokes `callback` many times with different // substitutions that replace all the parameters in scope with // either `int` or `[int]`, depending on whether the type // parameter is known to be sized. See big comment above for // an explanation of why this is a reasonable thing to do. match types_in_scope.next() { None => { debug!("with_each_combination(substs={})", substs.repr(self.tcx)); callback(substs); } Some((space, index, &param_ty)) => { debug!("with_each_combination: space={:?}, index={}, param_ty={}", space, index, param_ty.repr(self.tcx)); if!ty::type_is_sized(param_env, span, param_ty) { debug!("with_each_combination: param_ty is not known to be sized"); substs.types.get_mut_slice(space)[index] = self.dummy_unsized_ty; self.with_each_combination(span, param_env, types_in_scope.clone(), substs, callback); } substs.types.get_mut_slice(space)[index] = self.dummy_sized_ty; self.with_each_combination(span, param_env, types_in_scope, substs, callback); } } } fn push_transmute_restriction(&self, restriction: TransmuteRestriction<'tcx>)
} impl<'a, 'tcx, 'v> Visitor<'v> for IntrinsicCheckingVisitor<'a, 'tcx> { fn visit_fn(&mut self, fk: visit::FnKind<'v>, fd: &'v ast::FnDecl, b: &'v ast::Block, s: Span, id: ast::NodeId) { match fk { visit::FkItemFn(..) | visit::FkMethod(..) => { let param_env = ty::ParameterEnvironment::for_item(self.tcx, id); self.param_envs.push(param_env); visit::walk_fn(self, fk, fd, b, s); self.param_envs.pop(); } visit::FkFnBlock(..) => { visit::walk_fn(self, fk, fd, b, s); } } } fn visit_expr(&mut self, expr: &ast::Expr) { if let ast::ExprPath(..) = expr.node { match ty::resolve_expr(self.tcx, expr) { DefFn(did, _) if self.def_id_is_transmute(did) => { let typ = ty::node_id_to_type(self.tcx, expr.id); match typ.sty { ty_bare_fn(_, ref bare_fn_ty) if bare_fn_ty.abi == RustIntrinsic => { if let ty::FnConverging(to) = bare_fn_ty.sig.0.output { let from = bare_fn_ty.sig.0.inputs[0]; self.check_transmute(expr.span, from, to, expr.id); } } _ => { self.tcx .sess .span_bug(expr.span, "transmute wasn't a bare fn?!"); } } } _ => {} } } visit::walk_expr(self, expr); } } impl<'tcx> Repr<'tcx> for TransmuteRestriction<'tcx> { fn repr(&self, tcx: &ty::ctxt<'tcx>) -> String { format!("TransmuteRestriction(id={}, original=({},{}), substituted=({},{}))", self.id, self.original_from.repr(tcx), self.original_to.repr(tcx), self.substituted_from.repr(tcx), self.substituted_to.repr(tcx)) } }
{ debug!("Pushing transmute restriction: {}", restriction.repr(self.tcx)); self.tcx.transmute_restrictions.borrow_mut().push(restriction); }
identifier_body
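The restriction recorded by this row's body exists because the size of a reference depends on whether its referent is sized. A quick self-contained check of that thin-vs-fat pointer distinction, using only `std::mem::size_of` (nothing here is rustc-internal):

```rust
use std::mem::size_of;

fn main() {
    // Thin pointer: just an address.
    println!("&i32:   {} bytes", size_of::<&i32>());
    // Fat pointer: address plus element count.
    println!("&[i32]: {} bytes", size_of::<&[i32]>());
    assert_eq!(size_of::<&[i32]>(), 2 * size_of::<&i32>());
}
```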
intrinsicck.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use metadata::csearch; use middle::def::DefFn; use middle::subst::{Subst, Substs, EnumeratedItems}; use middle::ty::{TransmuteRestriction, ctxt, ty_bare_fn}; use middle::ty::{self, Ty}; use util::ppaux::Repr; use syntax::abi::RustIntrinsic; use syntax::ast::DefId; use syntax::ast;
use syntax::codemap::Span; use syntax::parse::token; use syntax::visit::Visitor; use syntax::visit; pub fn check_crate(tcx: &ctxt) { let mut visitor = IntrinsicCheckingVisitor { tcx: tcx, param_envs: Vec::new(), dummy_sized_ty: tcx.types.isize, dummy_unsized_ty: ty::mk_vec(tcx, tcx.types.isize, None), }; visit::walk_crate(&mut visitor, tcx.map.krate()); } struct IntrinsicCheckingVisitor<'a, 'tcx: 'a> { tcx: &'a ctxt<'tcx>, // As we traverse the AST, we keep a stack of the parameter // environments for each function we encounter. When we find a // call to `transmute`, we can check it in the context of the top // of the stack (which ought not to be empty). param_envs: Vec<ty::ParameterEnvironment<'a,'tcx>>, // Dummy sized/unsized types that use to substitute for type // parameters in order to estimate how big a type will be for any // possible instantiation of the type parameters in scope. See // `check_transmute` for more details. dummy_sized_ty: Ty<'tcx>, dummy_unsized_ty: Ty<'tcx>, } impl<'a, 'tcx> IntrinsicCheckingVisitor<'a, 'tcx> { fn def_id_is_transmute(&self, def_id: DefId) -> bool { let intrinsic = match ty::lookup_item_type(self.tcx, def_id).ty.sty { ty::ty_bare_fn(_, ref bfty) => bfty.abi == RustIntrinsic, _ => return false }; if def_id.krate == ast::LOCAL_CRATE { match self.tcx.map.get(def_id.node) { NodeForeignItem(ref item) if intrinsic => { token::get_ident(item.ident) == token::intern_and_get_ident("transmute") } _ => false, } } else { match csearch::get_item_path(self.tcx, def_id).last() { Some(ref last) if intrinsic => { token::get_name(last.name()) == token::intern_and_get_ident("transmute") } _ => false, } } } fn check_transmute(&self, span: Span, from: Ty<'tcx>, to: Ty<'tcx>, id: ast::NodeId) { // Find the parameter environment for the most recent function that // we entered. let param_env = match self.param_envs.last() { Some(p) => p, None => { self.tcx.sess.span_bug( span, "transmute encountered outside of any fn"); } }; // Simple case: no type parameters involved. if !ty::type_has_params(from) &&!ty::type_has_self(from) && !ty::type_has_params(to) &&!ty::type_has_self(to) { let restriction = TransmuteRestriction { span: span, original_from: from, original_to: to, substituted_from: from, substituted_to: to, id: id, }; self.push_transmute_restriction(restriction); return; } // The rules around type parameters are a bit subtle. We are // checking these rules before monomorphization, so there may // be unsubstituted type parameters present in the // types. Obviously we cannot create LLVM types for those. // However, if a type parameter appears only indirectly (i.e., // through a pointer), it does not necessarily affect the // size, so that should be allowed. The only catch is that we // DO want to be careful around unsized type parameters, since // fat pointers have a different size than a thin pointer, and // hence `&T` and `&U` have different sizes if `T : Sized` but // `U : Sized` does not hold. // // However, it's not as simple as checking whether `T : // Sized`, because even if `T : Sized` does not hold, that // just means that `T` *may* not be sized. After all, even a // type parameter `T:?Sized` could be bound to a sized // type. (Issue #20116) // // To handle this, we first check for "interior" type // parameters, which are always illegal. If there are none of // those, then we know that the only way that all type // parameters `T` are referenced indirectly, e.g. via a // pointer type like `&T`. 
In that case, we only care whether // `T` is sized or not, because that influences whether `&T` // is a thin or fat pointer. // // One could imagine establishing a sophisticated constraint // system to ensure that the transmute is legal, but instead // we do something brutally dumb. We just substitute dummy // sized or unsized types for every type parameter in scope, // exhaustively checking all possible combinations. Here are some examples: // // ``` // fn foo<T, U>() { // // T=int, U=int // } // // fn bar<T:?Sized, U>() { // // T=int, U=int // // T=[int], U=int // } // // fn baz<T:?Sized, U:?Sized>() { // // T=int, U=int // // T=[int], U=int // // T=int, U=[int] // // T=[int], U=[int] // } // ``` // // In all cases, we keep the original unsubstituted types // around for error reporting. let from_tc = ty::type_contents(self.tcx, from); let to_tc = ty::type_contents(self.tcx, to); if from_tc.interior_param() || to_tc.interior_param() { span_err!(self.tcx.sess, span, E0139, "cannot transmute to or from a type that contains \ type parameters in its interior"); return; } let mut substs = param_env.free_substs.clone(); self.with_each_combination( span, param_env, param_env.free_substs.types.iter_enumerated(), &mut substs, &mut |substs| { let restriction = TransmuteRestriction { span: span, original_from: from, original_to: to, substituted_from: from.subst(self.tcx, substs), substituted_to: to.subst(self.tcx, substs), id: id, }; self.push_transmute_restriction(restriction); }); } fn with_each_combination(&self, span: Span, param_env: &ty::ParameterEnvironment<'a,'tcx>, mut types_in_scope: EnumeratedItems<Ty<'tcx>>, substs: &mut Substs<'tcx>, callback: &mut FnMut(&Substs<'tcx>)) { // This parameter invokes `callback` many times with different // substitutions that replace all the parameters in scope with // either `int` or `[int]`, depending on whether the type // parameter is known to be sized. See big comment above for // an explanation of why this is a reasonable thing to do. match types_in_scope.next() { None => { debug!("with_each_combination(substs={})", substs.repr(self.tcx)); callback(substs); } Some((space, index, &param_ty)) => { debug!("with_each_combination: space={:?}, index={}, param_ty={}", space, index, param_ty.repr(self.tcx)); if!ty::type_is_sized(param_env, span, param_ty) { debug!("with_each_combination: param_ty is not known to be sized"); substs.types.get_mut_slice(space)[index] = self.dummy_unsized_ty; self.with_each_combination(span, param_env, types_in_scope.clone(), substs, callback); } substs.types.get_mut_slice(space)[index] = self.dummy_sized_ty; self.with_each_combination(span, param_env, types_in_scope, substs, callback); } } } fn push_transmute_restriction(&self, restriction: TransmuteRestriction<'tcx>) { debug!("Pushing transmute restriction: {}", restriction.repr(self.tcx)); self.tcx.transmute_restrictions.borrow_mut().push(restriction); } } impl<'a, 'tcx, 'v> Visitor<'v> for IntrinsicCheckingVisitor<'a, 'tcx> { fn visit_fn(&mut self, fk: visit::FnKind<'v>, fd: &'v ast::FnDecl, b: &'v ast::Block, s: Span, id: ast::NodeId) { match fk { visit::FkItemFn(..) | visit::FkMethod(..) => { let param_env = ty::ParameterEnvironment::for_item(self.tcx, id); self.param_envs.push(param_env); visit::walk_fn(self, fk, fd, b, s); self.param_envs.pop(); } visit::FkFnBlock(..) => { visit::walk_fn(self, fk, fd, b, s); } } } fn visit_expr(&mut self, expr: &ast::Expr) { if let ast::ExprPath(..) 
= expr.node { match ty::resolve_expr(self.tcx, expr) { DefFn(did, _) if self.def_id_is_transmute(did) => { let typ = ty::node_id_to_type(self.tcx, expr.id); match typ.sty { ty_bare_fn(_, ref bare_fn_ty) if bare_fn_ty.abi == RustIntrinsic => { if let ty::FnConverging(to) = bare_fn_ty.sig.0.output { let from = bare_fn_ty.sig.0.inputs[0]; self.check_transmute(expr.span, from, to, expr.id); } } _ => { self.tcx .sess .span_bug(expr.span, "transmute wasn't a bare fn?!"); } } } _ => {} } } visit::walk_expr(self, expr); } } impl<'tcx> Repr<'tcx> for TransmuteRestriction<'tcx> { fn repr(&self, tcx: &ty::ctxt<'tcx>) -> String { format!("TransmuteRestriction(id={}, original=({},{}), substituted=({},{}))", self.id, self.original_from.repr(tcx), self.original_to.repr(tcx), self.substituted_from.repr(tcx), self.substituted_to.repr(tcx)) } }
use syntax::ast_map::NodeForeignItem;
random_line_split
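A note on reading these rows: each one is a fill-in-the-middle sample, and the original file is recovered by concatenating `prefix + middle + suffix`; in the row above, the removed middle is a single import line. A tiny sketch of that invariant (the `reconstruct` helper and the shortened literals are illustrative only, not part of any dataset tooling):

```rust
/// A FIM row's source file is exactly prefix, then middle, then suffix.
fn reconstruct(prefix: &str, middle: &str, suffix: &str) -> String {
    format!("{prefix}{middle}{suffix}")
}

fn main() {
    let prefix = "use syntax::ast;\n";
    let middle = "use syntax::ast_map::NodeForeignItem;";
    let suffix = "\nuse syntax::codemap::Span;\n";

    let source = reconstruct(prefix, middle, suffix);
    assert!(source.contains("NodeForeignItem"));
    println!("{source}");
}
```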
output.rs
use std::io;
use std::string::ToString;
use std::thread::sleep;
use std::time::Duration;
use std::{collections::BTreeMap, path::PathBuf};

use anyhow::Result;
use comfy_table::presets::UTF8_HORIZONTAL_BORDERS_ONLY;
use comfy_table::*;
use snap::read::FrameDecoder;

use pueue::log::{get_log_file_handles, get_log_paths};
use pueue::network::message::{GroupResponseMessage, TaskLogMessage};
use pueue::settings::Settings;
use pueue::state::State;
use pueue::task::{Task, TaskResult, TaskStatus};

use crate::cli::SubCommand;
use crate::output_helper::*;

pub fn print_success(message: &str) {
    println!("{}", message);
}

pub fn print_error(message: &str) {
    let styled = style_text(message, Some(Color::Red), None);
    println!("{}", styled);
}

pub fn print_groups(message: GroupResponseMessage) {
    let mut text = String::new();
    let mut group_iter = message.groups.iter().peekable();
    while let Some((name, status)) = group_iter.next() {
        let parallel = *message.settings.get(name).unwrap();
        let styled = get_group_headline(name, &status, parallel);
        text.push_str(&styled);
        if group_iter.peek().is_some() {
            text.push('\n');
        }
    }
    println!("{}", text);
}

/// Print the current state of the daemon in a nicely formatted table.
pub fn print_state(state: State, cli_command: &SubCommand, settings: &Settings) {
    let (json, group_only) = match cli_command {
        SubCommand::Status { json, group } => (*json, group.clone()),
        _ => panic!(
            "Got wrong Subcommand {:?} in print_state. This shouldn't happen",
            cli_command
        ),
    };

    // If the json flag is specified, print the state as json and exit.
    if json {
        println!("{}", serde_json::to_string(&state).unwrap());
        return;
    }

    // Early exit and hint if there are no tasks in the queue.
    if state.tasks.is_empty() {
        println!("Task list is empty. Add tasks with `pueue add -- [cmd]`");
        return;
    }

    // Sort all tasks by their respective group.
    let sorted_tasks = sort_tasks_by_group(&state.tasks);

    // Always print the default queue at the very top.
    if group_only.is_none() {
        let tasks = sorted_tasks.get("default").unwrap();
        let headline = get_group_headline(
            &"default",
            &state.groups.get("default").unwrap(),
            *state.settings.daemon.groups.get("default").unwrap(),
        );
        println!("{}", headline);
        print_table(&tasks, settings);

        // Add a newline if there are further groups to be printed.
        if sorted_tasks.len() > 1 {
            println!();
        }
    }

    let mut sorted_iter = sorted_tasks.iter().peekable();
    // Print a new table for each group.
    while let Some((group, tasks)) = sorted_iter.next() {
        // We always want to print the default group at the very top.
        // That's why we print it outside of this loop and skip it in here.
if group.eq("default") { continue; } // Skip unwanted groups, if a single group is requested if let Some(group_only) = &group_only { if group_only!= group { continue; } } let headline = get_group_headline( &group, &state.groups.get(group).unwrap(), *state.settings.daemon.groups.get(group).unwrap(), ); println!("{}", headline); print_table(&tasks, settings); // Add a newline between groups if sorted_iter.peek().is_some() { println!(); } } } /// Print some tasks into a nicely formatted table fn print_table(tasks: &BTreeMap<usize, Task>, settings: &Settings) { let (has_delayed_tasks, has_dependencies, has_labels) = has_special_columns(tasks); // Create table header row let mut headers = vec![Cell::new("Index"), Cell::new("Status")]; if has_delayed_tasks { headers.push(Cell::new("Enqueue At")); } if has_dependencies { headers.push(Cell::new("Deps")); } headers.push(Cell::new("Exitcode")); if has_labels { headers.push(Cell::new("Label")); } headers.append(&mut vec![ Cell::new("Command"), Cell::new("Path"), Cell::new("Start"), Cell::new("End"), ]); // Initialize comfy table. let mut table = Table::new(); table .set_content_arrangement(ContentArrangement::Dynamic) .load_preset(UTF8_HORIZONTAL_BORDERS_ONLY) .set_header(headers); // Add rows one by one. for (id, task) in tasks { let mut row = Row::new(); if let Some(height) = settings.client.max_status_lines { row.max_height(height); } row.add_cell(Cell::new(&id.to_string())); // Determine the human readable task status representation and the respective color. let status_string = task.status.to_string(); let (status_text, color) = match task.status { TaskStatus::Running => (status_string, Color::Green), TaskStatus::Paused | TaskStatus::Locked => (status_string, Color::White), TaskStatus::Done => match &task.result { Some(TaskResult::Success) => (TaskResult::Success.to_string(), Color::Green), Some(TaskResult::DependencyFailed) => ("Dependency failed".to_string(), Color::Red), Some(TaskResult::FailedToSpawn(_)) => ("Failed to spawn".to_string(), Color::Red), Some(result) => (result.to_string(), Color::Red), None => panic!("Got a 'Done' task without a task result. Please report this bug."), }, _ => (status_string, Color::Yellow), }; row.add_cell(Cell::new(status_text).fg(color)); if has_delayed_tasks { if let Some(enqueue_at) = task.enqueue_at { row.add_cell(Cell::new(enqueue_at.format("%Y-%m-%d\n%H:%M:%S"))); } else { row.add_cell(Cell::new("")); } } if has_dependencies { let text = task .dependencies .iter() .map(|id| id.to_string()) .collect::<Vec<String>>() .join(", "); row.add_cell(Cell::new(text)); } // Match the color of the exit code. // If the exit_code is none, it has been killed by the task handler. let exit_code_cell = match task.result { Some(TaskResult::Success) => Cell::new("0").fg(Color::Green), Some(TaskResult::Failed(code)) => Cell::new(&code.to_string()).fg(Color::Red), _ => Cell::new(""), }; row.add_cell(exit_code_cell); if has_labels { if let Some(label) = &task.label { row.add_cell(label.to_cell()); } else { row.add_cell(Cell::new("")); } } // Add command and path. if settings.client.show_expanded_aliases { row.add_cell(Cell::new(&task.command)); } else { row.add_cell(Cell::new(&task.original_command)); } row.add_cell(Cell::new(&task.path)); // Add start time, if already set. if let Some(start) = task.start { let formatted = start.format("%H:%M").to_string(); row.add_cell(Cell::new(&formatted)); } else { row.add_cell(Cell::new("")); } // Add finish time, if already set. 
        if let Some(end) = task.end {
            let formatted = end.format("%H:%M").to_string();
            row.add_cell(Cell::new(&formatted));
        } else {
            row.add_cell(Cell::new(""));
        }

        table.add_row(row);
    }

    // Print the table.
    println!("{}", table);
}

/// Print the log output of finished tasks.
/// Either print the logs of every task
/// or only print the logs of the specified tasks.
pub fn print_logs(
    mut task_logs: BTreeMap<usize, TaskLogMessage>,
    cli_command: &SubCommand,
    settings: &Settings,
) {
    let (json, task_ids) = match cli_command {
        SubCommand::Log { json, task_ids } => (*json, task_ids.clone()),
        _ => panic!(
            "Got wrong Subcommand {:?} in print_log. This shouldn't happen",
            cli_command
        ),
    };

    if json {
        println!("{}", serde_json::to_string(&task_logs).unwrap());
        return;
    }

    if task_ids.is_empty() && task_logs.is_empty() {
        println!("There are no finished tasks");
        return;
    }

    if !task_ids.is_empty() && task_logs.is_empty() {
        println!("There are no finished tasks for your specified ids");
        return;
    }

    let mut task_iter = task_logs.iter_mut().peekable();
    while let Some((_, mut task_log)) = task_iter.next() {
        print_log(&mut task_log, settings);

        // Add a newline if there is another task that's going to be printed.
        if let Some((_, task_log)) = task_iter.peek() {
            if !vec![TaskStatus::Done, TaskStatus::Running, TaskStatus::Paused]
                .contains(&task_log.task.status)
            {
                println!();
            }
        }
    }
}

/// Print the log of a single task.
pub fn print_log(task_log: &mut TaskLogMessage, settings: &Settings) {
    let task = &task_log.task;

    // We only show logs of finished or running tasks.
    if !vec![TaskStatus::Done, TaskStatus::Running, TaskStatus::Paused].contains(&task.status) {
        return;
    }

    // Print the task id and exit code.
    let task_text = style_text(&format!("Task {}", task.id), None, Some(Attribute::Bold));
    let (exit_status, color) = match &task.result {
        Some(TaskResult::Success) => ("completed successfully".into(), Color::Green),
        Some(TaskResult::Failed(exit_code)) => {
            (format!("failed with exit code {}", exit_code), Color::Red)
        }
        Some(TaskResult::FailedToSpawn(err)) => (format!("failed to spawn: {}", err), Color::Red),
        Some(TaskResult::Killed) => ("killed by system or user".into(), Color::Red),
        Some(TaskResult::DependencyFailed) => ("dependency failed".into(), Color::Red),
        None => ("running".into(), Color::White),
    };
    let status_text = style_text(&exit_status, Some(color), None);
    println!("{} {}", task_text, status_text);

    // Print the command and path.
    println!("Command: {}", task.command);
    println!("Path: {}", task.path);

    if let Some(start) = task.start {
        println!("Start: {}", start.to_rfc2822());
    }
    if let Some(end) = task.end {
        println!("End: {}", end.to_rfc2822());
    }

    if settings.client.read_local_logs {
        print_local_log_output(task_log.task.id, settings);
    } else if task_log.stdout.is_some() && task_log.stderr.is_some() {
        print_task_output_from_daemon(task_log);
    } else {
        println!("Logs requested from pueue daemon, but none received. Please report this bug.");
    }
}

/// The daemon didn't send any log output, since we didn't request any.
/// In that case, read the log files from the local pueue directory.
pub fn
(task_id: usize, settings: &Settings) {
    let (mut stdout_log, mut stderr_log) =
        match get_log_file_handles(task_id, &settings.shared.pueue_directory) {
            Ok((stdout, stderr)) => (stdout, stderr),
            Err(err) => {
                println!("Failed to get log file handles: {}", err);
                return;
            }
        };

    // Stdout handler to directly write log file output to io::stdout
    // without having to load anything into memory.
    let mut stdout = io::stdout();

    if let Ok(metadata) = stdout_log.metadata() {
        if metadata.len() != 0 {
            println!(
                "\n{}",
                style_text("stdout:", Some(Color::Green), Some(Attribute::Bold))
            );

            if let Err(err) = io::copy(&mut stdout_log, &mut stdout) {
                println!("Failed reading local stdout log file: {}", err);
            };
        }
    }

    if let Ok(metadata) = stderr_log.metadata() {
        if metadata.len() != 0 {
            // Add a spacer line between stdout and stderr.
            println!(
                "\n{}",
                style_text("stderr:", Some(Color::Red), Some(Attribute::Bold))
            );

            if let Err(err) = io::copy(&mut stderr_log, &mut stdout) {
                println!("Failed reading local stderr log file: {}", err);
            };
        }
    }
}

/// Prints log output received from the daemon.
/// We can safely call `.unwrap()` on stdout and stderr in here, since this
/// branch is always called after ensuring that both are `Some`.
pub fn print_task_output_from_daemon(task_log: &TaskLogMessage) {
    // Save whether stdout was printed, so we can add a newline between outputs.
    if !task_log.stdout.as_ref().unwrap().is_empty() {
        if let Err(err) = print_remote_task_output(&task_log, true) {
            println!("Error while parsing stdout: {}", err);
        }
    }

    if !task_log.stderr.as_ref().unwrap().is_empty() {
        if let Err(err) = print_remote_task_output(&task_log, false) {
            println!("Error while parsing stderr: {}", err);
        };
    }
}

/// Print log output of a finished process.
pub fn print_remote_task_output(task_log: &TaskLogMessage, stdout: bool) -> Result<()> {
    let (pre_text, color, bytes) = if stdout {
        ("stdout: ", Color::Green, task_log.stdout.as_ref().unwrap())
    } else {
        ("stderr: ", Color::Red, task_log.stderr.as_ref().unwrap())
    };

    println!(
        "\n{}",
        style_text(pre_text, Some(color), Some(Attribute::Bold))
    );

    let mut decompressor = FrameDecoder::new(bytes.as_slice());

    let stdout = io::stdout();
    let mut write = stdout.lock();
    io::copy(&mut decompressor, &mut write)?;

    Ok(())
}

/// Follow the log output of a running task.
///
/// If no task is specified, this will check for the following cases:
///
/// - No running task: Print an error that there are no running tasks.
/// - Single running task: Follow the output of that task.
/// - Multiple running tasks: Print out the list of possible tasks to follow.
pub fn follow_task_logs(pueue_directory: &PathBuf, task_id: usize, stderr: bool) {
    let (stdout_handle, stderr_handle) = match get_log_file_handles(task_id, &pueue_directory) {
        Ok((stdout, stderr)) => (stdout, stderr),
        Err(err) => {
            println!("Failed to get log file handles: {}", err);
            return;
        }
    };
    let mut handle = if stderr { stderr_handle } else { stdout_handle };

    let (out_path, err_path) = get_log_paths(task_id, &pueue_directory);
    let handle_path = if stderr { err_path } else { out_path };

    // Stdout handler to directly write log file output to io::stdout
    // without having to load anything into memory.
    let mut stdout = io::stdout();
    loop {
        // Check whether the file still exists. Exit if it doesn't.
        if !handle_path.exists() {
            println!("File has gone away. Did somebody remove the task?");
            return;
        }
        // Read the next chunk of text from the last position.
        if let Err(err) = io::copy(&mut handle, &mut stdout) {
            println!("Error while reading file: {}", err);
            return;
        };

        let timeout = Duration::from_millis(100);
        sleep(timeout);
    }
}
print_local_log_output
identifier_name
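The `follow_task_logs` loop in the row above works because a `File` handle remembers its read offset: once `io::copy` reaches EOF, a later call on the same handle yields only the bytes appended since. A self-contained sketch of that behaviour (the temp-file name and strings are invented for the demo; real pueue reads its own log paths):

```rust
use std::fs::File;
use std::io::{self, Write};

fn main() -> io::Result<()> {
    let path = std::env::temp_dir().join("follow_demo.log");
    let mut writer = File::create(&path)?;
    let mut reader = File::open(&path)?;
    let mut stdout = io::stdout();

    writeln!(writer, "first chunk")?;
    // Reads everything currently in the file, then stops at EOF.
    io::copy(&mut reader, &mut stdout)?;

    writeln!(writer, "second chunk")?;
    // The handle kept its offset, so only the newly appended bytes appear.
    io::copy(&mut reader, &mut stdout)?;

    std::fs::remove_file(&path)?;
    Ok(())
}
```

Combined with a short `sleep` between iterations, this gives the tail-follow behaviour without re-reading the whole file each pass.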
output.rs
use std::io; use std::string::ToString; use std::thread::sleep; use std::time::Duration; use std::{collections::BTreeMap, path::PathBuf}; use anyhow::Result; use comfy_table::presets::UTF8_HORIZONTAL_BORDERS_ONLY; use comfy_table::*; use snap::read::FrameDecoder; use pueue::log::{get_log_file_handles, get_log_paths}; use pueue::network::message::{GroupResponseMessage, TaskLogMessage}; use pueue::settings::Settings; use pueue::state::State; use pueue::task::{Task, TaskResult, TaskStatus}; use crate::cli::SubCommand; use crate::output_helper::*; pub fn print_success(message: &str)
pub fn print_error(message: &str) { let styled = style_text(message, Some(Color::Red), None); println!("{}", styled); } pub fn print_groups(message: GroupResponseMessage) { let mut text = String::new(); let mut group_iter = message.groups.iter().peekable(); while let Some((name, status)) = group_iter.next() { let parallel = *message.settings.get(name).unwrap(); let styled = get_group_headline(name, &status, parallel); text.push_str(&styled); if group_iter.peek().is_some() { text.push('\n'); } } println!("{}", text); } /// Print the current state of the daemon in a nicely formatted table. pub fn print_state(state: State, cli_command: &SubCommand, settings: &Settings) { let (json, group_only) = match cli_command { SubCommand::Status { json, group } => (*json, group.clone()), _ => panic!( "Got wrong Subcommand {:?} in print_state. This shouldn't happen", cli_command ), }; // If the json flag is specified, print the state as json and exit. if json { println!("{}", serde_json::to_string(&state).unwrap()); return; } // Early exit and hint if there are no tasks in the queue if state.tasks.is_empty() { println!("Task list is empty. Add tasks with `pueue add -- [cmd]`"); return; } // Sort all tasks by their respective group; let sorted_tasks = sort_tasks_by_group(&state.tasks); // Always print the default queue at the very top. if group_only.is_none() { let tasks = sorted_tasks.get("default").unwrap(); let headline = get_group_headline( &"default", &state.groups.get("default").unwrap(), *state.settings.daemon.groups.get("default").unwrap(), ); println!("{}", headline); print_table(&tasks, settings); // Add a newline if there are further groups to be printed if sorted_tasks.len() > 1 { println!(); } } let mut sorted_iter = sorted_tasks.iter().peekable(); // Print new table for each group while let Some((group, tasks)) = sorted_iter.next() { // We always want to print the default group at the very top. // That's why we print it outside of this loop and skip it in here. if group.eq("default") { continue; } // Skip unwanted groups, if a single group is requested if let Some(group_only) = &group_only { if group_only!= group { continue; } } let headline = get_group_headline( &group, &state.groups.get(group).unwrap(), *state.settings.daemon.groups.get(group).unwrap(), ); println!("{}", headline); print_table(&tasks, settings); // Add a newline between groups if sorted_iter.peek().is_some() { println!(); } } } /// Print some tasks into a nicely formatted table fn print_table(tasks: &BTreeMap<usize, Task>, settings: &Settings) { let (has_delayed_tasks, has_dependencies, has_labels) = has_special_columns(tasks); // Create table header row let mut headers = vec![Cell::new("Index"), Cell::new("Status")]; if has_delayed_tasks { headers.push(Cell::new("Enqueue At")); } if has_dependencies { headers.push(Cell::new("Deps")); } headers.push(Cell::new("Exitcode")); if has_labels { headers.push(Cell::new("Label")); } headers.append(&mut vec![ Cell::new("Command"), Cell::new("Path"), Cell::new("Start"), Cell::new("End"), ]); // Initialize comfy table. let mut table = Table::new(); table .set_content_arrangement(ContentArrangement::Dynamic) .load_preset(UTF8_HORIZONTAL_BORDERS_ONLY) .set_header(headers); // Add rows one by one. for (id, task) in tasks { let mut row = Row::new(); if let Some(height) = settings.client.max_status_lines { row.max_height(height); } row.add_cell(Cell::new(&id.to_string())); // Determine the human readable task status representation and the respective color. 
let status_string = task.status.to_string(); let (status_text, color) = match task.status { TaskStatus::Running => (status_string, Color::Green), TaskStatus::Paused | TaskStatus::Locked => (status_string, Color::White), TaskStatus::Done => match &task.result { Some(TaskResult::Success) => (TaskResult::Success.to_string(), Color::Green), Some(TaskResult::DependencyFailed) => ("Dependency failed".to_string(), Color::Red), Some(TaskResult::FailedToSpawn(_)) => ("Failed to spawn".to_string(), Color::Red), Some(result) => (result.to_string(), Color::Red), None => panic!("Got a 'Done' task without a task result. Please report this bug."), }, _ => (status_string, Color::Yellow), }; row.add_cell(Cell::new(status_text).fg(color)); if has_delayed_tasks { if let Some(enqueue_at) = task.enqueue_at { row.add_cell(Cell::new(enqueue_at.format("%Y-%m-%d\n%H:%M:%S"))); } else { row.add_cell(Cell::new("")); } } if has_dependencies { let text = task .dependencies .iter() .map(|id| id.to_string()) .collect::<Vec<String>>() .join(", "); row.add_cell(Cell::new(text)); } // Match the color of the exit code. // If the exit_code is none, it has been killed by the task handler. let exit_code_cell = match task.result { Some(TaskResult::Success) => Cell::new("0").fg(Color::Green), Some(TaskResult::Failed(code)) => Cell::new(&code.to_string()).fg(Color::Red), _ => Cell::new(""), }; row.add_cell(exit_code_cell); if has_labels { if let Some(label) = &task.label { row.add_cell(label.to_cell()); } else { row.add_cell(Cell::new("")); } } // Add command and path. if settings.client.show_expanded_aliases { row.add_cell(Cell::new(&task.command)); } else { row.add_cell(Cell::new(&task.original_command)); } row.add_cell(Cell::new(&task.path)); // Add start time, if already set. if let Some(start) = task.start { let formatted = start.format("%H:%M").to_string(); row.add_cell(Cell::new(&formatted)); } else { row.add_cell(Cell::new("")); } // Add finish time, if already set. if let Some(end) = task.end { let formatted = end.format("%H:%M").to_string(); row.add_cell(Cell::new(&formatted)); } else { row.add_cell(Cell::new("")); } table.add_row(row); } // Print the table. println!("{}", table); } /// Print the log ouput of finished tasks. /// Either print the logs of every task /// or only print the logs of the specified tasks. pub fn print_logs( mut task_logs: BTreeMap<usize, TaskLogMessage>, cli_command: &SubCommand, settings: &Settings, ) { let (json, task_ids) = match cli_command { SubCommand::Log { json, task_ids } => (*json, task_ids.clone()), _ => panic!( "Got wrong Subcommand {:?} in print_log. This shouldn't happen", cli_command ), }; if json { println!("{}", serde_json::to_string(&task_logs).unwrap()); return; } if task_ids.is_empty() && task_logs.is_empty() { println!("There are no finished tasks"); return; } if!task_ids.is_empty() && task_logs.is_empty() { println!("There are no finished tasks for your specified ids"); return; } let mut task_iter = task_logs.iter_mut().peekable(); while let Some((_, mut task_log)) = task_iter.next() { print_log(&mut task_log, settings); // Add a newline if there is another task that's going to be printed. if let Some((_, task_log)) = task_iter.peek() { if!vec![TaskStatus::Done, TaskStatus::Running, TaskStatus::Paused] .contains(&task_log.task.status) { println!(); } } } } /// Print the log of a single task. pub fn print_log(task_log: &mut TaskLogMessage, settings: &Settings) { let task = &task_log.task; // We only show logs of finished or running tasks. 
if!vec![TaskStatus::Done, TaskStatus::Running, TaskStatus::Paused].contains(&task.status) { return; } // Print task id and exit code. let task_text = style_text(&format!("Task {}", task.id), None, Some(Attribute::Bold)); let (exit_status, color) = match &task.result { Some(TaskResult::Success) => ("completed successfully".into(), Color::Green), Some(TaskResult::Failed(exit_code)) => { (format!("failed with exit code {}", exit_code), Color::Red) } Some(TaskResult::FailedToSpawn(err)) => (format!("failed to spawn: {}", err), Color::Red), Some(TaskResult::Killed) => ("killed by system or user".into(), Color::Red), Some(TaskResult::DependencyFailed) => ("dependency failed".into(), Color::Red), None => ("running".into(), Color::White), }; let status_text = style_text(&exit_status, Some(color), None); println!("{} {}", task_text, status_text); // Print command and path. println!("Command: {}", task.command); println!("Path: {}", task.path); if let Some(start) = task.start { println!("Start: {}", start.to_rfc2822()); } if let Some(end) = task.end { println!("End: {}", end.to_rfc2822()); } if settings.client.read_local_logs { print_local_log_output(task_log.task.id, settings); } else if task_log.stdout.is_some() && task_log.stderr.is_some() { print_task_output_from_daemon(task_log); } else { println!("Logs requested from pueue daemon, but none received. Please report this bug."); } } /// The daemon didn't send any log output, thereby we didn't request any. /// If that's the case, read the log files from the local pueue directory pub fn print_local_log_output(task_id: usize, settings: &Settings) { let (mut stdout_log, mut stderr_log) = match get_log_file_handles(task_id, &settings.shared.pueue_directory) { Ok((stdout, stderr)) => (stdout, stderr), Err(err) => { println!("Failed to get log file handles: {}", err); return; } }; // Stdout handler to directly write log file output to io::stdout // without having to load anything into memory. let mut stdout = io::stdout(); if let Ok(metadata) = stdout_log.metadata() { if metadata.len()!= 0 { println!( "\n{}", style_text("stdout:", Some(Color::Green), Some(Attribute::Bold)) ); if let Err(err) = io::copy(&mut stdout_log, &mut stdout) { println!("Failed reading local stdout log file: {}", err); }; } } if let Ok(metadata) = stderr_log.metadata() { if metadata.len()!= 0 { // Add a spacer line between stdout and stderr println!( "\n{}", style_text("stderr:", Some(Color::Red), Some(Attribute::Bold)) ); if let Err(err) = io::copy(&mut stderr_log, &mut stdout) { println!("Failed reading local stderr log file: {}", err); }; } } } /// Prints log output received from the daemon. /// We can safely call.unwrap() on stdout and stderr in here, since this /// branch is always called after ensuring that both are `Some`. pub fn print_task_output_from_daemon(task_log: &TaskLogMessage) { // Save whether stdout was printed, so we can add a newline between outputs. if!task_log.stdout.as_ref().unwrap().is_empty() { if let Err(err) = print_remote_task_output(&task_log, true) { println!("Error while parsing stdout: {}", err); } } if!task_log.stderr.as_ref().unwrap().is_empty() { if let Err(err) = print_remote_task_output(&task_log, false) { println!("Error while parsing stderr: {}", err); }; } } /// Print log output of a finished process. 
pub fn print_remote_task_output(task_log: &TaskLogMessage, stdout: bool) -> Result<()> { let (pre_text, color, bytes) = if stdout { ("stdout: ", Color::Green, task_log.stdout.as_ref().unwrap()) } else { ("stderr: ", Color::Red, task_log.stderr.as_ref().unwrap()) }; println!( "\n{}", style_text(pre_text, Some(color), Some(Attribute::Bold)) ); let mut decompressor = FrameDecoder::new(bytes.as_slice()); let stdout = io::stdout(); let mut write = stdout.lock(); io::copy(&mut decompressor, &mut write)?; Ok(()) } /// Follow the log ouput of running task. /// /// If no task is specified, this will check for the following cases: /// /// - No running task: Print an error that there are no running tasks /// - Single running task: Follow the output of that task /// - Multiple running tasks: Print out the list of possible tasks to follow. pub fn follow_task_logs(pueue_directory: &PathBuf, task_id: usize, stderr: bool) { let (stdout_handle, stderr_handle) = match get_log_file_handles(task_id, &pueue_directory) { Ok((stdout, stderr)) => (stdout, stderr), Err(err) => { println!("Failed to get log file handles: {}", err); return; } }; let mut handle = if stderr { stderr_handle } else { stdout_handle }; let (out_path, err_path) = get_log_paths(task_id, &pueue_directory); let handle_path = if stderr { err_path } else { out_path }; // Stdout handler to directly write log file output to io::stdout // without having to load anything into memory. let mut stdout = io::stdout(); loop { // Check whether the file still exists. Exit if it doesn't. if!handle_path.exists() { println!("File has gone away. Did somebody remove the task?"); return; } // Read the next chunk of text from the last position. if let Err(err) = io::copy(&mut handle, &mut stdout) { println!("Error while reading file: {}", err); return; }; let timeout = Duration::from_millis(100); sleep(timeout); } }
{ println!("{}", message); }
identifier_body
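`print_remote_task_output` above streams snappy-compressed bytes through `snap::read::FrameDecoder` into stdout. A minimal round-trip sketch with the same crate, assuming `snap` as a dependency; the payload is made up, and the encoder half is my addition, since the daemon side isn't shown in this file:

```rust
use std::io::{self, Write};

use snap::read::FrameDecoder;
use snap::write::FrameEncoder;

fn main() -> io::Result<()> {
    // Compress a payload into an in-memory buffer, the way the daemon would.
    let mut compressed = Vec::new();
    {
        let mut encoder = FrameEncoder::new(&mut compressed);
        encoder.write_all(b"hello from the daemon")?;
        encoder.flush()?;
    } // Encoder dropped here; the buffer now holds a complete snappy frame.

    // Decompress exactly like `print_remote_task_output`: wrap the byte
    // slice in a `FrameDecoder` and stream it into a writer with `io::copy`.
    let mut decompressor = FrameDecoder::new(compressed.as_slice());
    let stdout = io::stdout();
    let mut lock = stdout.lock();
    io::copy(&mut decompressor, &mut lock)?;
    Ok(())
}
```

Streaming through `io::copy` avoids buffering the decompressed output in memory, which is the same design choice the real function makes.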
output.rs
use std::io; use std::string::ToString; use std::thread::sleep; use std::time::Duration; use std::{collections::BTreeMap, path::PathBuf}; use anyhow::Result; use comfy_table::presets::UTF8_HORIZONTAL_BORDERS_ONLY; use comfy_table::*; use snap::read::FrameDecoder; use pueue::log::{get_log_file_handles, get_log_paths}; use pueue::network::message::{GroupResponseMessage, TaskLogMessage}; use pueue::settings::Settings; use pueue::state::State; use pueue::task::{Task, TaskResult, TaskStatus}; use crate::cli::SubCommand; use crate::output_helper::*; pub fn print_success(message: &str) { println!("{}", message); } pub fn print_error(message: &str) { let styled = style_text(message, Some(Color::Red), None); println!("{}", styled); } pub fn print_groups(message: GroupResponseMessage) { let mut text = String::new(); let mut group_iter = message.groups.iter().peekable(); while let Some((name, status)) = group_iter.next() { let parallel = *message.settings.get(name).unwrap(); let styled = get_group_headline(name, &status, parallel); text.push_str(&styled); if group_iter.peek().is_some() { text.push('\n'); } } println!("{}", text); } /// Print the current state of the daemon in a nicely formatted table. pub fn print_state(state: State, cli_command: &SubCommand, settings: &Settings) { let (json, group_only) = match cli_command { SubCommand::Status { json, group } => (*json, group.clone()), _ => panic!( "Got wrong Subcommand {:?} in print_state. This shouldn't happen", cli_command ), }; // If the json flag is specified, print the state as json and exit. if json { println!("{}", serde_json::to_string(&state).unwrap()); return; } // Early exit and hint if there are no tasks in the queue if state.tasks.is_empty() { println!("Task list is empty. Add tasks with `pueue add -- [cmd]`"); return; } // Sort all tasks by their respective group; let sorted_tasks = sort_tasks_by_group(&state.tasks); // Always print the default queue at the very top. if group_only.is_none() { let tasks = sorted_tasks.get("default").unwrap(); let headline = get_group_headline( &"default", &state.groups.get("default").unwrap(), *state.settings.daemon.groups.get("default").unwrap(), ); println!("{}", headline); print_table(&tasks, settings); // Add a newline if there are further groups to be printed if sorted_tasks.len() > 1 { println!(); } } let mut sorted_iter = sorted_tasks.iter().peekable(); // Print new table for each group
if group.eq("default") { continue; } // Skip unwanted groups, if a single group is requested if let Some(group_only) = &group_only { if group_only!= group { continue; } } let headline = get_group_headline( &group, &state.groups.get(group).unwrap(), *state.settings.daemon.groups.get(group).unwrap(), ); println!("{}", headline); print_table(&tasks, settings); // Add a newline between groups if sorted_iter.peek().is_some() { println!(); } } } /// Print some tasks into a nicely formatted table fn print_table(tasks: &BTreeMap<usize, Task>, settings: &Settings) { let (has_delayed_tasks, has_dependencies, has_labels) = has_special_columns(tasks); // Create table header row let mut headers = vec![Cell::new("Index"), Cell::new("Status")]; if has_delayed_tasks { headers.push(Cell::new("Enqueue At")); } if has_dependencies { headers.push(Cell::new("Deps")); } headers.push(Cell::new("Exitcode")); if has_labels { headers.push(Cell::new("Label")); } headers.append(&mut vec![ Cell::new("Command"), Cell::new("Path"), Cell::new("Start"), Cell::new("End"), ]); // Initialize comfy table. let mut table = Table::new(); table .set_content_arrangement(ContentArrangement::Dynamic) .load_preset(UTF8_HORIZONTAL_BORDERS_ONLY) .set_header(headers); // Add rows one by one. for (id, task) in tasks { let mut row = Row::new(); if let Some(height) = settings.client.max_status_lines { row.max_height(height); } row.add_cell(Cell::new(&id.to_string())); // Determine the human readable task status representation and the respective color. let status_string = task.status.to_string(); let (status_text, color) = match task.status { TaskStatus::Running => (status_string, Color::Green), TaskStatus::Paused | TaskStatus::Locked => (status_string, Color::White), TaskStatus::Done => match &task.result { Some(TaskResult::Success) => (TaskResult::Success.to_string(), Color::Green), Some(TaskResult::DependencyFailed) => ("Dependency failed".to_string(), Color::Red), Some(TaskResult::FailedToSpawn(_)) => ("Failed to spawn".to_string(), Color::Red), Some(result) => (result.to_string(), Color::Red), None => panic!("Got a 'Done' task without a task result. Please report this bug."), }, _ => (status_string, Color::Yellow), }; row.add_cell(Cell::new(status_text).fg(color)); if has_delayed_tasks { if let Some(enqueue_at) = task.enqueue_at { row.add_cell(Cell::new(enqueue_at.format("%Y-%m-%d\n%H:%M:%S"))); } else { row.add_cell(Cell::new("")); } } if has_dependencies { let text = task .dependencies .iter() .map(|id| id.to_string()) .collect::<Vec<String>>() .join(", "); row.add_cell(Cell::new(text)); } // Match the color of the exit code. // If the exit_code is none, it has been killed by the task handler. let exit_code_cell = match task.result { Some(TaskResult::Success) => Cell::new("0").fg(Color::Green), Some(TaskResult::Failed(code)) => Cell::new(&code.to_string()).fg(Color::Red), _ => Cell::new(""), }; row.add_cell(exit_code_cell); if has_labels { if let Some(label) = &task.label { row.add_cell(label.to_cell()); } else { row.add_cell(Cell::new("")); } } // Add command and path. if settings.client.show_expanded_aliases { row.add_cell(Cell::new(&task.command)); } else { row.add_cell(Cell::new(&task.original_command)); } row.add_cell(Cell::new(&task.path)); // Add start time, if already set. if let Some(start) = task.start { let formatted = start.format("%H:%M").to_string(); row.add_cell(Cell::new(&formatted)); } else { row.add_cell(Cell::new("")); } // Add finish time, if already set. 
if let Some(end) = task.end { let formatted = end.format("%H:%M").to_string(); row.add_cell(Cell::new(&formatted)); } else { row.add_cell(Cell::new("")); } table.add_row(row); } // Print the table. println!("{}", table); } /// Print the log ouput of finished tasks. /// Either print the logs of every task /// or only print the logs of the specified tasks. pub fn print_logs( mut task_logs: BTreeMap<usize, TaskLogMessage>, cli_command: &SubCommand, settings: &Settings, ) { let (json, task_ids) = match cli_command { SubCommand::Log { json, task_ids } => (*json, task_ids.clone()), _ => panic!( "Got wrong Subcommand {:?} in print_log. This shouldn't happen", cli_command ), }; if json { println!("{}", serde_json::to_string(&task_logs).unwrap()); return; } if task_ids.is_empty() && task_logs.is_empty() { println!("There are no finished tasks"); return; } if!task_ids.is_empty() && task_logs.is_empty() { println!("There are no finished tasks for your specified ids"); return; } let mut task_iter = task_logs.iter_mut().peekable(); while let Some((_, mut task_log)) = task_iter.next() { print_log(&mut task_log, settings); // Add a newline if there is another task that's going to be printed. if let Some((_, task_log)) = task_iter.peek() { if!vec![TaskStatus::Done, TaskStatus::Running, TaskStatus::Paused] .contains(&task_log.task.status) { println!(); } } } } /// Print the log of a single task. pub fn print_log(task_log: &mut TaskLogMessage, settings: &Settings) { let task = &task_log.task; // We only show logs of finished or running tasks. if!vec![TaskStatus::Done, TaskStatus::Running, TaskStatus::Paused].contains(&task.status) { return; } // Print task id and exit code. let task_text = style_text(&format!("Task {}", task.id), None, Some(Attribute::Bold)); let (exit_status, color) = match &task.result { Some(TaskResult::Success) => ("completed successfully".into(), Color::Green), Some(TaskResult::Failed(exit_code)) => { (format!("failed with exit code {}", exit_code), Color::Red) } Some(TaskResult::FailedToSpawn(err)) => (format!("failed to spawn: {}", err), Color::Red), Some(TaskResult::Killed) => ("killed by system or user".into(), Color::Red), Some(TaskResult::DependencyFailed) => ("dependency failed".into(), Color::Red), None => ("running".into(), Color::White), }; let status_text = style_text(&exit_status, Some(color), None); println!("{} {}", task_text, status_text); // Print command and path. println!("Command: {}", task.command); println!("Path: {}", task.path); if let Some(start) = task.start { println!("Start: {}", start.to_rfc2822()); } if let Some(end) = task.end { println!("End: {}", end.to_rfc2822()); } if settings.client.read_local_logs { print_local_log_output(task_log.task.id, settings); } else if task_log.stdout.is_some() && task_log.stderr.is_some() { print_task_output_from_daemon(task_log); } else { println!("Logs requested from pueue daemon, but none received. Please report this bug."); } } /// The daemon didn't send any log output, thereby we didn't request any. /// If that's the case, read the log files from the local pueue directory pub fn print_local_log_output(task_id: usize, settings: &Settings) { let (mut stdout_log, mut stderr_log) = match get_log_file_handles(task_id, &settings.shared.pueue_directory) { Ok((stdout, stderr)) => (stdout, stderr), Err(err) => { println!("Failed to get log file handles: {}", err); return; } }; // Stdout handler to directly write log file output to io::stdout // without having to load anything into memory. 
let mut stdout = io::stdout(); if let Ok(metadata) = stdout_log.metadata() { if metadata.len()!= 0 { println!( "\n{}", style_text("stdout:", Some(Color::Green), Some(Attribute::Bold)) ); if let Err(err) = io::copy(&mut stdout_log, &mut stdout) { println!("Failed reading local stdout log file: {}", err); }; } } if let Ok(metadata) = stderr_log.metadata() { if metadata.len()!= 0 { // Add a spacer line between stdout and stderr println!( "\n{}", style_text("stderr:", Some(Color::Red), Some(Attribute::Bold)) ); if let Err(err) = io::copy(&mut stderr_log, &mut stdout) { println!("Failed reading local stderr log file: {}", err); }; } } } /// Prints log output received from the daemon. /// We can safely call.unwrap() on stdout and stderr in here, since this /// branch is always called after ensuring that both are `Some`. pub fn print_task_output_from_daemon(task_log: &TaskLogMessage) { // Save whether stdout was printed, so we can add a newline between outputs. if!task_log.stdout.as_ref().unwrap().is_empty() { if let Err(err) = print_remote_task_output(&task_log, true) { println!("Error while parsing stdout: {}", err); } } if!task_log.stderr.as_ref().unwrap().is_empty() { if let Err(err) = print_remote_task_output(&task_log, false) { println!("Error while parsing stderr: {}", err); }; } } /// Print log output of a finished process. pub fn print_remote_task_output(task_log: &TaskLogMessage, stdout: bool) -> Result<()> { let (pre_text, color, bytes) = if stdout { ("stdout: ", Color::Green, task_log.stdout.as_ref().unwrap()) } else { ("stderr: ", Color::Red, task_log.stderr.as_ref().unwrap()) }; println!( "\n{}", style_text(pre_text, Some(color), Some(Attribute::Bold)) ); let mut decompressor = FrameDecoder::new(bytes.as_slice()); let stdout = io::stdout(); let mut write = stdout.lock(); io::copy(&mut decompressor, &mut write)?; Ok(()) } /// Follow the log ouput of running task. /// /// If no task is specified, this will check for the following cases: /// /// - No running task: Print an error that there are no running tasks /// - Single running task: Follow the output of that task /// - Multiple running tasks: Print out the list of possible tasks to follow. pub fn follow_task_logs(pueue_directory: &PathBuf, task_id: usize, stderr: bool) { let (stdout_handle, stderr_handle) = match get_log_file_handles(task_id, &pueue_directory) { Ok((stdout, stderr)) => (stdout, stderr), Err(err) => { println!("Failed to get log file handles: {}", err); return; } }; let mut handle = if stderr { stderr_handle } else { stdout_handle }; let (out_path, err_path) = get_log_paths(task_id, &pueue_directory); let handle_path = if stderr { err_path } else { out_path }; // Stdout handler to directly write log file output to io::stdout // without having to load anything into memory. let mut stdout = io::stdout(); loop { // Check whether the file still exists. Exit if it doesn't. if!handle_path.exists() { println!("File has gone away. Did somebody remove the task?"); return; } // Read the next chunk of text from the last position. if let Err(err) = io::copy(&mut handle, &mut stdout) { println!("Error while reading file: {}", err); return; }; let timeout = Duration::from_millis(100); sleep(timeout); } }
while let Some((group, tasks)) = sorted_iter.next() { // We always want to print the default group at the very top. // That's why we print it outside of this loop and skip it in here.
random_line_split
player.rs
use std::f32; use nalgebra::{norm, zero, Point2, Rotation2, Vector2}; use specs::prelude::*; use specs::storage::BTreeStorage; use defs::{EntityId, GameInfo, PlayerId, PlayerInput, INVALID_ENTITY_ID}; use event::{self, Event}; use game::entity::hook; use game::ComponentType; use physics::collision::{self, CollisionGroups, Cuboid, GeometricQueryType, ShapeHandle}; use physics::interaction; use physics::{AngularVelocity, Drag, Dynamic, InvAngularMass, InvMass, Orientation, Position, Velocity}; use registry::Registry; use repl; pub fn register(reg: &mut Registry) { reg.component::<InputState>(); reg.component::<CurrentInput>(); reg.component::<Player>(); reg.component::<State>(); reg.event::<DashedEvent>(); repl::entity::register_class( reg, "player", &[ ComponentType::Position, ComponentType::Orientation, ComponentType::Player, // TODO: Only send to owner ComponentType::Velocity, ComponentType::AngularVelocity, ComponentType::PlayerInputState, ComponentType::PlayerState, ], build_player, ); interaction::set( reg, "player", "wall", Some(interaction::Action::PreventOverlap { rotate_a: false, rotate_b: false, }), None, ); interaction::set( reg, "player", "test", Some(interaction::Action::PreventOverlap { rotate_a: false, rotate_b: false, }), None, ); // FIXME: Due to a bug in physics sim, other player also gets moved interaction::set( reg, "player", "player", Some(interaction::Action::PreventOverlap { rotate_a: false, rotate_b: false, }), None, ); } pub const NUM_HOOKS: usize = 2; pub const WIDTH: f32 = 40.0; pub const HEIGHT: f32 = 40.0; pub const MOVE_ACCEL: f32 = 3000.0; pub const ROT_ACCEL: f32 = 200.0; pub const MASS: f32 = 50.0; pub const DRAG: f32 = 4.0; pub const SNAP_ANGLE: f32 = f32::consts::PI / 12.0; pub const MAX_ANGULAR_VEL: f32 = f32::consts::PI * 5.0; pub const TAP_SECS: f32 = 0.25; pub const DASH_SECS: f32 = 0.3; pub const DASH_COOLDOWN_SECS: f32 = 2.0; pub const DASH_ACCEL: f32 = 10000.0; #[derive(Debug, Clone, BitStore)] pub struct DashedEvent { /// Different hook colors for drawing. pub hook_index: u32, } impl Event for DashedEvent { fn class(&self) -> event::Class { event::Class::Order } } /// Component that is attached whenever player input should be executed for an entity. 
#[derive(Component, Clone, Debug)] #[storage(BTreeStorage)] pub struct CurrentInput(pub PlayerInput, [bool; NUM_TAP_KEYS]); impl CurrentInput { fn new(input: PlayerInput) -> CurrentInput { CurrentInput(input, [false; NUM_TAP_KEYS]) } } // Tappable keys const MOVE_FORWARD_KEY: usize = 0; const MOVE_BACKWARD_KEY: usize = 1; const MOVE_LEFT_KEY: usize = 2; const MOVE_RIGHT_KEY: usize = 3; const NUM_TAP_KEYS: usize = 4; #[derive(PartialEq, Clone, Copy, Debug, Default, BitStore)] struct TapState { secs_left: f32, } #[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)] #[storage(BTreeStorage)] pub struct InputState { previous_shoot_one: bool, previous_shoot_two: bool, previous_tap_input: [bool; NUM_TAP_KEYS], tap_state: [TapState; NUM_TAP_KEYS], } impl repl::Component for InputState {} #[derive(Component, PartialEq, Clone, Copy, Debug, BitStore)] #[storage(BTreeStorage)] pub struct Player { pub hooks: [EntityId; NUM_HOOKS], } impl repl::Component for Player { const STATIC: bool = true; } #[derive(PartialEq, Clone, Copy, Debug, BitStore)] pub struct DashState { pub direction: [f32; 2], pub secs_left: f32, } #[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)] #[storage(BTreeStorage)] pub struct State { pub dash_cooldown_secs: f32, pub dash_state: Option<DashState>, } impl repl::Component for State {} impl State { pub fn dash(&mut self, direction: Vector2<f32>) { if self.dash_cooldown_secs == 0.0 { self.dash_cooldown_secs = DASH_COOLDOWN_SECS; self.dash_state = Some(DashState { direction: [direction.x, direction.y], secs_left: DASH_SECS, }); } } pub fn update_dash(&mut self, dt: f32) { self.dash_cooldown_secs -= dt; if self.dash_cooldown_secs < 0.0 { self.dash_cooldown_secs = 0.0; } self.dash_state = self.dash_state.as_ref().and_then(|dash_state| { let secs_left = dash_state.secs_left - dt; if secs_left <= 0.0 { None } else { Some(DashState { secs_left, ..*dash_state }) } }); } } pub fn run_input( world: &mut World, inputs: &[(PlayerId, PlayerInput, Entity)], ) -> Result<(), repl::Error> { // Update hooks for &(_, ref input, entity) in inputs { let player = *repl::try(&world.read::<Player>(), entity)?; let input_state = *repl::try(&world.read::<InputState>(), entity)?; for i in 0..NUM_HOOKS { let hook_entity = repl::try_id_to_entity(world, player.hooks[i])?; let hook_input = hook::CurrentInput { rot_angle: input.rot_angle, shoot: if i == 0 { input.shoot_one } else { input.shoot_two }, previous_shoot: if i == 0 { input_state.previous_shoot_one } else { input_state.previous_shoot_two }, pull: if i == 0 { input.pull_one } else { input.pull_two }, }; world .write::<hook::CurrentInput>() .insert(hook_entity, hook_input); } } hook::run_input(&world)?; // Update player for &(_, ref input, entity) in inputs { world .write::<CurrentInput>() .insert(entity, CurrentInput::new(input.clone())); } InputSys.run_now(&world.res); Ok(()) } pub fn
( world: &mut World, _inputs: &[(PlayerId, PlayerInput, Entity)], ) -> Result<(), repl::Error> { hook::run_input_post_sim(&world)?; world.write::<hook::CurrentInput>().clear(); world.write::<CurrentInput>().clear(); Ok(()) } pub mod auth { use super::*; pub fn create(world: &mut World, owner: PlayerId, pos: Point2<f32>) -> (EntityId, Entity) { let (id, entity) = repl::entity::auth::create(world, owner, "player", |builder| { builder.with(Position(pos)) }); let mut hooks = [INVALID_ENTITY_ID; NUM_HOOKS]; for (i, hook) in hooks.iter_mut().enumerate() { let (hook_id, _) = hook::auth::create(world, id, i as u32); *hook = hook_id; } // Now that we have created our hooks, attach the player definition world.write::<Player>().insert(entity, Player { hooks }); (id, entity) } } fn build_player(builder: EntityBuilder) -> EntityBuilder { let shape = Cuboid::new(Vector2::new(WIDTH / 2.0, HEIGHT / 2.0)); let mut groups = CollisionGroups::new(); groups.set_membership(&[collision::GROUP_PLAYER]); groups.set_whitelist(&[ collision::GROUP_PLAYER, collision::GROUP_WALL, collision::GROUP_PLAYER_ENTITY, collision::GROUP_NEUTRAL, ]); let query_type = GeometricQueryType::Contacts(0.0, 0.0); // TODO: Velocity (and Dynamic?) component should be added only for owners builder .with(Orientation(0.0)) .with(Velocity(zero())) .with(AngularVelocity(0.0)) .with(InvMass(1.0 / MASS)) .with(InvAngularMass(1.0 / 10.0)) .with(Dynamic) .with(Drag(DRAG)) .with(collision::Shape(ShapeHandle::new(shape))) .with(collision::Object { groups, query_type }) .with(InputState::default()) .with(State::default()) } #[derive(SystemData)] struct InputData<'a> { game_info: Fetch<'a, GameInfo>, input: WriteStorage<'a, CurrentInput>, orientation: WriteStorage<'a, Orientation>, velocity: WriteStorage<'a, Velocity>, angular_velocity: WriteStorage<'a, AngularVelocity>, state: WriteStorage<'a, State>, input_state: WriteStorage<'a, InputState>, } struct InputSys; impl<'a> System<'a> for InputSys { type SystemData = InputData<'a>; fn run(&mut self, mut data: InputData<'a>) { let dt = data.game_info.tick_duration_secs(); // Update tap state for (mut input, input_state) in (&mut data.input, &mut data.input_state).join() { let tap_input = [ input.0.move_forward, input.0.move_backward, input.0.move_left, input.0.move_right, ]; for i in 0..NUM_TAP_KEYS { if tap_input[i] && !input_state.previous_tap_input[i] { if input_state.tap_state[i].secs_left > 0.0 { input.1[i] = true; input_state.tap_state[i].secs_left = 0.0; } else { input_state.tap_state[i].secs_left = TAP_SECS; } } input_state.tap_state[i].secs_left -= dt; if input_state.tap_state[i].secs_left < 0.0 { input_state.tap_state[i].secs_left = 0.0; } input_state.previous_tap_input[i] = tap_input[i]; } } // Movement for (input, orientation, velocity, angular_velocity, state) in ( &data.input, &mut data.orientation, &mut data.velocity, &mut data.angular_velocity, &mut data.state, ).join() { // Dashing let forward = Rotation2::new(orientation.0).matrix() * Vector2::new(1.0, 0.0); let right = Vector2::new(-forward.y, forward.x); if input.1[MOVE_FORWARD_KEY] { state.dash(forward); } if input.1[MOVE_BACKWARD_KEY] { state.dash(-forward); } if input.1[MOVE_RIGHT_KEY] { state.dash(right); } if input.1[MOVE_LEFT_KEY] { state.dash(-right); } state.update_dash(dt); if let Some(dash_state) = state.dash_state.as_ref() { velocity.0 += Vector2::new(dash_state.direction[0], dash_state.direction[1]) * DASH_ACCEL * dt; continue; } /*if input.0.rot_angle != orientation.0 { // TODO: Only mutate if changed orientation.0 =
input.0.rot_angle; }*/ let diff = (input.0.rot_angle - orientation.0 + f32::consts::PI) % (2.0 * f32::consts::PI) - f32::consts::PI; let smallest_angle = if diff < -f32::consts::PI { diff + 2.0 * f32::consts::PI } else { diff }; if smallest_angle.abs() <= SNAP_ANGLE { orientation.0 = input.0.rot_angle; } else if smallest_angle < 0.0 { angular_velocity.0 -= ROT_ACCEL * dt; } else if smallest_angle > 0.0 { angular_velocity.0 += ROT_ACCEL * dt; } if angular_velocity.0.abs() > MAX_ANGULAR_VEL { angular_velocity.0 = angular_velocity.0.signum() * MAX_ANGULAR_VEL; } let forward = Rotation2::new(orientation.0).matrix() * Vector2::new(1.0, 0.0); let right = Vector2::new(-forward.y, forward.x); let mut direction = Vector2::new(0.0, 0.0); if input.0.move_forward { direction += forward; } if input.0.move_backward { direction -= forward; } if input.0.move_right { direction += right; } if input.0.move_left { direction -= right; } let direction_norm = norm(&direction); if direction_norm > 0.0 { velocity.0 += direction / direction_norm * MOVE_ACCEL * dt; //velocity.0 += direction / direction_norm * 25.0; } } // Remember some input state for (input, input_state) in (&data.input, &mut data.input_state).join() { input_state.previous_shoot_one = input.0.shoot_one; input_state.previous_shoot_two = input.0.shoot_two; } } }
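The turn logic above first reduces the difference between the desired and current angle to the smallest signed angle, then either snaps (within SNAP_ANGLE) or accelerates rotation toward it. A standalone sketch of that reduction, using the same formula (the helper name is hypothetical, not part of the module):

```rust
use std::f32::consts::PI;

/// Reduce an angle difference to the equivalent value in (-PI, PI],
/// mirroring the snap/turn decision in InputSys above.
fn smallest_signed_angle(target: f32, current: f32) -> f32 {
    let diff = (target - current + PI) % (2.0 * PI) - PI;
    // Rust's float `%` keeps the sign of the dividend, so one extra
    // wrap-around correction is needed, exactly as in the system code.
    if diff < -PI { diff + 2.0 * PI } else { diff }
}

fn main() {
    // Turning from 170 degrees to -170 degrees is a +20 degree turn,
    // not a -340 degree one.
    let current = 170.0_f32.to_radians();
    let target = (-170.0_f32).to_radians();
    println!("{:.1}", smallest_signed_angle(target, current).to_degrees()); // 20.0
}
```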
run_input_post_sim
identifier_name
player.rs
use std::f32; use nalgebra::{norm, zero, Point2, Rotation2, Vector2}; use specs::prelude::*; use specs::storage::BTreeStorage; use defs::{EntityId, GameInfo, PlayerId, PlayerInput, INVALID_ENTITY_ID}; use event::{self, Event}; use game::entity::hook; use game::ComponentType; use physics::collision::{self, CollisionGroups, Cuboid, GeometricQueryType, ShapeHandle}; use physics::interaction; use physics::{AngularVelocity, Drag, Dynamic, InvAngularMass, InvMass, Orientation, Position, Velocity}; use registry::Registry; use repl; pub fn register(reg: &mut Registry) { reg.component::<InputState>(); reg.component::<CurrentInput>(); reg.component::<Player>(); reg.component::<State>(); reg.event::<DashedEvent>(); repl::entity::register_class( reg, "player", &[ ComponentType::Position, ComponentType::Orientation, ComponentType::Player, // TODO: Only send to owner ComponentType::Velocity, ComponentType::AngularVelocity, ComponentType::PlayerInputState, ComponentType::PlayerState, ], build_player, ); interaction::set( reg, "player", "wall", Some(interaction::Action::PreventOverlap { rotate_a: false, rotate_b: false, }), None, ); interaction::set( reg, "player", "test", Some(interaction::Action::PreventOverlap { rotate_a: false, rotate_b: false, }), None, ); // FIXME: Due to a bug in physics sim, other player also gets moved interaction::set( reg, "player", "player", Some(interaction::Action::PreventOverlap { rotate_a: false, rotate_b: false, }), None, ); } pub const NUM_HOOKS: usize = 2; pub const WIDTH: f32 = 40.0; pub const HEIGHT: f32 = 40.0; pub const MOVE_ACCEL: f32 = 3000.0; pub const ROT_ACCEL: f32 = 200.0; pub const MASS: f32 = 50.0; pub const DRAG: f32 = 4.0; pub const SNAP_ANGLE: f32 = f32::consts::PI / 12.0; pub const MAX_ANGULAR_VEL: f32 = f32::consts::PI * 5.0; pub const TAP_SECS: f32 = 0.25; pub const DASH_SECS: f32 = 0.3; pub const DASH_COOLDOWN_SECS: f32 = 2.0; pub const DASH_ACCEL: f32 = 10000.0; #[derive(Debug, Clone, BitStore)] pub struct DashedEvent { /// Different hook colors for drawing. pub hook_index: u32, } impl Event for DashedEvent { fn class(&self) -> event::Class { event::Class::Order } } /// Component that is attached whenever player input should be executed for an entity. 
#[derive(Component, Clone, Debug)] #[storage(BTreeStorage)] pub struct CurrentInput(pub PlayerInput, [bool; NUM_TAP_KEYS]); impl CurrentInput { fn new(input: PlayerInput) -> CurrentInput { CurrentInput(input, [false; NUM_TAP_KEYS]) } } // Tappable keys const MOVE_FORWARD_KEY: usize = 0; const MOVE_BACKWARD_KEY: usize = 1; const MOVE_LEFT_KEY: usize = 2; const MOVE_RIGHT_KEY: usize = 3; const NUM_TAP_KEYS: usize = 4; #[derive(PartialEq, Clone, Copy, Debug, Default, BitStore)] struct TapState { secs_left: f32, } #[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)] #[storage(BTreeStorage)] pub struct InputState { previous_shoot_one: bool, previous_shoot_two: bool, previous_tap_input: [bool; NUM_TAP_KEYS], tap_state: [TapState; NUM_TAP_KEYS], } impl repl::Component for InputState {} #[derive(Component, PartialEq, Clone, Copy, Debug, BitStore)] #[storage(BTreeStorage)] pub struct Player { pub hooks: [EntityId; NUM_HOOKS], } impl repl::Component for Player { const STATIC: bool = true; } #[derive(PartialEq, Clone, Copy, Debug, BitStore)] pub struct DashState { pub direction: [f32; 2], pub secs_left: f32, } #[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)] #[storage(BTreeStorage)] pub struct State { pub dash_cooldown_secs: f32, pub dash_state: Option<DashState>, } impl repl::Component for State {} impl State { pub fn dash(&mut self, direction: Vector2<f32>) { if self.dash_cooldown_secs == 0.0 { self.dash_cooldown_secs = DASH_COOLDOWN_SECS; self.dash_state = Some(DashState { direction: [direction.x, direction.y], secs_left: DASH_SECS, }); } } pub fn update_dash(&mut self, dt: f32) { self.dash_cooldown_secs -= dt; if self.dash_cooldown_secs < 0.0 { self.dash_cooldown_secs = 0.0; } self.dash_state = self.dash_state.as_ref().and_then(|dash_state| { let secs_left = dash_state.secs_left - dt; if secs_left <= 0.0 { None } else { Some(DashState { secs_left, ..*dash_state }) } }); } } pub fn run_input( world: &mut World, inputs: &[(PlayerId, PlayerInput, Entity)], ) -> Result<(), repl::Error> { // Update hooks for &(_, ref input, entity) in inputs { let player = *repl::try(&world.read::<Player>(), entity)?; let input_state = *repl::try(&world.read::<InputState>(), entity)?; for i in 0..NUM_HOOKS { let hook_entity = repl::try_id_to_entity(world, player.hooks[i])?; let hook_input = hook::CurrentInput { rot_angle: input.rot_angle, shoot: if i == 0 { input.shoot_one } else { input.shoot_two }, previous_shoot: if i == 0 { input_state.previous_shoot_one } else { input_state.previous_shoot_two }, pull: if i == 0 { input.pull_one } else { input.pull_two }, }; world .write::<hook::CurrentInput>() .insert(hook_entity, hook_input); } } hook::run_input(&world)?; // Update player for &(_, ref input, entity) in inputs { world .write::<CurrentInput>() .insert(entity, CurrentInput::new(input.clone())); } InputSys.run_now(&world.res); Ok(()) } pub fn run_input_post_sim( world: &mut World, _inputs: &[(PlayerId, PlayerInput, Entity)], ) -> Result<(), repl::Error>
pub mod auth { use super::*; pub fn create(world: &mut World, owner: PlayerId, pos: Point2<f32>) -> (EntityId, Entity) { let (id, entity) = repl::entity::auth::create(world, owner, "player", |builder| { builder.with(Position(pos)) }); let mut hooks = [INVALID_ENTITY_ID; NUM_HOOKS]; for (i, hook) in hooks.iter_mut().enumerate() { let (hook_id, _) = hook::auth::create(world, id, i as u32); *hook = hook_id; } // Now that we have created our hooks, attach the player definition world.write::<Player>().insert(entity, Player { hooks }); (id, entity) } } fn build_player(builder: EntityBuilder) -> EntityBuilder { let shape = Cuboid::new(Vector2::new(WIDTH / 2.0, HEIGHT / 2.0)); let mut groups = CollisionGroups::new(); groups.set_membership(&[collision::GROUP_PLAYER]); groups.set_whitelist(&[ collision::GROUP_PLAYER, collision::GROUP_WALL, collision::GROUP_PLAYER_ENTITY, collision::GROUP_NEUTRAL, ]); let query_type = GeometricQueryType::Contacts(0.0, 0.0); // TODO: Velocity (and Dynamic?) component should be added only for owners builder .with(Orientation(0.0)) .with(Velocity(zero())) .with(AngularVelocity(0.0)) .with(InvMass(1.0 / MASS)) .with(InvAngularMass(1.0 / 10.0)) .with(Dynamic) .with(Drag(DRAG)) .with(collision::Shape(ShapeHandle::new(shape))) .with(collision::Object { groups, query_type }) .with(InputState::default()) .with(State::default()) } #[derive(SystemData)] struct InputData<'a> { game_info: Fetch<'a, GameInfo>, input: WriteStorage<'a, CurrentInput>, orientation: WriteStorage<'a, Orientation>, velocity: WriteStorage<'a, Velocity>, angular_velocity: WriteStorage<'a, AngularVelocity>, state: WriteStorage<'a, State>, input_state: WriteStorage<'a, InputState>, } struct InputSys; impl<'a> System<'a> for InputSys { type SystemData = InputData<'a>; fn run(&mut self, mut data: InputData<'a>) { let dt = data.game_info.tick_duration_secs(); // Update tap state for (mut input, input_state) in (&mut data.input, &mut data.input_state).join() { let tap_input = [ input.0.move_forward, input.0.move_backward, input.0.move_left, input.0.move_right, ]; for i in 0..NUM_TAP_KEYS { if tap_input[i] && !input_state.previous_tap_input[i] { if input_state.tap_state[i].secs_left > 0.0 { input.1[i] = true; input_state.tap_state[i].secs_left = 0.0; } else { input_state.tap_state[i].secs_left = TAP_SECS; } } input_state.tap_state[i].secs_left -= dt; if input_state.tap_state[i].secs_left < 0.0 { input_state.tap_state[i].secs_left = 0.0; } input_state.previous_tap_input[i] = tap_input[i]; } } // Movement for (input, orientation, velocity, angular_velocity, state) in ( &data.input, &mut data.orientation, &mut data.velocity, &mut data.angular_velocity, &mut data.state, ).join() { // Dashing let forward = Rotation2::new(orientation.0).matrix() * Vector2::new(1.0, 0.0); let right = Vector2::new(-forward.y, forward.x); if input.1[MOVE_FORWARD_KEY] { state.dash(forward); } if input.1[MOVE_BACKWARD_KEY] { state.dash(-forward); } if input.1[MOVE_RIGHT_KEY] { state.dash(right); } if input.1[MOVE_LEFT_KEY] { state.dash(-right); } state.update_dash(dt); if let Some(dash_state) = state.dash_state.as_ref() { velocity.0 += Vector2::new(dash_state.direction[0], dash_state.direction[1]) * DASH_ACCEL * dt; continue; } /*if input.0.rot_angle != orientation.0 { // TODO: Only mutate if changed orientation.0 = input.0.rot_angle; }*/ let diff = (input.0.rot_angle - orientation.0 + f32::consts::PI) % (2.0 * f32::consts::PI) - f32::consts::PI; let smallest_angle = if diff < -f32::consts::PI { diff + 2.0 * f32::consts::PI } else { diff };
if smallest_angle.abs() <= SNAP_ANGLE { orientation.0 = input.0.rot_angle; } else if smallest_angle < 0.0 { angular_velocity.0 -= ROT_ACCEL * dt; } else if smallest_angle > 0.0 { angular_velocity.0 += ROT_ACCEL * dt; } if angular_velocity.0.abs() > MAX_ANGULAR_VEL { angular_velocity.0 = angular_velocity.0.signum() * MAX_ANGULAR_VEL; } let forward = Rotation2::new(orientation.0).matrix() * Vector2::new(1.0, 0.0); let right = Vector2::new(-forward.y, forward.x); let mut direction = Vector2::new(0.0, 0.0); if input.0.move_forward { direction += forward; } if input.0.move_backward { direction -= forward; } if input.0.move_right { direction += right; } if input.0.move_left { direction -= right; } let direction_norm = norm(&direction); if direction_norm > 0.0 { velocity.0 += direction / direction_norm * MOVE_ACCEL * dt; //velocity.0 += direction / direction_norm * 25.0; } } // Remember some input state for (input, input_state) in (&data.input, &mut data.input_state).join() { input_state.previous_shoot_one = input.0.shoot_one; input_state.previous_shoot_two = input.0.shoot_two; } } }
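`State::dash` only fires when the cooldown has fully drained, and `State::update_dash` ticks both the dash duration and the cooldown down each frame, clamping at zero. A trimmed, standalone re-implementation of that cooldown behaviour for illustration (`DashTimer` is hypothetical; the constants mirror the module's):

```rust
const DASH_SECS: f32 = 0.3;
const DASH_COOLDOWN_SECS: f32 = 2.0;

#[derive(Default)]
struct DashTimer {
    cooldown_secs: f32,
    dash_secs_left: f32,
}

impl DashTimer {
    /// Mirrors State::dash: only start a dash once the cooldown has elapsed.
    fn try_dash(&mut self) -> bool {
        if self.cooldown_secs == 0.0 {
            self.cooldown_secs = DASH_COOLDOWN_SECS;
            self.dash_secs_left = DASH_SECS;
            true
        } else {
            false
        }
    }

    /// Mirrors State::update_dash: tick both timers down, clamping at zero.
    fn update(&mut self, dt: f32) {
        self.cooldown_secs = (self.cooldown_secs - dt).max(0.0);
        self.dash_secs_left = (self.dash_secs_left - dt).max(0.0);
    }
}

fn main() {
    let mut timer = DashTimer::default();
    let dt = 0.1; // 10 ticks per second
    assert!(timer.try_dash()); // first dash starts immediately
    timer.update(dt);
    assert!(!timer.try_dash()); // still cooling down after one tick
    for _ in 0..25 {
        timer.update(dt); // 2.5s of ticks comfortably drain the 2s cooldown
    }
    assert!(timer.try_dash()); // cooldown elapsed, dash again
}
```

Clamping at zero (rather than letting the timer go negative) is what makes the `== 0.0` readiness check in `try_dash` reliable across ticks.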
{ hook::run_input_post_sim(&world)?; world.write::<hook::CurrentInput>().clear(); world.write::<CurrentInput>().clear(); Ok(()) }
identifier_body
player.rs
use std::f32; use nalgebra::{norm, zero, Point2, Rotation2, Vector2}; use specs::prelude::*; use specs::storage::BTreeStorage; use defs::{EntityId, GameInfo, PlayerId, PlayerInput, INVALID_ENTITY_ID}; use event::{self, Event}; use game::entity::hook; use game::ComponentType; use physics::collision::{self, CollisionGroups, Cuboid, GeometricQueryType, ShapeHandle}; use physics::interaction; use physics::{AngularVelocity, Drag, Dynamic, InvAngularMass, InvMass, Orientation, Position, Velocity}; use registry::Registry; use repl; pub fn register(reg: &mut Registry) { reg.component::<InputState>(); reg.component::<CurrentInput>(); reg.component::<Player>(); reg.component::<State>(); reg.event::<DashedEvent>(); repl::entity::register_class( reg, "player", &[ ComponentType::Position, ComponentType::Orientation, ComponentType::Player, // TODO: Only send to owner ComponentType::Velocity, ComponentType::AngularVelocity, ComponentType::PlayerInputState, ComponentType::PlayerState, ], build_player, ); interaction::set( reg, "player", "wall", Some(interaction::Action::PreventOverlap { rotate_a: false, rotate_b: false, }), None, ); interaction::set( reg, "player", "test", Some(interaction::Action::PreventOverlap { rotate_a: false, rotate_b: false, }), None, ); // FIXME: Due to a bug in physics sim, other player also gets moved interaction::set( reg, "player", "player", Some(interaction::Action::PreventOverlap { rotate_a: false, rotate_b: false, }), None, ); } pub const NUM_HOOKS: usize = 2; pub const WIDTH: f32 = 40.0; pub const HEIGHT: f32 = 40.0; pub const MOVE_ACCEL: f32 = 3000.0; pub const ROT_ACCEL: f32 = 200.0; pub const MASS: f32 = 50.0; pub const DRAG: f32 = 4.0; pub const SNAP_ANGLE: f32 = f32::consts::PI / 12.0; pub const MAX_ANGULAR_VEL: f32 = f32::consts::PI * 5.0; pub const TAP_SECS: f32 = 0.25; pub const DASH_SECS: f32 = 0.3; pub const DASH_COOLDOWN_SECS: f32 = 2.0; pub const DASH_ACCEL: f32 = 10000.0; #[derive(Debug, Clone, BitStore)] pub struct DashedEvent { /// Different hook colors for drawing. pub hook_index: u32, } impl Event for DashedEvent { fn class(&self) -> event::Class { event::Class::Order } } /// Component that is attached whenever player input should be executed for an entity. 
#[derive(Component, Clone, Debug)] #[storage(BTreeStorage)] pub struct CurrentInput(pub PlayerInput, [bool; NUM_TAP_KEYS]); impl CurrentInput { fn new(input: PlayerInput) -> CurrentInput { CurrentInput(input, [false; NUM_TAP_KEYS]) } } // Tappable keys const MOVE_FORWARD_KEY: usize = 0; const MOVE_BACKWARD_KEY: usize = 1; const MOVE_LEFT_KEY: usize = 2; const MOVE_RIGHT_KEY: usize = 3; const NUM_TAP_KEYS: usize = 4; #[derive(PartialEq, Clone, Copy, Debug, Default, BitStore)] struct TapState { secs_left: f32, } #[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)] #[storage(BTreeStorage)] pub struct InputState { previous_shoot_one: bool, previous_shoot_two: bool, previous_tap_input: [bool; NUM_TAP_KEYS], tap_state: [TapState; NUM_TAP_KEYS], } impl repl::Component for InputState {} #[derive(Component, PartialEq, Clone, Copy, Debug, BitStore)] #[storage(BTreeStorage)] pub struct Player { pub hooks: [EntityId; NUM_HOOKS], } impl repl::Component for Player { const STATIC: bool = true; } #[derive(PartialEq, Clone, Copy, Debug, BitStore)] pub struct DashState { pub direction: [f32; 2], pub secs_left: f32, } #[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)] #[storage(BTreeStorage)] pub struct State { pub dash_cooldown_secs: f32, pub dash_state: Option<DashState>, } impl repl::Component for State {} impl State { pub fn dash(&mut self, direction: Vector2<f32>) { if self.dash_cooldown_secs == 0.0 { self.dash_cooldown_secs = DASH_COOLDOWN_SECS; self.dash_state = Some(DashState { direction: [direction.x, direction.y], secs_left: DASH_SECS, }); } } pub fn update_dash(&mut self, dt: f32) { self.dash_cooldown_secs -= dt; if self.dash_cooldown_secs < 0.0 { self.dash_cooldown_secs = 0.0; } self.dash_state = self.dash_state.as_ref().and_then(|dash_state| { let secs_left = dash_state.secs_left - dt; if secs_left <= 0.0 { None } else { Some(DashState { secs_left, ..*dash_state }) } }); } } pub fn run_input( world: &mut World, inputs: &[(PlayerId, PlayerInput, Entity)], ) -> Result<(), repl::Error> { // Update hooks for &(_, ref input, entity) in inputs { let player = *repl::try(&world.read::<Player>(), entity)?; let input_state = *repl::try(&world.read::<InputState>(), entity)?; for i in 0..NUM_HOOKS { let hook_entity = repl::try_id_to_entity(world, player.hooks[i])?; let hook_input = hook::CurrentInput { rot_angle: input.rot_angle, shoot: if i == 0 { input.shoot_one } else { input.shoot_two }, previous_shoot: if i == 0 { input_state.previous_shoot_one } else { input_state.previous_shoot_two }, pull: if i == 0 { input.pull_one } else { input.pull_two }, }; world .write::<hook::CurrentInput>() .insert(hook_entity, hook_input); } } hook::run_input(&world)?; // Update player for &(_, ref input, entity) in inputs { world .write::<CurrentInput>() .insert(entity, CurrentInput::new(input.clone())); } InputSys.run_now(&world.res); Ok(()) } pub fn run_input_post_sim( world: &mut World, _inputs: &[(PlayerId, PlayerInput, Entity)], ) -> Result<(), repl::Error> { hook::run_input_post_sim(&world)?; world.write::<hook::CurrentInput>().clear(); world.write::<CurrentInput>().clear(); Ok(()) } pub mod auth { use super::*; pub fn create(world: &mut World, owner: PlayerId, pos: Point2<f32>) -> (EntityId, Entity) { let (id, entity) = repl::entity::auth::create(world, owner, "player", |builder| { builder.with(Position(pos)) }); let mut hooks = [INVALID_ENTITY_ID; NUM_HOOKS]; for (i, hook) in hooks.iter_mut().enumerate() { let (hook_id, _) = hook::auth::create(world, id, i as u32); *hook = 
hook_id; } // Now that we have created our hooks, attach the player definition world.write::<Player>().insert(entity, Player { hooks }); (id, entity) } } fn build_player(builder: EntityBuilder) -> EntityBuilder { let shape = Cuboid::new(Vector2::new(WIDTH / 2.0, HEIGHT / 2.0)); let mut groups = CollisionGroups::new(); groups.set_membership(&[collision::GROUP_PLAYER]); groups.set_whitelist(&[ collision::GROUP_PLAYER, collision::GROUP_WALL, collision::GROUP_PLAYER_ENTITY, collision::GROUP_NEUTRAL, ]); let query_type = GeometricQueryType::Contacts(0.0, 0.0); // TODO: Velocity (and Dynamic?) component should be added only for owners builder .with(Orientation(0.0)) .with(Velocity(zero())) .with(AngularVelocity(0.0)) .with(InvMass(1.0 / MASS)) .with(InvAngularMass(1.0 / 10.0)) .with(Dynamic) .with(Drag(DRAG)) .with(collision::Shape(ShapeHandle::new(shape))) .with(collision::Object { groups, query_type }) .with(InputState::default()) .with(State::default()) } #[derive(SystemData)] struct InputData<'a> { game_info: Fetch<'a, GameInfo>, input: WriteStorage<'a, CurrentInput>, orientation: WriteStorage<'a, Orientation>, velocity: WriteStorage<'a, Velocity>, angular_velocity: WriteStorage<'a, AngularVelocity>, state: WriteStorage<'a, State>, input_state: WriteStorage<'a, InputState>, } struct InputSys; impl<'a> System<'a> for InputSys { type SystemData = InputData<'a>; fn run(&mut self, mut data: InputData<'a>) { let dt = data.game_info.tick_duration_secs(); // Update tap state for (mut input, input_state) in (&mut data.input, &mut data.input_state).join() { let tap_input = [ input.0.move_forward, input.0.move_backward, input.0.move_left, input.0.move_right, ]; for i in 0..NUM_TAP_KEYS { if tap_input[i] && !input_state.previous_tap_input[i] { if input_state.tap_state[i].secs_left > 0.0 { input.1[i] = true; input_state.tap_state[i].secs_left = 0.0; } else { input_state.tap_state[i].secs_left = TAP_SECS; } } input_state.tap_state[i].secs_left -= dt; if input_state.tap_state[i].secs_left < 0.0 { input_state.tap_state[i].secs_left = 0.0; } input_state.previous_tap_input[i] = tap_input[i]; } } // Movement for (input, orientation, velocity, angular_velocity, state) in ( &data.input, &mut data.orientation, &mut data.velocity, &mut data.angular_velocity, &mut data.state, ).join() { // Dashing let forward = Rotation2::new(orientation.0).matrix() * Vector2::new(1.0, 0.0); let right = Vector2::new(-forward.y, forward.x); if input.1[MOVE_FORWARD_KEY] { state.dash(forward); } if input.1[MOVE_BACKWARD_KEY]
if input.1[MOVE_RIGHT_KEY] { state.dash(right); } if input.1[MOVE_LEFT_KEY] { state.dash(-right); } state.update_dash(dt); if let Some(dash_state) = state.dash_state.as_ref() { velocity.0 += Vector2::new(dash_state.direction[0], dash_state.direction[1]) * DASH_ACCEL * dt; continue; } /*if input.0.rot_angle != orientation.0 { // TODO: Only mutate if changed orientation.0 = input.0.rot_angle; }*/ let diff = (input.0.rot_angle - orientation.0 + f32::consts::PI) % (2.0 * f32::consts::PI) - f32::consts::PI; let smallest_angle = if diff < -f32::consts::PI { diff + 2.0 * f32::consts::PI } else { diff }; if smallest_angle.abs() <= SNAP_ANGLE { orientation.0 = input.0.rot_angle; } else if smallest_angle < 0.0 { angular_velocity.0 -= ROT_ACCEL * dt; } else if smallest_angle > 0.0 { angular_velocity.0 += ROT_ACCEL * dt; } if angular_velocity.0.abs() > MAX_ANGULAR_VEL { angular_velocity.0 = angular_velocity.0.signum() * MAX_ANGULAR_VEL; } let forward = Rotation2::new(orientation.0).matrix() * Vector2::new(1.0, 0.0); let right = Vector2::new(-forward.y, forward.x); let mut direction = Vector2::new(0.0, 0.0); if input.0.move_forward { direction += forward; } if input.0.move_backward { direction -= forward; } if input.0.move_right { direction += right; } if input.0.move_left { direction -= right; } let direction_norm = norm(&direction); if direction_norm > 0.0 { velocity.0 += direction / direction_norm * MOVE_ACCEL * dt; //velocity.0 += direction / direction_norm * 25.0; } } // Remember some input state for (input, input_state) in (&data.input, &mut data.input_state).join() { input_state.previous_shoot_one = input.0.shoot_one; input_state.previous_shoot_two = input.0.shoot_two; } } }
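The tap-state pass in the system above turns a key press that repeats within TAP_SECS into a dash trigger: the first rising edge arms a timer, and a second rising edge before it expires fires the tap. A standalone sketch of that edge/timer logic for a single key (`TapDetector` is a hypothetical helper; the constant mirrors the module's):

```rust
const TAP_SECS: f32 = 0.25;

#[derive(Default)]
struct TapDetector {
    previous_pressed: bool,
    secs_left: f32,
}

impl TapDetector {
    /// Returns true when a double tap is detected, mirroring the per-key
    /// loop in InputSys: a rising edge while the timer from a previous
    /// press is still running counts as a tap.
    fn update(&mut self, pressed: bool, dt: f32) -> bool {
        let mut tapped = false;
        if pressed && !self.previous_pressed {
            if self.secs_left > 0.0 {
                tapped = true;
                self.secs_left = 0.0;
            } else {
                self.secs_left = TAP_SECS;
            }
        }
        // Tick the timer down and clamp at zero, as the system does.
        self.secs_left = (self.secs_left - dt).max(0.0);
        self.previous_pressed = pressed;
        tapped
    }
}

fn main() {
    let mut key = TapDetector::default();
    let dt = 1.0 / 60.0;
    assert!(!key.update(true, dt));  // first press arms the timer
    assert!(!key.update(false, dt)); // release
    assert!(key.update(true, dt));   // second press within TAP_SECS: dash
}
```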
{ state.dash(-forward); }
conditional_block
player.rs
use std::f32; use nalgebra::{norm, zero, Point2, Rotation2, Vector2}; use specs::prelude::*; use specs::storage::BTreeStorage; use defs::{EntityId, GameInfo, PlayerId, PlayerInput, INVALID_ENTITY_ID}; use event::{self, Event}; use game::entity::hook; use game::ComponentType; use physics::collision::{self, CollisionGroups, Cuboid, GeometricQueryType, ShapeHandle}; use physics::interaction; use physics::{AngularVelocity, Drag, Dynamic, InvAngularMass, InvMass, Orientation, Position, Velocity}; use registry::Registry; use repl; pub fn register(reg: &mut Registry) { reg.component::<InputState>(); reg.component::<CurrentInput>(); reg.component::<Player>(); reg.component::<State>(); reg.event::<DashedEvent>(); repl::entity::register_class( reg, "player", &[ ComponentType::Position, ComponentType::Orientation, ComponentType::Player, // TODO: Only send to owner ComponentType::Velocity, ComponentType::AngularVelocity, ComponentType::PlayerInputState, ComponentType::PlayerState, ], build_player, ); interaction::set( reg, "player", "wall", Some(interaction::Action::PreventOverlap { rotate_a: false, rotate_b: false, }), None, ); interaction::set( reg, "player", "test", Some(interaction::Action::PreventOverlap { rotate_a: false, rotate_b: false, }), None, ); // FIXME: Due to a bug in physics sim, other player also gets moved interaction::set( reg, "player", "player", Some(interaction::Action::PreventOverlap { rotate_a: false, rotate_b: false, }), None, ); } pub const NUM_HOOKS: usize = 2; pub const WIDTH: f32 = 40.0; pub const HEIGHT: f32 = 40.0; pub const MOVE_ACCEL: f32 = 3000.0; pub const ROT_ACCEL: f32 = 200.0; pub const MASS: f32 = 50.0; pub const DRAG: f32 = 4.0; pub const SNAP_ANGLE: f32 = f32::consts::PI / 12.0; pub const MAX_ANGULAR_VEL: f32 = f32::consts::PI * 5.0; pub const TAP_SECS: f32 = 0.25; pub const DASH_SECS: f32 = 0.3; pub const DASH_COOLDOWN_SECS: f32 = 2.0; pub const DASH_ACCEL: f32 = 10000.0; #[derive(Debug, Clone, BitStore)] pub struct DashedEvent { /// Different hook colors for drawing. pub hook_index: u32, }
fn class(&self) -> event::Class { event::Class::Order } } /// Component that is attached whenever player input should be executed for an entity. #[derive(Component, Clone, Debug)] #[storage(BTreeStorage)] pub struct CurrentInput(pub PlayerInput, [bool; NUM_TAP_KEYS]); impl CurrentInput { fn new(input: PlayerInput) -> CurrentInput { CurrentInput(input, [false; NUM_TAP_KEYS]) } } // Tappable keys const MOVE_FORWARD_KEY: usize = 0; const MOVE_BACKWARD_KEY: usize = 1; const MOVE_LEFT_KEY: usize = 2; const MOVE_RIGHT_KEY: usize = 3; const NUM_TAP_KEYS: usize = 4; #[derive(PartialEq, Clone, Copy, Debug, Default, BitStore)] struct TapState { secs_left: f32, } #[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)] #[storage(BTreeStorage)] pub struct InputState { previous_shoot_one: bool, previous_shoot_two: bool, previous_tap_input: [bool; NUM_TAP_KEYS], tap_state: [TapState; NUM_TAP_KEYS], } impl repl::Component for InputState {} #[derive(Component, PartialEq, Clone, Copy, Debug, BitStore)] #[storage(BTreeStorage)] pub struct Player { pub hooks: [EntityId; NUM_HOOKS], } impl repl::Component for Player { const STATIC: bool = true; } #[derive(PartialEq, Clone, Copy, Debug, BitStore)] pub struct DashState { pub direction: [f32; 2], pub secs_left: f32, } #[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)] #[storage(BTreeStorage)] pub struct State { pub dash_cooldown_secs: f32, pub dash_state: Option<DashState>, } impl repl::Component for State {} impl State { pub fn dash(&mut self, direction: Vector2<f32>) { if self.dash_cooldown_secs == 0.0 { self.dash_cooldown_secs = DASH_COOLDOWN_SECS; self.dash_state = Some(DashState { direction: [direction.x, direction.y], secs_left: DASH_SECS, }); } } pub fn update_dash(&mut self, dt: f32) { self.dash_cooldown_secs -= dt; if self.dash_cooldown_secs < 0.0 { self.dash_cooldown_secs = 0.0; } self.dash_state = self.dash_state.as_ref().and_then(|dash_state| { let secs_left = dash_state.secs_left - dt; if secs_left <= 0.0 { None } else { Some(DashState { secs_left, ..*dash_state }) } }); } } pub fn run_input( world: &mut World, inputs: &[(PlayerId, PlayerInput, Entity)], ) -> Result<(), repl::Error> { // Update hooks for &(_, ref input, entity) in inputs { let player = *repl::try(&world.read::<Player>(), entity)?; let input_state = *repl::try(&world.read::<InputState>(), entity)?; for i in 0..NUM_HOOKS { let hook_entity = repl::try_id_to_entity(world, player.hooks[i])?; let hook_input = hook::CurrentInput { rot_angle: input.rot_angle, shoot: if i == 0 { input.shoot_one } else { input.shoot_two }, previous_shoot: if i == 0 { input_state.previous_shoot_one } else { input_state.previous_shoot_two }, pull: if i == 0 { input.pull_one } else { input.pull_two }, }; world .write::<hook::CurrentInput>() .insert(hook_entity, hook_input); } } hook::run_input(&world)?; // Update player for &(_, ref input, entity) in inputs { world .write::<CurrentInput>() .insert(entity, CurrentInput::new(input.clone())); } InputSys.run_now(&world.res); Ok(()) } pub fn run_input_post_sim( world: &mut World, _inputs: &[(PlayerId, PlayerInput, Entity)], ) -> Result<(), repl::Error> { hook::run_input_post_sim(&world)?; world.write::<hook::CurrentInput>().clear(); world.write::<CurrentInput>().clear(); Ok(()) } pub mod auth { use super::*; pub fn create(world: &mut World, owner: PlayerId, pos: Point2<f32>) -> (EntityId, Entity) { let (id, entity) = repl::entity::auth::create(world, owner, "player", |builder| { builder.with(Position(pos)) }); let mut hooks = 
[INVALID_ENTITY_ID; NUM_HOOKS]; for (i, hook) in hooks.iter_mut().enumerate() { let (hook_id, _) = hook::auth::create(world, id, i as u32); *hook = hook_id; } // Now that we have created our hooks, attach the player definition world.write::<Player>().insert(entity, Player { hooks }); (id, entity) } } fn build_player(builder: EntityBuilder) -> EntityBuilder { let shape = Cuboid::new(Vector2::new(WIDTH / 2.0, HEIGHT / 2.0)); let mut groups = CollisionGroups::new(); groups.set_membership(&[collision::GROUP_PLAYER]); groups.set_whitelist(&[ collision::GROUP_PLAYER, collision::GROUP_WALL, collision::GROUP_PLAYER_ENTITY, collision::GROUP_NEUTRAL, ]); let query_type = GeometricQueryType::Contacts(0.0, 0.0); // TODO: Velocity (and Dynamic?) component should be added only for owners builder .with(Orientation(0.0)) .with(Velocity(zero())) .with(AngularVelocity(0.0)) .with(InvMass(1.0 / MASS)) .with(InvAngularMass(1.0 / 10.0)) .with(Dynamic) .with(Drag(DRAG)) .with(collision::Shape(ShapeHandle::new(shape))) .with(collision::Object { groups, query_type }) .with(InputState::default()) .with(State::default()) } #[derive(SystemData)] struct InputData<'a> { game_info: Fetch<'a, GameInfo>, input: WriteStorage<'a, CurrentInput>, orientation: WriteStorage<'a, Orientation>, velocity: WriteStorage<'a, Velocity>, angular_velocity: WriteStorage<'a, AngularVelocity>, state: WriteStorage<'a, State>, input_state: WriteStorage<'a, InputState>, } struct InputSys; impl<'a> System<'a> for InputSys { type SystemData = InputData<'a>; fn run(&mut self, mut data: InputData<'a>) { let dt = data.game_info.tick_duration_secs(); // Update tap state for (mut input, input_state) in (&mut data.input, &mut data.input_state).join() { let tap_input = [ input.0.move_forward, input.0.move_backward, input.0.move_left, input.0.move_right, ]; for i in 0..NUM_TAP_KEYS { if tap_input[i] && !input_state.previous_tap_input[i] { if input_state.tap_state[i].secs_left > 0.0 { input.1[i] = true; input_state.tap_state[i].secs_left = 0.0; } else { input_state.tap_state[i].secs_left = TAP_SECS; } } input_state.tap_state[i].secs_left -= dt; if input_state.tap_state[i].secs_left < 0.0 { input_state.tap_state[i].secs_left = 0.0; } input_state.previous_tap_input[i] = tap_input[i]; } } // Movement for (input, orientation, velocity, angular_velocity, state) in ( &data.input, &mut data.orientation, &mut data.velocity, &mut data.angular_velocity, &mut data.state, ).join() { // Dashing let forward = Rotation2::new(orientation.0).matrix() * Vector2::new(1.0, 0.0); let right = Vector2::new(-forward.y, forward.x); if input.1[MOVE_FORWARD_KEY] { state.dash(forward); } if input.1[MOVE_BACKWARD_KEY] { state.dash(-forward); } if input.1[MOVE_RIGHT_KEY] { state.dash(right); } if input.1[MOVE_LEFT_KEY] { state.dash(-right); } state.update_dash(dt); if let Some(dash_state) = state.dash_state.as_ref() { velocity.0 += Vector2::new(dash_state.direction[0], dash_state.direction[1]) * DASH_ACCEL * dt; continue; } /*if input.0.rot_angle != orientation.0 { // TODO: Only mutate if changed orientation.0 = input.0.rot_angle; }*/ let diff = (input.0.rot_angle - orientation.0 + f32::consts::PI) % (2.0 * f32::consts::PI) - f32::consts::PI; let smallest_angle = if diff < -f32::consts::PI { diff + 2.0 * f32::consts::PI } else { diff }; if smallest_angle.abs() <= SNAP_ANGLE { orientation.0 = input.0.rot_angle; } else if smallest_angle < 0.0 { angular_velocity.0 -= ROT_ACCEL * dt; } else if smallest_angle > 0.0 { angular_velocity.0 += ROT_ACCEL * dt; } if angular_velocity.0.abs() >
MAX_ANGULAR_VEL { angular_velocity.0 = angular_velocity.0.signum() * MAX_ANGULAR_VEL; } let forward = Rotation2::new(orientation.0).matrix() * Vector2::new(1.0, 0.0); let right = Vector2::new(-forward.y, forward.x); let mut direction = Vector2::new(0.0, 0.0); if input.0.move_forward { direction += forward; } if input.0.move_backward { direction -= forward; } if input.0.move_right { direction += right; } if input.0.move_left { direction -= right; } let direction_norm = norm(&direction); if direction_norm > 0.0 { velocity.0 += direction / direction_norm * MOVE_ACCEL * dt; //velocity.0 += direction / direction_norm * 25.0; } } // Remember some input state for (input, input_state) in (&data.input, &mut data.input_state).join() { input_state.previous_shoot_one = input.0.shoot_one; input_state.previous_shoot_two = input.0.shoot_two; } } }
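The movement pass combines all held keys into one direction vector and normalizes it before applying MOVE_ACCEL, so diagonal input accelerates no faster than a single key. A minimal sketch of that normalization step, assuming the nalgebra version this module imports (it re-exports a free `norm` function):

```rust
use nalgebra::{norm, Vector2};

const MOVE_ACCEL: f32 = 3000.0;

fn main() {
    let dt = 1.0 / 60.0;
    let forward = Vector2::new(1.0, 0.0);
    let right = Vector2::new(0.0, 1.0);

    // Forward + right held together: the combined direction is diagonal.
    let direction: Vector2<f32> = forward + right;

    let mut velocity: Vector2<f32> = Vector2::new(0.0, 0.0);
    let direction_norm = norm(&direction);
    if direction_norm > 0.0 {
        // Dividing by the norm keeps the applied acceleration magnitude
        // at MOVE_ACCEL no matter how many movement keys are held.
        velocity += direction / direction_norm * MOVE_ACCEL * dt;
    }
    println!("speed after one tick: {}", norm(&velocity)); // = MOVE_ACCEL * dt
}
```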
impl Event for DashedEvent {
random_line_split
linktypes.rs
//! LINK-LAYER HEADER TYPE VALUES [https://www.tcpdump.org/linktypes.html](https://www.tcpdump.org/linktypes.html) //! //! LINKTYPE_ name | LINKTYPE_ value | Corresponding DLT_ name | Description /// DLT_NULL BSD loopback encapsulation; the link layer header is a 4-byte field, in host byte order, containing a value of 2 for IPv4 packets, a value of either 24, 28, or 30 for IPv6 packets, a value of 7 for OSI packets, or a value of 23 for IPX packets. All of the IPv6 values correspond to IPv6 packets; code reading files should check for all of them. Note that ``host byte order'' is the byte order of the machine on which the packets are captured; if a live capture is being done, ``host byte order'' is the byte order of the machine capturing the packets, but if a ``savefile'' is being read, the byte order is not necessarily that of the machine reading the capture file. pub const NULL: i32 = 0; /// DLT_EN10MB IEEE 802.3 Ethernet (10Mb, 100Mb, 1000Mb, and up); the 10MB in the DLT_ name is historical. pub const ETHERNET: i32 = 1; /// DLT_AX25 AX.25 packet, with nothing preceding it. pub const AX25: i32 = 3; /// DLT_IEEE802 IEEE 802.5 Token Ring; the IEEE802, without _5, in the DLT_ name is historical. pub const IEEE802_5: i32 = 6; /// DLT_ARCNET ARCNET Data Packets, as described by the ARCNET Trade Association standard ATA 878.1-1999, but without the Starting Delimiter, Information Length, or Frame Check Sequence fields, and with only the first ISU of the Destination Identifier. For most packet types, ARCNET Trade Association draft standard ATA 878.2 is also used. See also RFC 1051 and RFC 1201; for RFC 1051 frames, ATA 878.2 is not used. pub const ARCNET_BSD: i32 = 7; /// DLT_SLIP SLIP, encapsulated with a LINKTYPE_SLIP header. pub const SLIP: i32 = 8; /// DLT_PPP PPP, as per RFC 1661 and RFC 1662; if the first 2 bytes are 0xff and 0x03, it's PPP in HDLC-like framing, with the PPP header following those two bytes, otherwise it's PPP without framing, and the packet begins with the PPP header. The data in the frame is not octet-stuffed or bit-stuffed. pub const PPP: i32 = 9; /// DLT_FDDI FDDI, as specified by ANSI INCITS 239-1994. pub const FDDI: i32 = 10; /// DLT_PPP_SERIAL PPP in HDLC-like framing, as per RFC 1662, or Cisco PPP with HDLC framing, as per section 4.3.1 of RFC 1547; the first byte will be 0xFF for PPP in HDLC-like framing, and will be 0x0F or 0x8F for Cisco PPP with HDLC framing. The data in the frame is not octet-stuffed or bit-stuffed. pub const PPP_HDLC: i32 = 50; /// DLT_PPP_ETHER PPPoE; the packet begins with a PPPoE header, as per RFC 2516. pub const PPP_ETHER: i32 = 51; /// DLT_ATM_RFC1483 RFC 1483 LLC/SNAP-encapsulated ATM; the packet begins with an ISO 8802-2 (formerly known as IEEE 802.2) LLC header. pub const ATM_RFC1483: i32 = 100; /// DLT_RAW Raw IP; the packet begins with an IPv4 or IPv6 header, with the "version" field of the header indicating whether it's an IPv4 or IPv6 header. pub const RAW: i32 = 101; /// DLT_C_HDLC Cisco PPP with HDLC framing, as per section 4.3.1 of RFC 1547. pub const C_HDLC: i32 = 104; /// DLT_IEEE802_11 IEEE 802.11 wireless LAN. pub const IEEE802_11: i32 = 105; /// DLT_FRELAY Frame Relay LAPF frames, beginning with a ITU-T Recommendation Q.922 LAPF header starting with the address field, and without an FCS at the end of the frame. 
pub const FRELAY: i32 = 107; /// DLT_LOOP OpenBSD loopback encapsulation; the link-layer header is a 4-byte field, in network byte order, containing a value of 2 for IPv4 packets, a value of either 24, 28, or 30 for IPv6 packets, a value of 7 for OSI packets, or a value of 23 for IPX packets. All of the IPv6 values correspond to IPv6 packets; code reading files should check for all of them. pub const LOOP: i32 = 108; /// DLT_LINUX_SLL Linux "cooked" capture encapsulation. pub const LINUX_SLL: i32 = 113; /// DLT_LTALK Apple LocalTalk; the packet begins with an AppleTalk LocalTalk Link Access Protocol header, as described in chapter 1 of Inside AppleTalk, Second Edition. pub const LTALK: i32 = 114; /// DLT_PFLOG OpenBSD pflog; the link-layer header contains a "struct pfloghdr" structure, as defined by the host on which the file was saved. (This differs from operating system to operating system and release to release; there is nothing in the file to indicate what the layout of that structure is.) pub const PFLOG: i32 = 117; /// DLT_PRISM_HEADER Prism monitor mode information followed by an 802.11 header. pub const IEEE802_11_PRISM: i32 = 119; /// DLT_IP_OVER_FC RFC 2625 IP-over-Fibre Channel, with the link-layer header being the Network_Header as described in that RFC. pub const IP_OVER_FC: i32 = 122; /// DLT_SUNATM ATM traffic, encapsulated as per the scheme used by SunATM devices. pub const SUNATM: i32 = 123; /// DLT_IEEE802_11_RADIO Radiotap link-layer information followed by an 802.11 header. pub const IEEE802_11_RADIOTAP: i32 = 127; /// DLT_ARCNET_LINUX ARCNET Data Packets, as described by the ARCNET Trade Association standard ATA 878.1-1999, but without the Starting Delimiter, Information Length, or Frame Check Sequence fields, with only the first ISU of the Destination Identifier, and with an extra two-ISU "offset" field following the Destination Identifier. For most packet types, ARCNET Trade Association draft standard ATA 878.2 is also used; however, no exception frames are supplied, and reassembled frames, rather than fragments, are supplied. See also RFC 1051 and RFC 1201; for RFC 1051 frames, ATA 878.2 is not used. pub const ARCNET_LINUX: i32 = 129; /// DLT_APPLE_IP_OVER_IEEE1394 Apple IP-over-IEEE 1394 cooked header. pub const APPLE_IP_OVER_IEEE1394: i32 = 138; /// DLT_MTP2_WITH_PHDR Signaling System 7 Message Transfer Part Level 2, as specified by ITU-T Recommendation Q.703, preceded by a pseudo-header. pub const MTP2_WITH_PHDR: i32 = 139; /// DLT_MTP2 Signaling System 7 Message Transfer Part Level 2, as specified by ITU-T Recommendation Q.703. pub const MTP2: i32 = 140; /// DLT_MTP3 Signaling System 7 Message Transfer Part Level 3, as specified by ITU-T Recommendation Q.704, with no MTP2 header preceding the MTP3 packet. pub const MTP3: i32 = 141; /// DLT_SCCP Signaling System 7 Signalling Connection Control Part, as specified by ITU-T Recommendation Q.711, ITU-T Recommendation Q.712, ITU-T Recommendation Q.713, and ITU-T Recommendation Q.714, with no MTP3 or MTP2 headers preceding the SCCP packet. pub const SCCP: i32 = 142; /// DLT_DOCSIS DOCSIS MAC frames, as described by the DOCSIS 3.1 MAC and Upper Layer Protocols Interface Specification or earlier specifications for MAC frames. pub const DOCSIS: i32 = 143; /// DLT_LINUX_IRDA Linux-IrDA packets, with a LINKTYPE_LINUX_IRDA header, with the payload for IrDA frames beginning with the IrLAP header as defined by IrDA Data Specifications, including the IrDA Link Access Protocol specification.
pub const LINUX_IRDA: i32 = 144; // LINKTYPE_USER0-LINKTYPE_USER15 147-162 DLT_USER0-DLT_USER15 Reserved for private use; see above. /// DLT_IEEE802_11_RADIO_AVS AVS monitor mode information followed by an 802.11 header. pub const IEEE802_11_AVS: i32 = 163; /// DLT_BACNET_MS_TP BACnet MS/TP frames, as specified by section 9.3 MS/TP Frame Format of ANSI/ASHRAE Standard 135, BACnet® - A Data Communication Protocol for Building Automation and Control Networks, including the preamble and, if present, the Data CRC. pub const BACNET_MS_TP: i32 = 165; /// DLT_PPP_PPPD PPP in HDLC-like encapsulation, like LINKTYPE_PPP_HDLC, but with the 0xff address byte replaced by a direction indication - 0x00 for incoming and 0x01 for outgoing. pub const PPP_PPPD: i32 = 166; /// DLT_GPRS_LLC General Packet Radio Service Logical Link Control, as defined by 3GPP TS 04.64. pub const GPRS_LLC: i32 = 169; /// DLT_GPF_T Transparent-mapped generic framing procedure, as specified by ITU-T Recommendation G.7041/Y.1303. pub const GPF_T: i32 = 170; /// DLT_GPF_F Frame-mapped generic framing procedure, as specified by ITU-T Recommendation G.7041/Y.1303. pub const GPF_F: i32 = 171; /// DLT_LINUX_LAPD Link Access Procedures on the D Channel (LAPD) frames, as specified by ITU-T Recommendation Q.920 and ITU-T Recommendation Q.921, captured via vISDN, with a LINKTYPE_LINUX_LAPD header, followed by the Q.921 frame, starting with the address field. pub const LINUX_LAPD: i32 = 177; /// DLT_MFR FRF.16.1 Multi-Link Frame Relay frames, beginning with an FRF.12 Interface fragmentation format fragmentation header. pub const MFR: i32 = 182; /// DLT_BLUETOOTH_HCI_H4 Bluetooth HCI UART transport layer; the frame contains an HCI packet indicator byte, as specified by the UART Transport Layer portion of the most recent Bluetooth Core specification, followed by an HCI packet of the specified packet type, as specified by the Host Controller Interface Functional Specification portion of the most recent Bluetooth Core Specification. pub const BLUETOOTH_HCI_H4: i32 = 187; /// DLT_USB_LINUX USB packets, beginning with a Linux USB header, as specified by the struct usbmon_packet in the Documentation/usb/usbmon.txt file in the Linux source tree. Only the first 48 bytes of that header are present. All fields in the header are in host byte order. When performing a live capture, the host byte order is the byte order of the machine on which the packets are captured. When reading a pcap file, the byte order is the byte order for the file, as specified by the file's magic number; when reading a pcapng file, the byte order is the byte order for the section of the pcapng file, as specified by the Section Header Block. pub const USB_LINUX: i32 = 189; /// DLT_PPI Per-Packet Information information, as specified by the Per-Packet Information Header Specification, followed by a packet with the LINKTYPE_ value specified by the pph_dlt field of that header. pub const PPI: i32 = 192; /// DLT_IEEE802_15_4_WITHFCS IEEE 802.15.4 Low-Rate Wireless Networks, with each packet having the FCS at the end of the frame. pub const IEEE802_15_4_WITHFCS: i32 = 195; /// DLT_SITA Various link-layer types, with a pseudo-header, for SITA. pub const SITA: i32 = 196; /// DLT_ERF Various link-layer types, with a pseudo-header, for Endace DAG cards; encapsulates Endace ERF records.
/// DLT_AX25_KISS AX.25 packet, with a 1-byte KISS header containing a type indicator. pub const AX25_KISS: i32 = 202; /// DLT_LAPD Link Access Procedures on the D Channel (LAPD) frames, as specified by ITU-T Recommendation Q.920 and ITU-T Recommendation Q.921, starting with the address field, with no pseudo-header. pub const LAPD: i32 = 203; /// DLT_PPP_WITH_DIR PPP, as per RFC 1661 and RFC 1662, preceded with a one-byte pseudo-header with a zero value meaning "received by this host" and a non-zero value meaning "sent by this host"; if the first 2 bytes are 0xff and 0x03, it's PPP in HDLC-like framing, with the PPP header following those two bytes, otherwise it's PPP without framing, and the packet begins with the PPP header. The data in the frame is not octet-stuffed or bit-stuffed. pub const PPP_WITH_DIR: i32 = 204; /// DLT_C_HDLC_WITH_DIR Cisco PPP with HDLC framing, as per section 4.3.1 of RFC 1547, preceded with a one-byte pseudo-header with a zero value meaning "received by this host" and a non-zero value meaning "sent by this host". pub const C_HDLC_WITH_DIR: i32 = 205; /// DLT_FRELAY_WITH_DIR Frame Relay LAPF frames, beginning with a one-byte pseudo-header with a zero value meaning "received by this host" (DCE->DTE) and a non-zero value meaning "sent by this host" (DTE->DCE), followed by an ITU-T Recommendation Q.922 LAPF header starting with the address field, and without an FCS at the end of the frame. pub const FRELAY_WITH_DIR: i32 = 206; /// DLT_LAPB_WITH_DIR Link Access Procedure, Balanced (LAPB), as specified by ITU-T Recommendation X.25, preceded with a one-byte pseudo-header with a zero value meaning "received by this host" (DCE->DTE) and a non-zero value meaning "sent by this host" (DTE->DCE). pub const LAPB_WITH_DIR: i32 = 207; /// DLT_IPMB_LINUX IPMB over an I2C circuit, with a Linux-specific pseudo-header. pub const IPMB_LINUX: i32 = 209; /// DLT_IEEE802_15_4_NONASK_PHY IEEE 802.15.4 Low-Rate Wireless Networks, with each packet having the FCS at the end of the frame, and with the PHY-level data for the O-QPSK, BPSK, GFSK, MSK, and RCC DSS BPSK PHYs (4 octets of 0 as preamble, one octet of SFD, one octet of frame length + reserved bit) preceding the MAC-layer data (starting with the frame control field). pub const IEEE802_15_4_NONASK_PHY: i32 = 215; /// DLT_USB_LINUX_MMAPPED USB packets, beginning with a Linux USB header, as specified by the struct usbmon_packet in the Documentation/usb/usbmon.txt file in the Linux source tree. All 64 bytes of the header are present. All fields in the header are in host byte order. When performing a live capture, the host byte order is the byte order of the machine on which the packets are captured. When reading a pcap file, the byte order is the byte order for the file, as specified by the file's magic number; when reading a pcapng file, the byte order is the byte order for the section of the pcapng file, as specified by the Section Header Block. For isochronous transfers, the ndesc field specifies the number of isochronous descriptors that follow. pub const USB_LINUX_MMAPPED: i32 = 220; /// DLT_FC_2 Fibre Channel FC-2 frames, beginning with a Frame_Header. pub const FC_2: i32 = 224; /// DLT_FC_2_WITH_FRAME_DELIMS Fibre Channel FC-2 frames, beginning with an encoding of the SOF, followed by a Frame_Header, and ending with an encoding of the EOF.
The encodings represent the frame delimiters as 4-byte sequences representing the corresponding ordered sets, with K28.5 represented as 0xBC, and the D symbols as the corresponding byte values; for example, SOFi2, which is K28.5 - D21.5 - D1.2 - D21.2, is represented as 0xBC 0xB5 0x55 0x55. pub const FC_2_WITH_FRAME_DELIMS: i32 = 225; /// DLT_IPNET Solaris ipnet pseudo-header, followed by an IPv4 or IPv6 datagram. pub const IPNET: i32 = 226; /// DLT_CAN_SOCKETCAN CAN (Controller Area Network) frames, with a pseudo-header followed by the frame payload. pub const CAN_SOCKETCAN: i32 = 227; /// DLT_IPV4 Raw IPv4; the packet begins with an IPv4 header. pub const IPV4: i32 = 228; /// DLT_IPV6 Raw IPv6; the packet begins with an IPv6 header. pub const IPV6: i32 = 229; /// DLT_IEEE802_15_4_NOFCS IEEE 802.15.4 Low-Rate Wireless Network, without the FCS at the end of the frame. pub const IEEE802_15_4_NOFCS: i32 = 230; /// DLT_DBUS Raw D-Bus messages, starting with the endianness flag, followed by the message type, etc., but without the authentication handshake before the message sequence. pub const DBUS: i32 = 231; /// DLT_DVB_CI DVB-CI (DVB Common Interface for communication between a PC Card module and a DVB receiver), with the message format specified by the PCAP format for DVB-CI specification. pub const DVB_CI: i32 = 235; /// DLT_MUX27010 Variant of 3GPP TS 27.010 multiplexing protocol (similar to, but not the same as, 27.010). pub const MUX27010: i32 = 236; /// DLT_STANAG_5066_D_PDU D_PDUs as described by NATO standard STANAG 5066, starting with the synchronization sequence, and including both header and data CRCs. The current version of STANAG 5066 is backwards-compatible with the 1.0.2 version, although newer versions are classified. pub const STANAG_5066_D_PDU: i32 = 237; /// DLT_NFLOG Linux netlink NETLINK NFLOG socket log messages. pub const NFLOG: i32 = 239; /// DLT_NETANALYZER Pseudo-header for Hilscher Gesellschaft für Systemautomation mbH netANALYZER devices, followed by an Ethernet frame, beginning with the MAC header and ending with the FCS. pub const NETANALYZER: i32 = 240; /// DLT_NETANALYZER_TRANSPARENT Pseudo-header for Hilscher Gesellschaft für Systemautomation mbH netANALYZER devices, followed by an Ethernet frame, beginning with the preamble, SFD, and MAC header, and ending with the FCS. pub const NETANALYZER_TRANSPARENT: i32 = 241; /// DLT_IPOIB IP-over-InfiniBand, as specified by RFC 4391 section 6. pub const IPOIB: i32 = 242; /// DLT_MPEG_2_TS MPEG-2 Transport Stream transport packets, as specified by ISO 13818-1/ITU-T Recommendation H.222.0 (see table 2-2 of section 2.4.3.2 "Transport Stream packet layer"). pub const MPEG_2_TS: i32 = 243; /// DLT_NG40 Pseudo-header for ng4T GmbH's UMTS Iub/Iur-over-ATM and Iub/Iur-over-IP format as used by their ng40 protocol tester, followed by frames for the Frame Protocol as specified by 3GPP TS 25.427 for dedicated channels and 3GPP TS 25.435 for common/shared channels in the case of ATM AAL2 or UDP traffic, by SSCOP packets as specified by ITU-T Recommendation Q.2110 for ATM AAL5 traffic, and by NBAP packets for SCTP traffic. pub const NG40: i32 = 244; /// DLT_NFC_LLCP Pseudo-header for NFC LLCP packet captures, followed by frame data for the LLCP Protocol as specified by NFCForum-TS-LLCP_1.1. 
pub const NFC_LLCP: i32 = 245; /// DLT_INFINIBAND Raw InfiniBand frames, starting with the Local Routing Header, as specified in Chapter 5 "Data packet format" of InfiniBand™ Architectural Specification Release 1.2.1 Volume 1 - General Specifications. pub const INFINIBAND: i32 = 247; /// DLT_SCTP SCTP packets, as defined by RFC 4960, with no lower-level protocols such as IPv4 or IPv6. pub const SCTP: i32 = 248; /// DLT_USBPCAP USB packets, beginning with a USBPcap header. pub const USBPCAP: i32 = 249; /// DLT_RTAC_SERIAL Serial-line packet header for the Schweitzer Engineering Laboratories "RTAC" product, followed by a payload for one of a number of industrial control protocols. pub const RTAC_SERIAL: i32 = 250; /// DLT_BLUETOOTH_LE_LL Bluetooth Low Energy air interface Link Layer packets, in the format described in section 2.1 "PACKET FORMAT" of volume 6 of the Bluetooth Specification Version 4.0 (see PDF page 2200), but without the Preamble. pub const BLUETOOTH_LE_LL: i32 = 251; /// DLT_NETLINK Linux Netlink capture encapsulation. pub const NETLINK: i32 = 253; /// DLT_BLUETOOTH_LINUX_MONITOR Bluetooth Linux Monitor encapsulation of traffic for the BlueZ stack. pub const BLUETOOTH_LINUX_MONITOR: i32 = 254; /// DLT_BLUETOOTH_BREDR_BB Bluetooth Basic Rate and Enhanced Data Rate baseband packets. pub const BLUETOOTH_BREDR_BB: i32 = 255; /// DLT_BLUETOOTH_LE_LL_WITH_PHDR Bluetooth Low Energy link-layer packets. pub const BLUETOOTH_LE_LL_WITH_PHDR: i32 = 256; /// DLT_PROFIBUS_DL PROFIBUS data link layer packets, as specified by IEC standard 61158-4-3, beginning with the start delimiter, ending with the end delimiter, and including all octets between them. pub const PROFIBUS_DL: i32 = 257; /// DLT_PKTAP Apple PKTAP capture encapsulation. pub const PKTAP: i32 = 258; /// DLT_EPON Ethernet-over-passive-optical-network packets, starting with the last 6 octets of the modified preamble as specified by 65.1.3.2 "Transmit" in Clause 65 of Section 5 of IEEE 802.3, followed immediately by an Ethernet frame. pub const EPON: i32 = 259; /// DLT_IPMI_HPM_2 IPMI trace packets, as specified by Table 3-20 "Trace Data Block Format" in the PICMG HPM.2 specification. The time stamps for packets in this format must match the time stamps in the Trace Data Blocks. pub const IPMI_HPM_2: i32 = 260; /// DLT_ZWAVE_R1_R2 Z-Wave RF profile R1 and R2 packets, as specified by ITU-T Recommendation G.9959, with some MAC layer fields moved. pub const ZWAVE_R1_R2: i32 = 261; /// DLT_ZWAVE_R3 Z-Wave RF profile R3 packets, as specified by ITU-T Recommendation G.9959, with some MAC layer fields moved. pub const ZWAVE_R3: i32 = 262; /// DLT_WATTSTOPPER_DLM Formats for WattStopper Digital Lighting Management (DLM) and Legrand Nitoo Open protocol common packet structure captures. pub const WATTSTOPPER_DLM: i32 = 263; /// DLT_ISO_14443 Messages between ISO 14443 contactless smartcards (Proximity Integrated Circuit Card, PICC) and card readers (Proximity Coupling Device, PCD), with the message format specified by the PCAP format for ISO14443 specification. pub const ISO_14443: i32 = 264; /// DLT_RDS Radio data system (RDS) groups, as per IEC 62106, encapsulated in this form. pub const RDS: i32 = 265; /// DLT_USB_DARWIN USB packets, beginning with a Darwin (macOS, etc.) USB header. 
pub const USB_DARWIN: i32 = 266; /// DLT_SDLC SDLC packets, as specified by Chapter 1, "DLC Links", section "Synchronous Data Link Control (SDLC)" of Systems Network Architecture Formats, GA27-3136-20, without the flag fields, zero-bit insertion, or Frame Check Sequence field, containing SNA path information units (PIUs) as the payload. pub const SDLC: i32 = 268; /// DLT_LORATAP LoRaTap pseudo-header, followed by the payload, which is typically the PHYPayload from the LoRaWan specification. pub const LORATAP: i32 = 270; /// DLT_VSOCK Protocol for communication between host and guest machines in VMware and KVM hypervisors. pub const VSOCK: i32 = 271; /// DLT_NORDIC_BLE Messages to and from a Nordic Semiconductor nRF Sniffer for Bluetooth LE packets, beginning with a pseudo-header. pub const NORDIC_BLE: i32 = 272; /// DLT_DOCSIS31_XRA31 DOCSIS packets and bursts, preceded by a pseudo-header giving metadata about the packet. pub const DOCSIS31_XRA31: i32 = 273; /// DLT_ETHERNET_MPACKET mPackets, as specified by IEEE 802.3br Figure 99-4, starting with the preamble and always ending with a CRC field. pub const ETHERNET_MPACKET: i32 = 274; /// DLT_DISPLAYPORT_AUX DisplayPort AUX channel monitoring data as specified by the VESA DisplayPort (DP) Standard, preceded by a pseudo-header. pub const DISPLAYPORT_AUX: i32 = 275; /// DLT_LINUX_SLL2 Linux "cooked" capture encapsulation v2. pub const LINUX_SLL2: i32 = 276; /// DLT_OPENVIZSLA Openvizsla FPGA-based USB sniffer. pub const OPENVIZSLA: i32 = 278; /// DLT_EBHSCR Elektrobit High Speed Capture and Replay (EBHSCR) format. pub const EBHSCR: i32 = 279; /// DLT_VPP_DISPATCH Records in traces from the http://fd.io VPP graph dispatch tracer, in the graph dispatcher trace format. pub const VPP_DISPATCH: i32 = 280; /// DLT_DSA_TAG_BRCM Ethernet frames, with a switch tag inserted between the source address field and the type/length field in the Ethernet header. pub const DSA_TAG_BRCM: i32 = 281; /// DLT_DSA_TAG_BRCM_PREPEND Ethernet frames, with a switch tag inserted before the destination address in the Ethernet header. pub const DSA_TAG_BRCM_PREPEND: i32 = 282; /// DLT_IEEE802_15_4_TAP IEEE 802.15.4 Low-Rate Wireless Networks, with a pseudo-header containing TLVs with metadata preceding the 802.15.4 header. pub const IEEE802_15_4_TAP: i32 = 283; /// DLT_DSA_TAG_DSA Ethernet frames, with a switch tag inserted between the source address field and the type/length field in the Ethernet header. pub const DSA_TAG_DSA: i32 = 284; /// DLT_DSA_TAG_EDSA Ethernet frames, with a programmable Ethernet type switch tag inserted between the source address field and the type/length field in the Ethernet header. pub const DSA_TAG_EDSA: i32 = 285; /// DLT_ELEE Payload of lawful intercept packets using the ELEE protocol. The packet begins with the ELEE header; it does not include any transport-layer or lower-layer headers for protocols used to transport ELEE packets. pub const ELEE: i32 = 286;
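// Editor's note: a minimal sketch of how a capture tagged with the
// `MPEG_2_TS` link type above might be sanity-checked. The 188-byte packet
// length and the 0x47 sync byte come from ISO 13818-1 itself, not from this
// listing, so treat them as stated assumptions.
fn looks_like_mpeg2_ts(buf: &[u8]) -> bool {
    const TS_PACKET_LEN: usize = 188; // fixed transport packet length
    const TS_SYNC_BYTE: u8 = 0x47; // every transport packet starts with this byte
    !buf.is_empty()
        && buf.len() % TS_PACKET_LEN == 0
        && buf.chunks(TS_PACKET_LEN).all(|p| p[0] == TS_SYNC_BYTE)
}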
pub const ERF: i32 = 197; /// DLT_BLUETOOTH_HCI_H4_WITH_PHDR Bluetooth HCI UART transport layer; the frame contains a 4-byte direction field, in network byte order (big-endian), the low-order bit of which is set if the frame was sent from the host to the controller and clear if the frame was received by the host from the controller, followed by an HCI packet indicator byte, as specified by the UART Transport Layer portion of the most recent Bluetooth Core specification, followed by an HCI packet of the specified packet type, as specified by the Host Controller Interface Functional Specification portion of the most recent Bluetooth Core Specification. pub const BLUETOOTH_HCI_H4_WITH_PHDR: i32 = 201;
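// Editor's note: a hedged sketch of reading the 4-byte direction field that
// the DLT_BLUETOOTH_HCI_H4_WITH_PHDR description above places at the start of
// each frame (network byte order, low-order bit set = sent from host to
// controller). The enum and function names are illustrative, not part of
// this crate.
#[derive(Debug, PartialEq)]
enum HciDirection {
    HostToController,
    ControllerToHost,
}

fn hci_h4_direction(frame: &[u8]) -> Option<HciDirection> {
    if frame.len() < 4 {
        return None; // too short to carry the direction pseudo-header
    }
    let dir = u32::from_be_bytes([frame[0], frame[1], frame[2], frame[3]]);
    Some(if dir & 1 == 1 {
        HciDirection::HostToController
    } else {
        HciDirection::ControllerToHost
    })
}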
random_line_split
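// Editor's note: a small illustration of dispatching on a handful of the
// constants defined above; `link_type` would come from a capture file
// header, and the selection of cases here is arbitrary.
fn describe_link_type(link_type: i32) -> &'static str {
    match link_type {
        IPV4 => "raw IPv4",
        IPV6 => "raw IPv6",
        LINUX_SLL2 => "Linux cooked capture v2",
        USBPCAP => "USB with a USBPcap header",
        _ => "other/unknown",
    }
}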
storage.rs
//! A module encapsulating the Raft storage interface. use actix::{ dev::ToEnvelope, prelude::*, }; use futures::sync::mpsc::UnboundedReceiver; use failure::Fail; use crate::{ proto, raft::NodeId, }; /// An error type which wraps a `dyn Fail` type coming from the storage layer. /// /// This does require an allocation; however, the Raft node is currently configured to stop when /// it encounters an error from the storage layer. The cost of this allocation then is quite low. /// /// In order to avoid potential data corruption or other such issues, when an error is observed /// from the storage layer, the Raft node will stop. The parent application will still be able to /// perform cleanup or any other routines as needed before shutdown. #[derive(Debug, Fail)] #[fail(display="{}", _0)] pub struct StorageError(pub Box<dyn Fail>); /// The result type of all `RaftStorage` interfaces. pub type StorageResult<T> = Result<T, StorageError>; ////////////////////////////////////////////////////////////////////////////// // GetInitialState /////////////////////////////////////////////////////////// /// An actix message type for requesting Raft state information from the storage layer. /// /// When the Raft actor is first started, it will call this interface on the storage system to /// fetch the last known state from stable storage. If no such entry exists because this is the /// first time the node has come online, then the default value for `InitialState` should be used. /// /// ### pro tip
/// state record; and the index of the last log applied to the state machine. pub struct GetInitialState; impl Message for GetInitialState { type Result = StorageResult<InitialState>; } /// A struct used to represent the initial state which a Raft node needs when first starting. pub struct InitialState { /// The index of the last entry. pub last_log_index: u64, /// The term of the last log entry. pub last_log_term: u64, /// The index of the last log applied to the state machine. pub last_applied_log: u64, /// The saved hard state of the node. pub hard_state: HardState, } ////////////////////////////////////////////////////////////////////////////////////////////////// // GetLogEntries ///////////////////////////////////////////////////////////////////////////////// /// An actix message type for requesting a series of log entries from storage. /// /// The start value is inclusive in the search and the stop value is non-inclusive: /// `[start, stop)`. pub struct GetLogEntries { pub start: u64, pub stop: u64, } impl Message for GetLogEntries { type Result = StorageResult<Vec<proto::Entry>>; } ////////////////////////////////////////////////////////////////////////////////////////////////// // AppendLogEntries ////////////////////////////////////////////////////////////////////////////// /// An actix message type for requesting a series of entries to be written to the log. /// /// Though the entries will always be presented in order, each entry's index should be used for /// determining its location to be written in the log, as logs may need to be overwritten under /// some circumstances. /// /// The result of a successful append entries call must contain the details on that last log entry /// appended to the log. pub struct AppendLogEntries(pub Vec<proto::Entry>); /// Details on the last log entry appended to the log as part of an `AppendLogEntries` operation. pub struct AppendLogEntriesData { pub index: u64, pub term: u64, } impl Message for AppendLogEntries { type Result = StorageResult<AppendLogEntriesData>; } ////////////////////////////////////////////////////////////////////////////////////////////////// // CreateSnapshot //////////////////////////////////////////////////////////////////////////////// /// A request from the Raft node to have a new snapshot created which covers the current breadth /// of the log. /// /// The Raft node guarantees that this interface will never be called multiple overlapping times /// from the same Raft node, and it will not be called when an `InstallSnapshot` operation is in /// progress. /// /// **It is critical to note** that the newly created snapshot must be able to be used to /// completely and accurately create a state machine. In addition to saving space on disk (log /// compaction), snapshots are used to bring new Raft nodes and slow Raft nodes up-to-speed with /// the cluster leader. /// /// ### implementation algorithm /// - The generated snapshot should include all log entries starting from entry `0` up through /// the index specified by `through`. This will include any snapshot which may already exist. If /// a snapshot does already exist, the new log compaction process should be able to just load the /// old snapshot first, and resume processing from its last entry. /// - The newly generated snapshot should be written to the directory specified by `snapshot_dir`. /// - All previous entries in the log should be deleted up to the entry specified at index /// `through`. 
/// - The entry at index `through` should be replaced with a new entry created from calling /// `actix_raft::proto::Entry::new_snapshot_pointer(...)`. /// - Any old snapshot will no longer have representation in the log, and should be deleted. /// - Return a copy of the snapshot pointer entry created earlier. pub struct CreateSnapshot { /// The new snapshot should start from entry `0` and should cover all entries through the /// index specified here, inclusive. pub through: u64, /// The directory where the new snapshot is to be written. pub snapshot_dir: String, } impl Message for CreateSnapshot { type Result = StorageResult<proto::Entry>; } ////////////////////////////////////////////////////////////////////////////////////////////////// // InstallSnapshot /////////////////////////////////////////////////////////////////////////////// /// A request from the Raft node to have a new snapshot written to disk and installed. /// /// This message holds an `UnboundedReceiver` which will stream in new chunks of data as they are /// received from the Raft leader. /// /// ### implementation algorithm /// - Upon receiving the request, a new snapshot file should be created on disk. /// - Every new chunk of data received should be written to the new snapshot file starting at the /// `offset` specified in the chunk. The Raft actor will ensure that redelivered chunks are not /// sent through multiple times. /// - If the receiver is dropped, the snapshot which was being created should be removed from /// disk. /// /// Once a chunk is received which is the final chunk of the snapshot, after writing the data, /// there are a few important steps to take: /// /// - Create a new entry in the log via the `actix_raft::proto::Entry::new_snapshot_pointer(...)` /// constructor. Insert the new entry into the log at the specified `index` of this payload. /// - If there are any logs older than `index`, remove them. /// - If there are any other snapshots in `snapshot_dir`, remove them. /// - If there are any logs newer than `index`, then return. /// - If there are no logs newer than `index`, then the state machine should be reset, and /// recreated from the new snapshot. Return once the state machine has been brought up-to-date. pub struct InstallSnapshot { /// The term which the final entry of this snapshot covers. pub term: u64, /// The index of the final entry which this snapshot covers. pub index: u64, /// The directory where the new snapshot is to be written. pub snapshot_dir: String, /// A stream of data chunks for this snapshot. pub stream: UnboundedReceiver<InstallSnapshotChunk>, } impl Message for InstallSnapshot { type Result = StorageResult<()>; } /// A chunk of snapshot data. pub struct InstallSnapshotChunk { /// The byte offset where the chunk is positioned in the snapshot file. pub offset: u64, /// The raw bytes of the snapshot chunk, starting at `offset`. pub data: Vec<u8>, /// Will be `true` if this is the last chunk in the snapshot. pub done: bool, } ////////////////////////////////////////////////////////////////////////////////////////////////// // GetCurrentSnapshot //////////////////////////////////////////////////////////////////////////// /// A request from the Raft node to get the location of the current snapshot on disk. /// /// ### implementation algorithm /// Implementation for this type's handler should be quite simple. Check the directory specified /// by `snapshot_dir` for any snapshot files.
A proper implementation will only ever have one /// active snapshot, though another may exist while it is being created. As such, it is /// recommended to use a file naming pattern which will allow for easily distinguishing between /// the current live snapshot, and any new snapshot which is being created. /// /// Once the current snapshot has been located, the absolute path to the file should be returned. /// If there is no active snapshot file, then `None` should be returned. pub struct GetCurrentSnapshot { /// The directory where the system has been configured to store snapshots. pub snapshot_dir: String, } impl Message for GetCurrentSnapshot { type Result = StorageResult<Option<String>>; } ////////////////////////////////////////////////////////////////////////////////////////////////// // ApplyEntriesToStateMachine //////////////////////////////////////////////////////////////////// /// A request from the Raft node to apply the given log entries to the state machine. /// /// The Raft protocol guarantees that only logs which have been _committed_, that is, logs which /// have been replicated to a majority of the cluster, will be applied to the state machine. pub struct ApplyEntriesToStateMachine(pub Vec<proto::Entry>); /// Details on the last log entry applied to the state machine as part of an `ApplyEntriesToStateMachine` operation. pub struct ApplyEntriesToStateMachineData { pub index: u64, pub term: u64, } impl Message for ApplyEntriesToStateMachine { type Result = StorageResult<ApplyEntriesToStateMachineData>; } ////////////////////////////////////////////////////////////////////////////////////////////////// // SaveHardState ///////////////////////////////////////////////////////////////////////////////// /// A request from the Raft node to save its HardState. pub struct SaveHardState(pub HardState); /// A record holding the hard state of a Raft node. pub struct HardState { /// The last recorded term observed by this system. pub current_term: u64, /// The ID of the node voted for in the `current_term`. pub voted_for: Option<NodeId>, /// The IDs of all known members of the cluster. pub members: Vec<u64>, } impl Message for SaveHardState { type Result = StorageResult<()>; } ////////////////////////////////////////////////////////////////////////////////////////////////// // RaftStorage /////////////////////////////////////////////////////////////////////////////////// /// A trait defining the interface of a Raft storage actor. /// /// ### implementation notes /// Appending log entries should not be considered complete until the data has been flushed to /// disk. Some of Raft's safety guarantees are premised upon committed log entries being fully /// flushed to disk. If this invariant is not upheld, the system could incur data loss. /// /// ### snapshot /// See §7. /// /// Each node in the cluster will independently snapshot its data for compaction purposes. The /// conditions for when a new snapshot will be generated are based on the node's `Config`. In /// addition to periodic snapshots, a leader may need to send an `InstallSnapshot` RPC to /// followers which are far behind or which are new to the cluster. This is based on the same /// `Config` value. The Raft node will send a message to this `RaftStorage` interface when a /// periodic snapshot is to be generated based on its configuration. /// /// Log compaction, which is part of what taking a snapshot is for, is an application-specific /// process. The essential idea is that superfluous records in the log will be removed.
See §7 for /// more details. There are a few snapshot-related messages which the `RaftStorage` actor must /// handle: /// /// - `CreateSnapshot`: a request to create a new snapshot of the current log. /// - `InstallSnapshot`: the Raft leader is streaming over a snapshot, install it. /// - `GetCurrentSnapshot`: the Raft node needs to know the location of the current snapshot. /// /// See each message type for more details on the message and how to properly implement its /// behavior. pub trait RaftStorage where Self: Actor<Context=Context<Self>>, Self: Handler<GetInitialState> + ToEnvelope<Self, GetInitialState>, Self: Handler<SaveHardState> + ToEnvelope<Self, SaveHardState>, Self: Handler<GetLogEntries> + ToEnvelope<Self, GetLogEntries>, Self: Handler<AppendLogEntries> + ToEnvelope<Self, AppendLogEntries>, Self: Handler<ApplyEntriesToStateMachine> + ToEnvelope<Self, ApplyEntriesToStateMachine>, Self: Handler<CreateSnapshot> + ToEnvelope<Self, CreateSnapshot>, Self: Handler<InstallSnapshot> + ToEnvelope<Self, InstallSnapshot>, Self: Handler<GetCurrentSnapshot> + ToEnvelope<Self, GetCurrentSnapshot>, {}
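// Editor's note: a hedged sketch of the shape a `RaftStorage` implementor
// takes. `MemStorage` is a hypothetical in-memory example; only the
// `GetInitialState` handler is shown, and the remaining handlers (plus
// `impl RaftStorage for MemStorage {}`) would follow the same pattern. It
// assumes `proto::Entry` exposes `index` and `term` fields and that
// `NodeId` is `Copy`; both are assumptions, not confirmed by this module.
struct MemStorage {
    hard_state: HardState,
    log: Vec<proto::Entry>,
    last_applied_log: u64,
}

impl Actor for MemStorage {
    type Context = Context<Self>;
}

impl Handler<GetInitialState> for MemStorage {
    type Result = StorageResult<InitialState>;

    fn handle(&mut self, _msg: GetInitialState, _ctx: &mut Context<Self>) -> Self::Result {
        // Assemble the pieces from the three places named in the
        // `GetInitialState` "pro tip" above: the log, the hard state record,
        // and the last-applied index.
        let (last_log_index, last_log_term) = self
            .log
            .last()
            .map(|e| (e.index, e.term))
            .unwrap_or((0, 0));
        Ok(InitialState {
            last_log_index,
            last_log_term,
            last_applied_log: self.last_applied_log,
            hard_state: HardState {
                current_term: self.hard_state.current_term,
                voted_for: self.hard_state.voted_for,
                members: self.hard_state.members.clone(),
            },
        })
    }
}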
/// The storage impl may need to look in a few different places to accurately respond to this /// request: the last entry in the log for `last_log_index` & `last_log_term`; the node's hard
random_line_split
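// Editor's note: since `GetLogEntries` above is documented as a half-open
// `[start, stop)` range, the core of a handler reduces to a filter over the
// stored log. A sketch only, assuming `proto::Entry` is `Clone` and exposes
// an `index` field (an assumption, not confirmed by this module):
fn entries_in_range(log: &[proto::Entry], start: u64, stop: u64) -> Vec<proto::Entry> {
    log.iter()
        .filter(|entry| entry.index >= start && entry.index < stop)
        .cloned()
        .collect()
}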
main.rs
use std::collections::HashMap; // To allow me to use the hashmap I created with the menu items. /* This function will remove any newline characters or returns that are read in. Here the string is passed by reference. I created this as an example and initially used both remove and removed, but for convenience I ended up just using one throughout.*/ fn remove(string: &mut String){ if let Some('\n')=string.chars().next_back() { string.pop(); } if let Some('\r')=string.chars().next_back() { string.pop(); } } //Same function as remove but passed by value. fn
(mut string: String) -> String { if let Some('\n')=string.chars().next_back() { string.pop(); } if let Some('\r')=string.chars().next_back() { string.pop(); } string } /*This will set up to take input from the keyboard. It will then remove any newline or return characters, by calling removed, and then return the string slice */ fn rdin() -> String{ let mut reader = String::new(); std::io::stdin().read_line(&mut reader).unwrap(); reader = removed(reader.to_lowercase()); // Changes everything to lowercase so it is not a case-sensitive program. println!(); return reader; } /*Rounded takes floating point numbers and rounds them to two decimal places. With the way that Rust rounds, it first needs to be rounded to three decimal places, and then two, in order to get an accurate rounding. */ fn rounded(mut rounder: f32) -> f32{ rounder = format!("{:.3}", rounder).parse::<f32>().unwrap(); rounder = format!("{:.2}", rounder).parse::<f32>().unwrap(); return rounder; } /*This function was created for checking for correct input when integers were to be used. It is necessary before trying to convert the string to an integer. This is implemented with the tips. */ fn strchecker(mut temp: String) -> String{ while !temp.contains(&"1") && !temp.contains(&"2") && !temp.contains(&"3") && !temp.contains(&"4"){ println!("It seems you entered an unrecognized value.\nYou entered, {}, please try again with either '1', '2', '3', or '4'.", temp); temp = rdin(); } return temp; } /*intchecker will check the input as the actual expected ints. This is a necessary second layer, since the strchecker will allow say 21, or 34 as inputs. If the value is incorrect it will call for a new input, and the strchecker again.*/ fn intchecker(mut tip: i16) -> i16{ let mut temp = String::new(); while tip != 1 && tip != 2 && tip != 3 && tip != 4{ println!("It seems you entered an unrecognized value.\nYou entered, {}, please try again with either '1', '2', '3' or '4'.", tip); temp = rdin(); temp = strchecker(temp); tip = temp.parse::<i16>().unwrap(); } return tip; } /*ynchecker will do everything necessary to check for the correct input of either a y or an n. It calls the rdin function to get input. Then it will check for an empty string so that there is no broken code. Then it checks the chars and if the value is not within the range of acceptable values, it will use recursion to get a new value and run the checks again. This is done by reference.*/ fn ynchecker(selector: &mut char){ let mut temp = String::new(); temp = rdin(); //Simply error checks for incorrect values. if temp.is_empty(){ // Will check for an empty string. *selector = ' '; println!("You got an empty string there"); } else { *selector = temp.chars().nth(0).unwrap(); // Have to convert from a string, to a slice, to a char. } if *selector != 'y' && *selector != 'n'{ println!("It seems you entered an unrecognized value.\nYou entered, {}, please try again with either 'y' or 'n'.", selector); ynchecker(selector); } } //main is necessary to run the code. fn main() { //Constants declared for the tax rate, default tip rates, and special tip cases. const TAX: f32 = 0.06; const FIVE: f32 = 0.05; const TEN: f32 = 0.10; const FTN: f32 = 0.15; const TWN: f32 = 0.20; const QRT: f32 = 0.25; const HALF: f32 = 0.50; const DOLLAR: f32 = 1.00; //use mut to say the variable can be changed.
let mut i: u8; let mut selector: char = 'y'; let mut cost: f32 = 0.0; let mut taxes: f32; //Created a hashmap, it then is populated with the menu information let mut items = HashMap::new(); items.insert(String::from("soda"), 1.95); items.insert(String::from("water"), 0.00); items.insert(String::from("burger"), 6.95); items.insert(String::from("pizza"), 2.95); items.insert(String::from("fries"), 1.95); items.insert(String::from("steak"), 9.95); //Creates a vector. It is necessary to specify the data type that will be in the vector. let mut itemPrice: Vec<f32> = Vec::new(); let mut total: f32; let mut fivet: f32; let mut tent: f32; let mut ftnt: f32; let mut twnt: f32; //Cannot initialize a string with values already in it. let mut temp = String::new(); let mut tip: i16; println!("Welcome to the restaurant of Rusty Lake!"); //Do you get the reference here? xD //Loops through the entire body of the code to allow multiple iterations of orders. while selector != 'n'{ //Needs to be cleared from any past iterations. cost = 0.0; i = 0; //Specifically for clearing the vector, instead of wasting memory creating a new one each time. //Will iterate through the length of the vector using .rev(), which is basically just a backwards iterator. for i in (0..itemPrice.len()).rev(){ itemPrice.remove(i); } //Will loop through for each item being selected from the menu. while selector != 'n'{ println!("What item from the menu would you like to order?"); //Prints out the entire HashMap for (key, value) in &items { println!("{}: {:.2}", key, value); } temp = rdin(); //If the input does not match with a key we need to get the correct value. while !items.contains_key(&temp){ println!("It seems what you entered did not quite match one of the items from the menu.\nPlease try again."); for (key, value) in &items { println!("{}: {:.2}", key, value); } temp = rdin(); } //Checks that the input really is a key. if items.contains_key(&temp){ /*A little bit of a different decision structure here. The match will compare the given statement to the pattern of the other types. In a way this reminds me of the when statement from Kotlin.*/ match items.get(&temp){ Some(price) => { itemPrice.push(*price); println!("Item price, ${:.2}", price); } None => { println!("Error! Something went wrong!"); } } } println!("Is there another item from the menu you wish to order? (y/n)"); ynchecker(&mut selector); i += 1; } //Will add each item from the vector to the cost. for order in itemPrice.iter(){ //println!("The current item is priced ${}", order); cost += order; } //Calculate the costs with tax and various tips. taxes = cost * TAX; taxes = rounded(taxes); total = taxes + cost; println!("Your taxes will be: ${0:.2}\nYour total with taxes will be ${1:.2}\n", taxes, total); fivet = cost * FIVE; tent = cost * TEN; ftnt = cost * FTN; twnt = cost * TWN; fivet = rounded(fivet); tent = rounded(tent); ftnt = rounded(ftnt); twnt = rounded(twnt); /*First check whether they ordered only water, which would break the normal code for calculating the tips.
If there is a large group of people, considering someone may order 2 items on average, then raise the default tip rate.*/ if total == 0.0{ println!("Please consider being generous today and leave a tip for your waiter.\nSelect one of the following:\n1) $0.25 2) $0.50\n3) $1.00 4) Other"); } else if i < 10{ println!("What would you like your tip to be?\nSelect one of the following:\n1) 5%: ${0:.2} {3:<10}2) 10%: ${1:.2}\n3) 15%: ${2:.2}{3:<10} 4) Other", fivet, tent, ftnt, ""); } else { println!("What would you like your tip to be?\nSelect one of the following:\n1) 10%: ${0:.2}{3:<10} 2) 15%: ${1:.2}\n3) 20%: ${2:.2}{3:<10}4) Other", tent, ftnt, twnt, ""); } temp = rdin(); temp = strchecker(temp); // Use the string checker first to make sure there aren't actually any letters read in. tip = temp.parse::<i16>().unwrap(); // After we have checked that there are only integers, we can convert the data type to an int. tip = intchecker(tip); // Then we have to actually check the values for correct integers. // First check for the special only-water condition. Then go along with normal tips. if total == 0.0{ if tip == 1{ total += QRT; } else if tip == 2{ total += HALF; } else if tip == 3{ total += DOLLAR; } else if tip == 4{ println!("Please enter a specific amount, including the change. Ex '10.00':"); total += rdin().parse::<f32>().unwrap(); //Will convert the string to a floating point number. Will break if letters are read in. } else{ println!("It appears you got through all my checks. In other words, you broke the code! Way to go!"); } } else { if tip == 1{ total += fivet; } else if tip == 2{ total += tent; } else if tip == 3{ total += ftnt; } else if tip == 4{ println!("Please enter a specific amount, including the change. Ex '10.00':"); total += rdin().parse::<f32>().unwrap(); } else{ // Just a random extra else. I found no situations that would trigger this code, but fun for just in case. println!("It appears you got through all my checks. In other words, you broke the code! Way to go!"); } } println!("Your total will be: ${:.2}", total); // The :.2 that I used in a lot of these print statements is to enforce the formatting with two decimal places. println!("Is there another order you wish to enter? (y/n)"); ynchecker(&mut selector); // One final error check. } }
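// Editor's note: the format!-and-reparse trick in `rounded` above works, but
// an alternative sketch (not part of the original program) avoids the string
// round-trip entirely by rounding in cent units:
fn round_to_cents(x: f32) -> f32 {
    (x * 100.0).round() / 100.0
}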
removed
identifier_name
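// Editor's note: the strchecker/intchecker pair above validates the menu
// choice in two passes; a single-pass alternative (a sketch only, not part
// of the original program, assuming `rdin` from main.rs is in scope) parses
// first and then range-checks, looping until the input is one of 1..=4:
fn read_choice() -> i16 {
    loop {
        let line = rdin();
        match line.trim().parse::<i16>() {
            Ok(n) if (1..=4).contains(&n) => return n,
            _ => println!(
                "Please try again with '1', '2', '3' or '4' (you entered {}).",
                line
            ),
        }
    }
}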
main.rs
use std::collections::HashMap; // To allow me to use the hashmap I created with the menu items. /* This function will remove any newline characters or returns that are read in. In this the string is passed by reference. I created this as an example and initially used both remove, and removed, but for convienence I eneded up just using one throughout.*/ fn remove(string: &mut String){ if let Some('\n')=string.chars().next_back() { string.pop(); } if let Some('\r')=string.chars().next_back() { string.pop(); } } //Same function as remove but passed by value. fn removed(mut string: String) -> String { if let Some('\n')=string.chars().next_back() { string.pop(); } if let Some('\r')=string.chars().next_back() { string.pop(); } string } /*This will set up to take input from the keyboard. It will then remove any newline or return characters, by calling removed, and then return the string splice */ fn rdin() -> String{ let mut reader = String::new(); std::io::stdin().read_line(&mut reader).unwrap(); reader = removed(reader.to_lowercase()); // Changes everything to lowercase so it is not a case sensetive program. println!(); return reader; } /*Rounded takes floating point integers and rounds them to two decimal places. With the way that Rust rounds, it first needs to be rounded to three decimal places, and then two, in order to get an accurate rounding. */ fn rounded(mut rounder: f32) -> f32{ rounder = format!("{:.3}", rounder).parse::<f32>().unwrap(); rounder = format!("{:.2}", rounder).parse::<f32>().unwrap(); return rounder; } /*This function was created for checking for correct input when integers were to be used. It is necessary before trying to convert the string to an integer. This is implimented with the tips. */ fn strchecker(mut temp: String) -> String{ while!temp.contains(&"1") &&!temp.contains(&"2") &&!temp.contains(&"3") &&!temp.contains(&"4"){ println!("It seems you entered an unrecognized value.\nYou entered, {}, please try again with either '1', '2', '3', or '4'.", temp); temp = rdin(); } return temp; } /*intchecker will check the input as the actual expected ints. This is a necessary second layer, since the strchecker will allow say 21, or 34 as inputs. If the value is incorrect it will call for a new input, and the strchecker again.*/ fn intchecker(mut tip: i16) -> i16{ let mut temp = String::new(); while tip!= 1 && tip!=2 && tip!=3 && tip!=4{ println!("It seems you entered an unrecognized value.\nYou entered, {}, please try again with either '1', '2', '3' or '4'.", tip); temp = rdin(); temp = strchecker(temp); tip = temp.parse::<i16>().unwrap(); } return tip; } /*ynchecker will do everything necessary to check the for the correct input of either a y or a n. It calls the rdin function to get input. Then it will check for empyt string so that there is no broken code. Then it checks the chars and if it is not within the range of acceptable values, it will use recursion to do get a new value and run the checks again. This is done by Reference.*/ fn ynchecker(selector: &mut char){ let mut temp = String::new(); temp = rdin(); //Simply error checks for incorrect values. if temp.is_empty(){ // Will check for an empty string. *selector =''; println!("You got an empty sting there"); } else { *selector = temp.chars().nth(0).unwrap(); // Have to convert from a string, to a slice, to a char. 
} if *selector!= 'y' && *selector!= 'n'{ println!("It seems you entered an unrecognized value.\nYou entered, {}, please try again with either 'y' or 'n'.", selector); ynchecker(selector); } } //main is necessary to run the code. fn main()
items.insert(String::from("pizza"), 2.95); items.insert(String::from("fries"), 1.95); items.insert(String::from("stake"), 9.95); //Creates a vector. It is necessary to specify the data type that will be in the vector. let mut itemPrice: Vec<f32> = Vec::new(); let mut total: f32; let mut fivet: f32; let mut tent: f32; let mut ftnt: f32; let mut twnt: f32; //Cannot initialize a string with values already in it. let mut temp = String::new(); let mut tip: i16; println!("Welcome to the restaurant of Rusty Lake!"); //Do you get the reference here? xD //Loops through the entire body of the code to allow multiple iterations of orders. while selector!= 'n'{ //Needs to be cleared from any past iterations. cost = 0.0; i = 0; //Specifically for clearing the vector, instead of wasting memory creating a new one each time. //Will iterate through the length of the vector using.rev, which is basically just a backwards iterator. for i in (0..itemPrice.len()).rev(){ itemPrice.remove(i); } //Will loop through for each item being selected from the menu. while selector!= 'n'{ println!("What item from the menu would you like to order?"); //Prints out the entire HashMap for (key, value) in &items { println!("{}: {:.2}", key, value); } temp = rdin(); //If the input does not match with a key we need to get the correct value. while!items.contains_key(&temp){ println!("It seems what you entered did not quite match one of the items from the menu.\nPlease try again."); for (key, value) in &items { println!("{}: {:.2}", key, value); } temp = rdin(); } //Checks that the input really is a key. if items.contains_key(&temp){ /*A little bit of a different descision structure here. The match will compare the given statement to the pattern of the other types. In a way this reminds me of the when statement from Kotlin.*/ match items.get(&temp){ Some(price) => { itemPrice.push(*price); println!("Item price, ${:.2}", price); } None => { println!("Error! Something went wrong!"); } } } println!("Is there another item from the menu you wish to order? (y/n)"); ynchecker(&mut selector); i += 1; } //Will add each item from the vector to the cost. for order in itemPrice.iter(){ //println!("The current item is priced ${}", order); cost += order; } //Calculate the costs with tax and various tips. taxes = cost * TAX; taxes = rounded(taxes); total = taxes + cost; println!("Your taxes will be: ${0:.2}\nYour total with taxes will be ${1:.2}\n", taxes, total); fivet = cost * FIVE; tent = cost * TEN; ftnt = cost * FTN; twnt = cost * TWN; fivet = rounded(fivet); tent = rounded(tent); ftnt = rounded(ftnt); twnt = rounded(twnt); /*First check for if they ordered water, when it would brake the normal code for calculating the tips. If there is a large group of people, considering someone may order 2 items on average, then raise the default tip rate.*/ if total == 0.0{ println!("Please consider being generous today and leave a tip for your waiter.\nSelect one of the following:\n1) $0.25 2) $0.50\n3) $1.00 4) Other"); } else if i < 10{ println!("What would you like your tip to be?\nSelect one of the following:\n1) 5%: ${0:.2} {3:<10}2) 10%: ${1:.2}\n3) 15%: ${2:.2}{3:<10} 4) Other", fivet, tent, ftnt, ""); } else { println!("What would you like your tip to be?\nSelect one of the following:\n1) 10%: ${0:.2}{3:<10} 2) 15%: ${1:.2}\n3) 20%: ${2:.2}{3:<10}4) Other", tent, ftnt, twnt, ""); } temp = rdin(); temp = strchecker(temp); // Use the string checker first to make sure there aren't actually and letters read in. 
tip = temp.parse::<i16>().unwrap(); // After we have checked that there are only integers, we can convert the data type to an int. tip = intchecker(tip); // Then we have to actually check the values for correct integers. // First check for the special only-water condition. Then go along with normal tips. if total == 0.0 { if tip == 1 { total += QRT; } else if tip == 2 { total += HALF; } else if tip == 3 { total += DOLLAR; } else if tip == 4 { println!("Please enter a specific amount, including the change. Ex '10.00':"); total += rdin().parse::<f32>().unwrap(); //Will convert the string to a floating point number. Will break if letters are read in. } else { println!("It appears you got through all my checks. In other words, you broke the code! Way to go!"); } } else { if tip == 1 { total += fivet; } else if tip == 2 { total += tent; } else if tip == 3 { total += ftnt; } else if tip == 4 { println!("Please enter a specific amount, including the change. Ex '10.00':"); total += rdin().parse::<f32>().unwrap(); } else { // Just a random extra else. I found no situations that would reach this code, but fun for just in case. println!("It appears you got through all my checks. In other words, you broke the code! Way to go!"); } } println!("Your total will be: ${:.2}", total); // The :.2 that I used in a lot of these print statements is to enforce the formatting with two decimal places. println!("Is there another order you wish to enter? (y/n)"); ynchecker(&mut selector); // One final error check. } }
{ //Constants declared for the tax rate, default tip rates, and special tip cases. const TAX: f32 = 0.06; const FIVE: f32 = 0.05; const TEN: f32 = 0.10; const FTN: f32 = 0.15; const TWN: f32 = 0.20; const QRT: f32 = 0.25; const HALF: f32 = 0.50; const DOLLAR: f32 = 1.00; //Use mut to mark a variable as mutable. let mut i: u8; let mut selector: char = 'y'; let mut cost: f32 = 0.0; let mut taxes: f32; //Create a HashMap, then populate it with the menu information. let mut items = HashMap::new(); items.insert(String::from("soda"), 1.95); items.insert(String::from("water"), 0.00); items.insert(String::from("burger"), 6.95);
identifier_body
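The order-total code in the row above calls a `rounded` helper that is not included in this excerpt. The following is a minimal sketch consistent with how it is called (f32 in, f32 out, two-decimal currency rounding); the body is an assumption, not the original implementation.

// Hypothetical reconstruction of the rounded() helper used above:
// round an f32 to two decimal places, suitable for currency display.
fn rounded(value: f32) -> f32 {
    // Scale up, round to the nearest integer, scale back down.
    (value * 100.0).round() / 100.0
}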
lib.rs
*death_watch += 1; } } // prune all filters with an expired death ticker let level = &self.desperation_level; self.trackers .retain(|(_id, death_count, _tracker)| death_count < level); return predictions; } pub fn dump_filter_reals(&self) -> Vec<GrayImage> { return self.trackers.iter().map(|t| t.2.dump_filter().0).collect(); } pub fn size(&self) -> usize { self.trackers.len() } } pub struct Prediction { pub location: (u32, u32), pub psr: f32, } pub struct MosseTracker { filter: Vec<Complex<f32>>, // frame dimensions (constant per tracker) frame_width: u32, frame_height: u32, // stores dimensions of tracking window and its center // window is square for now, this variable contains the size of the square edge window_size: u32, current_target_center: (u32, u32), // represents center in frame // the 'target' (G). A single Gaussian peak centered at the tracking window. target: Vec<Complex<f32>>, // constants: learning rate (eta) and regularization eta: f32, regularization: f32, // not super important for MOSSE: see paper fig 4. // the previous Ai and Bi last_top: Vec<Complex<f32>>, last_bottom: Vec<Complex<f32>>, // the previous psr pub last_psr: f32, // thread-safe FFT objects containing precomputed parameters for this input data size. fft: Arc<dyn Fft<f32>>, inv_fft: Arc<dyn Fft<f32>>, } impl Debug for MosseTracker { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("MosseTracker") .field("filter", &self.filter) .field("frame_width", &self.frame_width) .field("frame_height", &self.frame_height) .field("window_size", &self.window_size) .field("current_target_center", &self.current_target_center) .field("target", &self.target) .field("eta", &self.eta) .field("regularization", &self.regularization) .field("last_top", &self.last_top) .field("last_bottom", &self.last_bottom) .field("last_psr", &self.last_psr) // These fields don't implement Debug, so I can't use the #[derive(Debug)] impl. //.field("fft", &self.fft) //.field("inv_fft", &self.inv_fft) .finish() } } #[derive(Debug)] pub struct MosseTrackerSettings { pub width: u32, pub height: u32, pub window_size: u32, pub learning_rate: f32, pub psr_threshold: f32, pub regularization: f32, } #[allow(non_snake_case)] impl MosseTracker { pub fn new(settings: &MosseTrackerSettings) -> MosseTracker { // parameterize the FFT objects let mut planner = FftPlanner::new(); let mut inv_planner = FftPlanner::new(); // NOTE: we initialize the FFTs based on the size of the window let length = (settings.window_size * settings.window_size) as usize; let fft = planner.plan_fft_forward(length); let inv_fft = inv_planner.plan_fft_inverse(length); // initialize the filter and its top and bottom parts with zeroes. let filter = vec![Complex::zero(); length]; let top = vec![Complex::zero(); length]; let bottom = vec![Complex::zero(); length]; // initialize the target output map (G), with a compact Gaussian peak centered on the target object. // In the Bolme paper, this map is called gi.
let mut target: Vec<Complex<f32>> = build_target(settings.window_size, settings.window_size) .into_iter() .map(|p| Complex::new(p as f32, 0.0)) .collect(); fft.process(&mut target); return MosseTracker { filter, last_top: top, last_bottom: bottom, last_psr: 0.0, eta: settings.learning_rate, regularization: settings.regularization, target, fft, inv_fft, frame_width: settings.width, frame_height: settings.height, window_size: settings.window_size, current_target_center: (0, 0), }; } fn compute_2dfft(&self, imagedata: Vec<f32>) -> Vec<Complex<f32>> { let mut buffer: Vec<Complex<f32>> = imagedata .into_iter() .map(|p| Complex::new(p as f32, 0.0)) .collect(); // fft.process() CONSUMES the input buffer as scratch space, make sure it is not reused self.fft.process(&mut buffer); return buffer; } // Train a new filter on the first frame in which the object occurs pub fn train(&mut self, input_frame: &GrayImage, target_center: (u32, u32)) { // store the target center as the current self.current_target_center = target_center; // cut out the training template by cropping let window = &window_crop( input_frame, self.window_size, self.window_size, target_center, ); #[cfg(debug_assertions)] { window.save("WINDOW.png").unwrap(); } // build an iterator that produces training frames that have been slightly rotated according to a theta value. let rotated_frames = [ 0.02, -0.02, 0.05, -0.05, 0.07, -0.07, 0.09, -0.09, 1.1, -1.1, 1.3, -1.3, 1.5, -1.5, 2.0, -2.0, ] .iter() .map(|rad| { // Rotate an image clockwise about its center by theta radians. let training_frame = rotate_about_center(window, *rad, Interpolation::Nearest, Luma([0])); #[cfg(debug_assertions)] { training_frame .save(format!("training_frame_rotated_theta_{}.png", rad)) .unwrap(); } return training_frame; }); // build an iterator that produces training frames that have been slightly scaled to various degrees ('zoomed') let scaled_frames = [0.8, 0.9, 1.1, 1.2].into_iter().map(|scalefactor| { let scale = Projection::scale(scalefactor, scalefactor); let scaled_training_frame = warp(&window, &scale, Interpolation::Nearest, Luma([0])); #[cfg(debug_assertions)] { scaled_training_frame .save(format!("training_frame_scaled_{}.png", scalefactor)) .unwrap(); } return scaled_training_frame; }); // Chain these iterators together. // Note that we add the initial, unperturbed training frame as first in line. let training_frames = std::iter::once(window) .cloned() .chain(rotated_frames) .chain(scaled_frames); // TODO: scaling is not ready yet //.chain(scaled_frames); let mut training_frame_count = 0; for training_frame in training_frames { // preprocess the training frame using preprocess() let vectorized = preprocess(&training_frame); // calculate the 2D FFT of the preprocessed frame: FFT(fi) = Fi let Fi = self.compute_2dfft(vectorized); // compute the complex conjugate of Fi, Fi*. 
let Fi_star: Vec<Complex<f32>> = Fi.iter().map(|e| e.conj()).collect(); // compute the initial filter let top = self.target.iter().zip(Fi_star.iter()).map(|(g, f)| g * f); let bottom = Fi.iter().zip(Fi_star.iter()).map(|(f, f_star)| f * f_star); // add the values to the running sum self.last_top .iter_mut() .zip(top) .for_each(|(running, new)| *running += new); self.last_bottom .iter_mut() .zip(bottom) .for_each(|(running, new)| *running += new); training_frame_count += 1; } // divide the values of the top and bottom filters by the number of training perturbations used self.last_top .iter_mut() .for_each(|e| *e /= training_frame_count as f32); self.last_bottom .iter_mut() .for_each(|e| *e /= training_frame_count as f32); // compute the filter by dividing Ai and Bi elementwise // note that we add a small regularization term to the denominator to avoid dividing by zero, which would yield NaN's. self.filter = self .last_top .iter() .zip(&self.last_bottom) .map(|(a, b)| a / (b + self.regularization)) .collect(); #[cfg(debug_assertions)] { println!( "current center of target in frame: x={}, y={}", self.current_target_center.0, self.current_target_center.1 ); } } pub fn track_new_frame(&mut self, frame: &GrayImage) -> Prediction { // cut out the training template by cropping let window = window_crop( frame, self.window_size, self.window_size, self.current_target_center, ); // preprocess the image using preprocess() let vectorized = preprocess(&window); // calculate the 2D FFT of the preprocessed image: FFT(fi) = Fi let Fi = self.compute_2dfft(vectorized); // elementwise multiplication of F with filter H gives Gi let mut corr_map_gi: Vec<Complex<f32>> = Fi.iter().zip(&self.filter).map(|(a, b)| a * b).collect(); // NOTE: Gi is garbage after this call self.inv_fft.process(&mut corr_map_gi); // find the max value of the filtered image 'gi', along with the position of the maximum let (maxind, max_complex) = corr_map_gi .iter() .enumerate() .max_by(|a, b| { // filtered (gi) is still complex at this point, we only care about the real part a.1.re.partial_cmp(&b.1.re).unwrap_or(Ordering::Equal) }) .unwrap(); // we can unwrap the result of max_by(), as we are sure filtered.len() > 0 // convert the array index of the max to the coordinates in the window let max_coord_in_window = index_to_coords(self.window_size, maxind as u32); let window_half = (self.window_size / 2) as i32; let x_delta = max_coord_in_window.0 as i32 - window_half; let y_delta = max_coord_in_window.1 as i32 - window_half; let x_max = self.frame_width as i32 - window_half; let y_max = self.frame_height as i32 - window_half; #[cfg(debug_assertions)] { println!( "distance of new in-window max from window center: x = {}, y = {}", x_delta, y_delta, ); } // compute the max coord in the frame by looking at the shift of the window center let new_x = (self.current_target_center.0 as i32 + x_delta) .min(x_max) .max(window_half); let new_y = (self.current_target_center.1 as i32 + y_delta) .min(y_max) .max(window_half); self.current_target_center = (new_x as u32, new_y as u32); // compute PSR // Note that we re-use the computed max and its coordinate for downstream simplicity self.last_psr = compute_psr( &corr_map_gi, self.window_size, self.window_size, max_complex.re, max_coord_in_window, ); return Prediction { location: self.current_target_center, psr: self.last_psr, }; } // update the filter fn update(&mut self, frame: &GrayImage) { // cut out the training template by cropping let window = window_crop( frame, self.window_size, self.window_size, self.current_target_center, );
// preprocess the image using preprocess() let vectorized = preprocess(&window); // calculate the 2D FFT of the preprocessed image: FFT(fi) = Fi let new_Fi = self.compute_2dfft(vectorized); //// Update the filter using the prediction // compute the complex conjugate of Fi, Fi*. let Fi_star: Vec<Complex<f32>> = new_Fi.iter().map(|e| e.conj()).collect(); // compute Ai (top) and Bi (bottom) using F*, G, and the learning rate (see paper) let one_minus_eta = 1.0 - self.eta; // update the 'top' of the filter update equation self.last_top = self .target .iter() .zip(&Fi_star) .zip(&self.last_top) .map(|((g, f), prev)| self.eta * (g * f) + (one_minus_eta * prev)) .collect(); // update the 'bottom' of the filter update equation self.last_bottom = new_Fi .iter() .zip(&Fi_star) .zip(&self.last_bottom) .map(|((f, f_star), prev)| self.eta * (f * f_star) + (one_minus_eta * prev)) .collect(); // compute the new filter H* by dividing Ai and Bi elementwise self.filter = self .last_top .iter() .zip(&self.last_bottom) .map(|(a, b)| a / b) .collect(); } // debug method to dump the latest filter to an inspectable image pub fn dump_filter( &self, ) -> ( ImageBuffer<Luma<u8>, Vec<u8>>, ImageBuffer<Luma<u8>, Vec<u8>>, ) { // get the filter out of fourier space // NOTE: input is garbage after this call to inv_fft.process(), so we clone the filter first. let mut h = self.filter.clone(); self.inv_fft.process(&mut h); // turn the real and imaginary values of the filter into separate grayscale images let realfilter = h.iter().map(|c| c.re).collect(); let imfilter = h.iter().map(|c| c.im).collect(); return ( to_imgbuf(&realfilter, self.window_size, self.window_size), to_imgbuf(&imfilter, self.window_size, self.window_size), ); } } fn window_crop( input_frame: &GrayImage, window_width: u32, window_height: u32, center: (u32, u32), ) -> GrayImage { let window = imageops::crop( &mut input_frame.clone(), center .0 .saturating_sub(window_width / 2) .min(input_frame.width() - window_width), center .1 .saturating_sub(window_height / 2) .min(input_frame.height() - window_height), window_width, window_height, ) .to_image(); return window; } fn build_target(window_width: u32, window_height: u32) -> Vec<f32> { let mut target_gi = vec![0f32; (window_width * window_height) as usize]; // Optional: let the sigma depend on the window size (Galoogahi et al. (2015). 
Correlation Filters with Limited Boundaries) // let sigma = ((window_width * window_height) as f32).sqrt() / 16.0; // let variance = sigma * sigma; let variance = 2.0; // create gaussian peak at the center coordinates let center_x = window_width / 2; let center_y = window_height / 2; for x in 0..window_width { for y in 0..window_height { let distx: f32 = x as f32 - center_x as f32; let disty: f32 = y as f32 - center_y as f32; // apply a crude unnormalized Gaussian: the whole squared distance is divided by the variance target_gi[((y * window_width) + x) as usize] = (-((distx * distx) + (disty * disty)) / variance).exp(); } } return target_gi; } // function for debugging the shape of the target // output only depends on the provided target_coords pub fn dump_target(window_width: u32, window_height: u32) -> ImageBuffer<Luma<u8>, Vec<u8>> { let trgt = build_target(window_width, window_height); let normalized = trgt.iter().map(|a| a * 255.0).collect(); return to_imgbuf(&normalized, window_width, window_height); } fn compute_psr( predicted: &Vec<Complex<f32>>, width: u32, height: u32, max: f32, maxpos: (u32, u32), ) -> f32 { // uses running updates of standard deviation and mean let mut running_sum = 0.0; let mut running_sd = 0.0; for e in predicted { running_sum += e.re; running_sd += e.re * e.re; } // subtract the values of an 11*11 window around the max from the running sd and sum // TODO: look up: why 11*11, and not something simpler like 12*12? let max_x = maxpos.0 as i32; let max_y = maxpos.1 as i32; let window_left = (max_x - 5).max(0); let window_right = (max_x + 6).min(width as i32); let window_top = (max_y - 5).max(0); // note: named according to CG conventions (y grows downward, so top < bottom) let window_bottom = (max_y + 6).min(height as i32); for x in window_left..window_right { for y in window_top..window_bottom { let ind = (y * width as i32 + x) as usize; let val = predicted[ind].re; running_sd -= val * val; running_sum -= val; } } // we need to subtract 11*11 window from predicted.len() to get the sidelobe_size let sidelobe_size = (predicted.len() - (11 * 11)) as f32; let mean_sl = running_sum / sidelobe_size; let sd_sl = ((running_sd / sidelobe_size) - (mean_sl * mean_sl)).sqrt(); let psr = (max - mean_sl) / sd_sl; return psr; } fn index_to_coords(width: u32, index: u32) -> (u32, u32) { // modulo/remainder ops are theoretically O(1) // checked_rem returns None if rhs == 0, which would indicate an upstream error (width == 0). let x = index.checked_rem(width).unwrap(); // checked sub returns None if overflow occurred, which is also a panicable offense. // checked_div returns None if rhs == 0, which would indicate an upstream error (width == 0). let y = (index.checked_sub(x).unwrap()).checked_div(width).unwrap(); return (x, y); } pub fn to_imgbuf(buf: &Vec<f32>, width: u32, height: u32) -> ImageBuffer<Luma<u8>, Vec<u8>>
{ ImageBuffer::from_vec(width, height, buf.iter().map(|c| *c as u8).collect()).unwrap() }
identifier_body
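A hedged usage sketch of the tracker API defined in the row above. The frame source and all parameter values here are illustrative assumptions, not tuned recommendations from the original author.

// Hypothetical end-to-end usage of MosseTrackerSettings / MultiMosseTracker.
fn demo_multi_tracking(first_frame: &GrayImage, frames: &[GrayImage]) {
    let settings = MosseTrackerSettings {
        width: first_frame.width(),
        height: first_frame.height(),
        window_size: 64,      // assumed square tracking window edge
        learning_rate: 0.125, // eta: assumed value
        psr_threshold: 7.0,   // assumed value
        regularization: 0.001,
    };
    // A tracker may miss the PSR threshold up to 3 consecutive frames before being pruned.
    let mut multi = MultiMosseTracker::new(settings, 3);
    // Start tracking an object at an (assumed) known center, under ID 0.
    multi.add_or_replace_target(0, (320, 240), first_frame);
    for frame in frames {
        for (id, pred) in multi.track(frame) {
            println!("target {} at {:?} (PSR {:.2})", id, pred.location, pred.psr);
        }
    }
}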
lib.rs
for i in 0..width { for j in 0..height { let cww = ((f32::consts::PI * i as f32) / (width - 1) as f32).sin(); let cwh = ((f32::consts::PI * j as f32) / (height - 1) as f32).sin(); prepped[position] = cww.min(cwh) * prepped[position]; position += 1; } } return prepped; } type Identifier = u32; #[derive(Debug)] pub struct MultiMosseTracker { // we also store the tracker's numeric ID, and the amount of times it did not make the PSR threshold. trackers: Vec<(Identifier, u32, MosseTracker)>, // the global tracker settings settings: MosseTrackerSettings, // how many times a tracker is allowed to fail the PSR threshold desperation_level: u32, } impl MultiMosseTracker { pub fn new(settings: MosseTrackerSettings, desperation_level: u32) -> MultiMosseTracker { return MultiMosseTracker { trackers: Vec::new(), settings: settings, desperation_level: desperation_level, }; } pub fn add_or_replace_target(&mut self, id: Identifier, coords: (u32, u32), frame: &GrayImage) { // Add a target by specifying its coords and a new ID. // Specify an existing ID to replace an existing tracked target. // create a new tracker for this target and train it let mut new_tracker = MosseTracker::new(&self.settings); new_tracker.train(frame, coords); match self.trackers.iter_mut().find(|tracker| tracker.0 == id) { Some(tuple) => { tuple.1 = 0; tuple.2 = new_tracker; } // add the tracker to the map _ => self.trackers.push((id, 0, new_tracker)), }; } pub fn track(&mut self, frame: &GrayImage) -> Vec<(Identifier, Prediction)> { let mut predictions: Vec<(Identifier, Prediction)> = Vec::new(); for (id, death_watch, tracker) in &mut self.trackers { // compute the location of the object in the new frame and save it let pred = tracker.track_new_frame(frame); predictions.push((*id, pred)); // if the tracker made the PSR threshold, update it. // if not, we increment its death ticker. if tracker.last_psr > self.settings.psr_threshold { tracker.update(frame); *death_watch = 0u32; } else { *death_watch += 1; } } // prune all filters with an expired death ticker let level = &self.desperation_level; self.trackers .retain(|(_id, death_count, _tracker)| death_count < level); return predictions; } pub fn dump_filter_reals(&self) -> Vec<GrayImage> { return self.trackers.iter().map(|t| t.2.dump_filter().0).collect(); } pub fn
(&self) -> usize { self.trackers.len() } } pub struct Prediction { pub location: (u32, u32), pub psr: f32, } pub struct MosseTracker { filter: Vec<Complex<f32>>, // frame dimensions (constant per tracker) frame_width: u32, frame_height: u32, // stores dimensions of tracking window and its center // window is square for now, this variable contains the size of the square edge window_size: u32, current_target_center: (u32, u32), // represents center in frame // the 'target' (G). A single Gaussian peak centered at the tracking window. target: Vec<Complex<f32>>, // constants: learning rate (eta) and regularization eta: f32, regularization: f32, // not super important for MOSSE: see paper fig 4. // the previous Ai and Bi last_top: Vec<Complex<f32>>, last_bottom: Vec<Complex<f32>>, // the previous psr pub last_psr: f32, // thread-safe FFT objects containing precomputed parameters for this input data size. fft: Arc<dyn Fft<f32>>, inv_fft: Arc<dyn Fft<f32>>, } impl Debug for MosseTracker { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("MosseTracker") .field("filter", &self.filter) .field("frame_width", &self.frame_width) .field("frame_height", &self.frame_height) .field("window_size", &self.window_size) .field("current_target_center", &self.current_target_center) .field("target", &self.target) .field("eta", &self.eta) .field("regularization", &self.regularization) .field("last_top", &self.last_top) .field("last_bottom", &self.last_bottom) .field("last_psr", &self.last_psr) // These fields don't implement Debug, so I can't use the #[derive(Debug)] impl. //.field("fft", &self.fft) //.field("inv_fft", &self.inv_fft) .finish() } } #[derive(Debug)] pub struct MosseTrackerSettings { pub width: u32, pub height: u32, pub window_size: u32, pub learning_rate: f32, pub psr_threshold: f32, pub regularization: f32, } #[allow(non_snake_case)] impl MosseTracker { pub fn new(settings: &MosseTrackerSettings) -> MosseTracker { // parameterize the FFT objects let mut planner = FftPlanner::new(); let mut inv_planner = FftPlanner::new(); // NOTE: we initialize the FFTs based on the size of the window let length = (settings.window_size * settings.window_size) as usize; let fft = planner.plan_fft_forward(length); let inv_fft = inv_planner.plan_fft_inverse(length); // initialize the filter and its top and bottom parts with zeroes. let filter = vec![Complex::zero(); length]; let top = vec![Complex::zero(); length]; let bottom = vec![Complex::zero(); length]; // initialize the target output map (G), with a compact Gaussian peak centered on the target object. // In the Bolme paper, this map is called gi.
let mut target: Vec<Complex<f32>> = build_target(settings.window_size, settings.window_size) .into_iter() .map(|p| Complex::new(p as f32, 0.0)) .collect(); fft.process(&mut target); return MosseTracker { filter, last_top: top, last_bottom: bottom, last_psr: 0.0, eta: settings.learning_rate, regularization: settings.regularization, target, fft, inv_fft, frame_width: settings.width, frame_height: settings.height, window_size: settings.window_size, current_target_center: (0, 0), }; } fn compute_2dfft(&self, imagedata: Vec<f32>) -> Vec<Complex<f32>> { let mut buffer: Vec<Complex<f32>> = imagedata .into_iter() .map(|p| Complex::new(p as f32, 0.0)) .collect(); // fft.process() CONSUMES the input buffer as scratch space, make sure it is not reused self.fft.process(&mut buffer); return buffer; } // Train a new filter on the first frame in which the object occurs pub fn train(&mut self, input_frame: &GrayImage, target_center: (u32, u32)) { // store the target center as the current self.current_target_center = target_center; // cut out the training template by cropping let window = &window_crop( input_frame, self.window_size, self.window_size, target_center, ); #[cfg(debug_assertions)] { window.save("WINDOW.png").unwrap(); } // build an iterator that produces training frames that have been slightly rotated according to a theta value. let rotated_frames = [ 0.02, -0.02, 0.05, -0.05, 0.07, -0.07, 0.09, -0.09, 1.1, -1.1, 1.3, -1.3, 1.5, -1.5, 2.0, -2.0, ] .iter() .map(|rad| { // Rotate an image clockwise about its center by theta radians. let training_frame = rotate_about_center(window, *rad, Interpolation::Nearest, Luma([0])); #[cfg(debug_assertions)] { training_frame .save(format!("training_frame_rotated_theta_{}.png", rad)) .unwrap(); } return training_frame; }); // build an iterator that produces training frames that have been slightly scaled to various degrees ('zoomed') let scaled_frames = [0.8, 0.9, 1.1, 1.2].into_iter().map(|scalefactor| { let scale = Projection::scale(scalefactor, scalefactor); let scaled_training_frame = warp(&window, &scale, Interpolation::Nearest, Luma([0])); #[cfg(debug_assertions)] { scaled_training_frame .save(format!("training_frame_scaled_{}.png", scalefactor)) .unwrap(); } return scaled_training_frame; }); // Chain these iterators together. // Note that we add the initial, unperturbed training frame as first in line. let training_frames = std::iter::once(window) .cloned() .chain(rotated_frames) .chain(scaled_frames); // TODO: scaling is not ready yet //.chain(scaled_frames); let mut training_frame_count = 0; for training_frame in training_frames { // preprocess the training frame using preprocess() let vectorized = preprocess(&training_frame); // calculate the 2D FFT of the preprocessed frame: FFT(fi) = Fi let Fi = self.compute_2dfft(vectorized); // compute the complex conjugate of Fi, Fi*. 
let Fi_star: Vec<Complex<f32>> = Fi.iter().map(|e| e.conj()).collect(); // compute the initial filter let top = self.target.iter().zip(Fi_star.iter()).map(|(g, f)| g * f); let bottom = Fi.iter().zip(Fi_star.iter()).map(|(f, f_star)| f * f_star); // add the values to the running sum self.last_top .iter_mut() .zip(top) .for_each(|(running, new)| *running += new); self.last_bottom .iter_mut() .zip(bottom) .for_each(|(running, new)| *running += new); training_frame_count += 1; } // divide the values of the top and bottom filters by the number of training perturbations used self.last_top .iter_mut() .for_each(|e| *e /= training_frame_count as f32); self.last_bottom .iter_mut() .for_each(|e| *e /= training_frame_count as f32); // compute the filter by dividing Ai and Bi elementwise // note that we add a small regularization term to the denominator to avoid dividing by zero, which would yield NaN's. self.filter = self .last_top .iter() .zip(&self.last_bottom) .map(|(a, b)| a / (b + self.regularization)) .collect(); #[cfg(debug_assertions)] { println!( "current center of target in frame: x={}, y={}", self.current_target_center.0, self.current_target_center.1 ); } } pub fn track_new_frame(&mut self, frame: &GrayImage) -> Prediction { // cut out the training template by cropping let window = window_crop( frame, self.window_size, self.window_size, self.current_target_center, ); // preprocess the image using preprocess() let vectorized = preprocess(&window); // calculate the 2D FFT of the preprocessed image: FFT(fi) = Fi let Fi = self.compute_2dfft(vectorized); // elementwise multiplication of F with filter H gives Gi let mut corr_map_gi: Vec<Complex<f32>> = Fi.iter().zip(&self.filter).map(|(a, b)| a * b).collect(); // NOTE: Gi is garbage after this call self.inv_fft.process(&mut corr_map_gi); // find the max value of the filtered image 'gi', along with the position of the maximum let (maxind, max_complex) = corr_map_gi .iter() .enumerate() .max_by(|a, b| { // filtered (gi) is still complex at this point, we only care about the real part a.1.re.partial_cmp(&b.1.re).unwrap_or(Ordering::Equal) }) .unwrap(); // we can unwrap the result of max_by(), as we are sure filtered.len() > 0 // convert the array index of the max to the coordinates in the window let max_coord_in_window = index_to_coords(self.window_size, maxind as u32); let window_half = (self.window_size / 2) as i32; let x_delta = max_coord_in_window.0 as i32 - window_half; let y_delta = max_coord_in_window.1 as i32 - window_half; let x_max = self.frame_width as i32 - window_half; let y_max = self.frame_height as i32 - window_half; #[cfg(debug_assertions)] { println!( "distance of new in-window max from window center: x = {}, y = {}", x_delta, y_delta, ); } // compute the max coord in the frame by looking at the shift of the window center let new_x = (self.current_target_center.0 as i32 + x_delta) .min(x_max) .max(window_half); let new_y = (self.current_target_center.1 as i32 + y_delta) .min(y_max) .max(window_half); self.current_target_center = (new_x as u32, new_y as u32); // compute PSR // Note that we re-use the computed max and its coordinate for downstream simplicity self.last_psr = compute_psr( &corr_map_gi, self.window_size, self.window_size, max_complex.re, max_coord_in_window, ); return Prediction { location: self.current_target_center, psr: self.last_psr, }; } // update the filter fn update(&mut self, frame: &GrayImage) { // cut out the training template by cropping let window = window_crop( frame, self.window_size, self.window_size, self.current_target_center, );
// preprocess the image using preprocess() let vectorized = preprocess(&window); // calculate the 2D FFT of the preprocessed image: FFT(fi) = Fi let new_Fi = self.compute_2dfft(vectorized); //// Update the filter using the prediction // compute the complex conjugate of Fi, Fi*. let Fi_star: Vec<Complex<f32>> = new_Fi.iter().map(|e| e.conj()).collect(); // compute Ai (top) and Bi (bottom) using F*, G, and the learning rate (see paper) let one_minus_eta = 1.0 - self.eta; // update the 'top' of the filter update equation self.last_top = self .target .iter() .zip(&Fi_star) .zip(&self.last_top) .map(|((g, f), prev)| self.eta * (g * f) + (one_minus_eta * prev)) .collect(); // update the 'bottom' of the filter update equation self.last_bottom = new_Fi .iter() .zip(&Fi_star) .zip(&self.last_bottom) .map(|((f, f_star), prev)| self.eta * (f * f_star) + (one_minus_eta * prev)) .collect(); // compute the new filter H* by dividing Ai and Bi elementwise self.filter = self .last_top .iter() .zip(&self.last_bottom) .map(|(a, b)| a / b) .collect(); } // debug method to dump the latest filter to an inspectable image pub fn dump_filter( &self, ) -> ( ImageBuffer<Luma<u8>, Vec<u8>>, ImageBuffer<Luma<u8>, Vec<u8>>, ) { // get the filter out of fourier space // NOTE: input is garbage after this call to inv_fft.process(), so we clone the filter first. let mut h = self.filter.clone(); self.inv_fft.process(&mut h); // turn the real and imaginary values of the filter into separate grayscale images let realfilter = h.iter().map(|c| c.re).collect(); let imfilter = h.iter().map(|c| c.im).collect(); return ( to_imgbuf(&realfilter, self.window_size, self.window_size), to_imgbuf(&imfilter, self.window_size, self.window_size), ); } } fn window_crop( input_frame: &GrayImage, window_width: u32, window_height: u32, center: (u32, u32), ) -> GrayImage { let window = imageops::crop( &mut input_frame.clone(), center .0 .saturating_sub(window_width / 2) .min(input_frame.width() - window_width), center .1 .saturating_sub(window_height / 2) .min(input_frame.height() - window_height), window_width, window_height, ) .to_image(); return window; } fn build_target(window_width: u32, window_height: u32) -> Vec<f32> { let mut target_gi = vec![0f32; (window_width * window_height) as usize]; // Optional: let the sigma depend on the window size (Galoogahi et al. (2015). Correlation Filters with Limited Boundaries) // let sigma = ((window_width * window_height) as f32).sqrt() / 16.0; // let variance = sigma * sigma; let variance = 2.0; // create gaussian peak at the center coordinates let center_x = window_width / 2; let center_y = window_height / 2; for x in 0..window_width { for y in 0..window_height { let distx: f32 = x as f32 - center_x as f32; let disty: f32 = y as f32 - center_y as f32; // apply a crude unnormalized Gaussian: the whole squared distance is divided by the variance target_gi[((y * window_width) + x) as usize] = (-((distx * distx) + (disty * disty)) / variance).exp()
size
identifier_name
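A quick sanity-check sketch for the row-major index math used by index_to_coords in the rows above: x = index % width and y = index / width, so coords -> index -> coords must round-trip. The test values are arbitrary assumptions.

// Hypothetical round-trip test for index_to_coords (assumes it is visible via super).
#[cfg(test)]
mod index_roundtrip_tests {
    use super::*;

    #[test]
    fn index_to_coords_roundtrips() {
        let width = 64u32;
        for &index in [0u32, 1, 63, 64, 65, 4095].iter() {
            let (x, y) = index_to_coords(width, index);
            // Reconstructing the flat index from (x, y) must give the original index back.
            assert_eq!(y * width + x, index);
        }
    }
}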
lib.rs
track_new_frame(frame); predictions.push((*id, pred)); // if the tracker made the PSR threshold, update it. // if not, we increment its death ticker. if tracker.last_psr > self.settings.psr_threshold { tracker.update(frame); *death_watch = 0u32; } else { *death_watch += 1; } } // prune all filters with an expired death ticker let level = &self.desperation_level; self.trackers .retain(|(_id, death_count, _tracker)| death_count < level); return predictions; } pub fn dump_filter_reals(&self) -> Vec<GrayImage> { return self.trackers.iter().map(|t| t.2.dump_filter().0).collect(); } pub fn size(&self) -> usize { self.trackers.len() } } pub struct Prediction { pub location: (u32, u32), pub psr: f32, } pub struct MosseTracker { filter: Vec<Complex<f32>>, // frame dimensions (constant per tracker) frame_width: u32, frame_height: u32, // stores dimensions of tracking window and its center // window is square for now, this variable contains the size of the square edge window_size: u32, current_target_center: (u32, u32), // represents center in frame // the 'target' (G). A single Gaussian peak centered at the tracking window. target: Vec<Complex<f32>>, // constants: learning rate (eta) and regularization eta: f32, regularization: f32, // not super important for MOSSE: see paper fig 4. // the previous Ai and Bi last_top: Vec<Complex<f32>>, last_bottom: Vec<Complex<f32>>, // the previous psr pub last_psr: f32, // thread-safe FFT objects containing precomputed parameters for this input data size. fft: Arc<dyn Fft<f32>>, inv_fft: Arc<dyn Fft<f32>>, } impl Debug for MosseTracker { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("MosseTracker") .field("filter", &self.filter) .field("frame_width", &self.frame_width) .field("frame_height", &self.frame_height) .field("window_size", &self.window_size) .field("current_target_center", &self.current_target_center) .field("target", &self.target) .field("eta", &self.eta) .field("regularization", &self.regularization) .field("last_top", &self.last_top) .field("last_bottom", &self.last_bottom) .field("last_psr", &self.last_psr) // These fields don't implement Debug, so I can't use the #[derive(Debug)] impl. //.field("fft", &self.fft) //.field("inv_fft", &self.inv_fft) .finish() } } #[derive(Debug)] pub struct MosseTrackerSettings { pub width: u32, pub height: u32, pub window_size: u32, pub learning_rate: f32, pub psr_threshold: f32, pub regularization: f32, } #[allow(non_snake_case)] impl MosseTracker { pub fn new(settings: &MosseTrackerSettings) -> MosseTracker { // parameterize the FFT objects let mut planner = FftPlanner::new(); let mut inv_planner = FftPlanner::new(); // NOTE: we initialize the FFTs based on the size of the window let length = (settings.window_size * settings.window_size) as usize; let fft = planner.plan_fft_forward(length); let inv_fft = inv_planner.plan_fft_inverse(length); // initialize the filter and its top and bottom parts with zeroes. let filter = vec![Complex::zero(); length]; let top = vec![Complex::zero(); length]; let bottom = vec![Complex::zero(); length]; // initialize the target output map (G), with a compact Gaussian peak centered on the target object. // In the Bolme paper, this map is called gi.
let mut target: Vec<Complex<f32>> = build_target(settings.window_size, settings.window_size) .into_iter() .map(|p| Complex::new(p as f32, 0.0)) .collect(); fft.process(&mut target); return MosseTracker { filter, last_top: top, last_bottom: bottom, last_psr: 0.0, eta: settings.learning_rate, regularization: settings.regularization, target, fft, inv_fft, frame_width: settings.width, frame_height: settings.height, window_size: settings.window_size, current_target_center: (0, 0), }; } fn compute_2dfft(&self, imagedata: Vec<f32>) -> Vec<Complex<f32>> { let mut buffer: Vec<Complex<f32>> = imagedata .into_iter() .map(|p| Complex::new(p as f32, 0.0)) .collect(); // fft.process() CONSUMES the input buffer as scratch space, make sure it is not reused self.fft.process(&mut buffer); return buffer; } // Train a new filter on the first frame in which the object occurs pub fn train(&mut self, input_frame: &GrayImage, target_center: (u32, u32)) { // store the target center as the current self.current_target_center = target_center; // cut out the training template by cropping let window = &window_crop( input_frame, self.window_size, self.window_size, target_center, ); #[cfg(debug_assertions)] { window.save("WINDOW.png").unwrap(); } // build an iterator that produces training frames that have been slightly rotated according to a theta value. let rotated_frames = [ 0.02, -0.02, 0.05, -0.05, 0.07, -0.07, 0.09, -0.09, 1.1, -1.1, 1.3, -1.3, 1.5, -1.5, 2.0, -2.0, ] .iter() .map(|rad| { // Rotate an image clockwise about its center by theta radians. let training_frame = rotate_about_center(window, *rad, Interpolation::Nearest, Luma([0])); #[cfg(debug_assertions)] { training_frame .save(format!("training_frame_rotated_theta_{}.png", rad)) .unwrap(); } return training_frame; }); // build an iterator that produces training frames that have been slightly scaled to various degrees ('zoomed') let scaled_frames = [0.8, 0.9, 1.1, 1.2].into_iter().map(|scalefactor| { let scale = Projection::scale(scalefactor, scalefactor); let scaled_training_frame = warp(&window, &scale, Interpolation::Nearest, Luma([0])); #[cfg(debug_assertions)] { scaled_training_frame .save(format!("training_frame_scaled_{}.png", scalefactor)) .unwrap(); } return scaled_training_frame; }); // Chain these iterators together. // Note that we add the initial, unperturbed training frame as first in line. let training_frames = std::iter::once(window) .cloned() .chain(rotated_frames) .chain(scaled_frames); // TODO: scaling is not ready yet //.chain(scaled_frames); let mut training_frame_count = 0; for training_frame in training_frames { // preprocess the training frame using preprocess() let vectorized = preprocess(&training_frame); // calculate the 2D FFT of the preprocessed frame: FFT(fi) = Fi let Fi = self.compute_2dfft(vectorized); // compute the complex conjugate of Fi, Fi*. 
let Fi_star: Vec<Complex<f32>> = Fi.iter().map(|e| e.conj()).collect(); // compute the initial filter let top = self.target.iter().zip(Fi_star.iter()).map(|(g, f)| g * f); let bottom = Fi.iter().zip(Fi_star.iter()).map(|(f, f_star)| f * f_star); // add the values to the running sum self.last_top .iter_mut() .zip(top) .for_each(|(running, new)| *running += new); self.last_bottom .iter_mut() .zip(bottom) .for_each(|(running, new)| *running += new); training_frame_count += 1; } // divide the values of the top and bottom filters by the number of training perturbations used self.last_top .iter_mut() .for_each(|e| *e /= training_frame_count as f32); self.last_bottom .iter_mut() .for_each(|e| *e /= training_frame_count as f32); // compute the filter by dividing Ai and Bi elementwise // note that we add a small regularization term to the denominator to avoid dividing by zero, which would yield NaN's. self.filter = self .last_top .iter() .zip(&self.last_bottom) .map(|(a, b)| a / (b + self.regularization)) .collect(); #[cfg(debug_assertions)] { println!( "current center of target in frame: x={}, y={}", self.current_target_center.0, self.current_target_center.1 ); } } pub fn track_new_frame(&mut self, frame: &GrayImage) -> Prediction { // cut out the training template by cropping let window = window_crop( frame, self.window_size, self.window_size, self.current_target_center, ); // preprocess the image using preprocess() let vectorized = preprocess(&window); // calculate the 2D FFT of the preprocessed image: FFT(fi) = Fi let Fi = self.compute_2dfft(vectorized); // elementwise multiplication of F with filter H gives Gi let mut corr_map_gi: Vec<Complex<f32>> = Fi.iter().zip(&self.filter).map(|(a, b)| a * b).collect(); // NOTE: Gi is garbage after this call self.inv_fft.process(&mut corr_map_gi); // find the max value of the filtered image 'gi', along with the position of the maximum let (maxind, max_complex) = corr_map_gi .iter() .enumerate() .max_by(|a, b| { // filtered (gi) is still complex at this point, we only care about the real part a.1.re.partial_cmp(&b.1.re).unwrap_or(Ordering::Equal) }) .unwrap(); // we can unwrap the result of max_by(), as we are sure filtered.len() > 0 // convert the array index of the max to the coordinates in the window let max_coord_in_window = index_to_coords(self.window_size, maxind as u32); let window_half = (self.window_size / 2) as i32; let x_delta = max_coord_in_window.0 as i32 - window_half; let y_delta = max_coord_in_window.1 as i32 - window_half; let x_max = self.frame_width as i32 - window_half; let y_max = self.frame_height as i32 - window_half; #[cfg(debug_assertions)] { println!( "distance of new in-window max from window center: x = {}, y = {}", x_delta, y_delta, ); } // compute the max coord in the frame by looking at the shift of the window center let new_x = (self.current_target_center.0 as i32 + x_delta) .min(x_max) .max(window_half); let new_y = (self.current_target_center.1 as i32 + y_delta) .min(y_max) .max(window_half); self.current_target_center = (new_x as u32, new_y as u32); // compute PSR // Note that we re-use the computed max and its coordinate for downstream simplicity self.last_psr = compute_psr( &corr_map_gi, self.window_size, self.window_size, max_complex.re, max_coord_in_window, ); return Prediction { location: self.current_target_center, psr: self.last_psr, }; } // update the filter fn update(&mut self, frame: &GrayImage) { // cut out the training template by cropping let window = window_crop( frame, self.window_size, self.window_size, self.current_target_center, );
// preprocess the image using preprocess() let vectorized = preprocess(&window); // calculate the 2D FFT of the preprocessed image: FFT(fi) = Fi let new_Fi = self.compute_2dfft(vectorized); //// Update the filter using the prediction // compute the complex conjugate of Fi, Fi*. let Fi_star: Vec<Complex<f32>> = new_Fi.iter().map(|e| e.conj()).collect(); // compute Ai (top) and Bi (bottom) using F*, G, and the learning rate (see paper) let one_minus_eta = 1.0 - self.eta; // update the 'top' of the filter update equation self.last_top = self .target .iter() .zip(&Fi_star) .zip(&self.last_top) .map(|((g, f), prev)| self.eta * (g * f) + (one_minus_eta * prev)) .collect(); // update the 'bottom' of the filter update equation self.last_bottom = new_Fi .iter() .zip(&Fi_star) .zip(&self.last_bottom) .map(|((f, f_star), prev)| self.eta * (f * f_star) + (one_minus_eta * prev)) .collect(); // compute the new filter H* by dividing Ai and Bi elementwise self.filter = self .last_top .iter() .zip(&self.last_bottom) .map(|(a, b)| a / b) .collect(); } // debug method to dump the latest filter to an inspectable image pub fn dump_filter( &self, ) -> ( ImageBuffer<Luma<u8>, Vec<u8>>, ImageBuffer<Luma<u8>, Vec<u8>>, ) { // get the filter out of fourier space // NOTE: input is garbage after this call to inv_fft.process(), so we clone the filter first. let mut h = self.filter.clone(); self.inv_fft.process(&mut h); // turn the real and imaginary values of the filter into separate grayscale images let realfilter = h.iter().map(|c| c.re).collect(); let imfilter = h.iter().map(|c| c.im).collect(); return ( to_imgbuf(&realfilter, self.window_size, self.window_size), to_imgbuf(&imfilter, self.window_size, self.window_size), ); } } fn window_crop( input_frame: &GrayImage, window_width: u32, window_height: u32, center: (u32, u32), ) -> GrayImage { let window = imageops::crop( &mut input_frame.clone(), center .0 .saturating_sub(window_width / 2) .min(input_frame.width() - window_width), center .1 .saturating_sub(window_height / 2) .min(input_frame.height() - window_height), window_width, window_height, ) .to_image(); return window; } fn build_target(window_width: u32, window_height: u32) -> Vec<f32> { let mut target_gi = vec![0f32; (window_width * window_height) as usize]; // Optional: let the sigma depend on the window size (Galoogahi et al. (2015). 
Correlation Filters with Limited Boundaries) // let sigma = ((window_width * window_height) as f32).sqrt() / 16.0; // let variance = sigma * sigma; let variance = 2.0; // create gaussian peak at the center coordinates let center_x = window_width / 2; let center_y = window_height / 2; for x in 0..window_width { for y in 0..window_height { let distx: f32 = x as f32 - center_x as f32; let disty: f32 = y as f32 - center_y as f32; // apply a crude unnormalized Gaussian: the whole squared distance is divided by the variance target_gi[((y * window_width) + x) as usize] = (-((distx * distx) + (disty * disty)) / variance).exp(); } } return target_gi; } // function for debugging the shape of the target // output only depends on the provided target_coords pub fn dump_target(window_width: u32, window_height: u32) -> ImageBuffer<Luma<u8>, Vec<u8>> { let trgt = build_target(window_width, window_height); let normalized = trgt.iter().map(|a| a * 255.0).collect(); return to_imgbuf(&normalized, window_width, window_height); } fn compute_psr( predicted: &Vec<Complex<f32>>, width: u32, height: u32, max: f32, maxpos: (u32, u32), ) -> f32 { // uses running updates of standard deviation and mean let mut running_sum = 0.0; let mut running_sd = 0.0; for e in predicted { running_sum += e.re; running_sd += e.re * e.re; } // subtract the values of an 11*11 window around the max from the running sd and sum // TODO: look up: why 11*11, and not something simpler like 12*12? let max_x = maxpos.0 as i32; let max_y = maxpos.1 as i32; let window_left = (max_x - 5).max(0); let window_right = (max_x + 6).min(width as i32); let window_top = (max_y - 5).max(0); // note: named according to CG conventions (y grows downward, so top < bottom) let window_bottom = (max_y + 6).min(height as i32); for x in window_left..window_right { for y in window_top..window_bottom { let ind = (y * width as i32 + x) as usize; let val = predicted[ind].re; running_sd -= val * val; running_sum -= val; } } // we need to subtract 11*11 window from predicted.len() to get the sidelobe_size let sidelobe_size = (predicted.len() - (11 * 11)) as f32; let mean_sl = running_sum / sidelobe_size; let sd_sl = ((running_sd / sidelobe_size) - (mean_sl * mean_sl)).sqrt(); let psr = (max - mean_sl) / sd_sl; return psr; }
fn index_to_coords(width: u32, index: u32) -> (u32, u32) { // modulo/remainder ops are theoretically O(1) // checked_rem returns None if rhs == 0, which would indicate an upstream error (width == 0). let x = index.checked_rem(width).unwrap();
random_line_split
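The update() method in the rows above maintains Ai and Bi as exponential moving averages of the form new = eta * observation + (1 - eta) * previous. A scalar sketch of that recurrence, with arbitrary example values, just to make the identity explicit; this is an illustration, not part of the original crate.

// One step of the exponential moving average used for the Ai/Bi filter terms.
fn ema_step(previous: f32, observation: f32, eta: f32) -> f32 {
    // Larger eta makes the filter adapt faster but remember less history.
    eta * observation + (1.0 - eta) * previous
}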
main.rs
#![deny(unused_must_use)] #![type_length_limit = "1340885"] extern crate serenity; extern crate ctrlc; #[macro_use] pub mod logger; pub mod canary_update; pub mod dogebotno; pub mod permissions; pub mod servers; pub mod voice; use canary_update::*; use futures::{Stream, StreamExt}; use lazy_static::*; use logger::get_guild_members; use rand::Rng; use regex::Regex; use serenity::async_trait; use serenity::client::bridge::gateway::GatewayIntents; use serenity::model::channel::GuildChannel; use serenity::{ client::bridge::gateway::ShardManager, framework::standard::{macros::*, *}, model::{ channel::Message, event::{PresenceUpdateEvent, ResumedEvent}, gateway::Ready, id::{ChannelId, GuildId, UserId}, user::OnlineStatus, }, prelude::*, utils::MessageBuilder, Client, }; use std::{ collections::HashSet, ops::DerefMut, sync::{atomic::AtomicBool, Arc}, }; use tokio::{stream, sync::Mutex}; use voice::OofVoice; /// Unwrapping many of the errors in oofbot, mostly api calls, will result in a panic sometimes. /// This is bad. But I also can't ignore the errors in case there's something bad in there. So my /// solution is this trait, which logs the error. If I look in the logs and see something bad, then /// I know to recheck everything. trait LogResult { /// If the result is an error, log the error. fn log_err(&self) where Self: std::fmt::Debug, { log_timestamp!("DEBUG", format!("{:?}", self)) } } impl<T: std::fmt::Debug, E: std::fmt::Debug> LogResult for Result<T, E> { /// If the result is an error, log the error. fn log_err(&self) { if self.is_err() { log_timestamp!("DEBUG", format!("{:?}", self)); } } } /// The general command group. May be deleted later #[group] #[commands(test, executeorder66, getdvsstatus)] struct General; /// A testing command that can only be run by me. #[command] async fn test(ctx: &Context, msg: &Message) -> CommandResult { if msg.author.id != 453344368913547265 { msg.channel_id.say(&ctx, "No").await.log_err(); return Ok(()); } //let canary = ctx.data.read().get::<CanaryUpdateHandler>().cloned().unwrap(); //let lock = canary.lock()?; //let res = lock.create_db(); //res.log_err(); //if res.is_ok() { msg.channel_id.say(&ctx, "It seems to have worked").log_err(); //} //else { // msg.channel_id.say(&ctx, "killme").log_err(); //} msg.channel_id.say(&ctx, "@admin").await.log_err(); Ok(()) } #[command] async fn executeorder66(ctx: &Context, msg: &Message) -> CommandResult { msg.channel_id.say(&ctx, "not yet").await.log_err(); Ok(()) } /// The event handler for oofbot pub struct Handler { cancel_tyler_ping: Arc<AtomicBool>, mention_regex: Regex, } impl Default for Handler { fn default() -> Self { Self { cancel_tyler_ping: Arc::default(), mention_regex: Regex::new(r"<@!?468928390917783553>").unwrap(), } } } #[async_trait] impl EventHandler for Handler { async fn presence_update(&self, ctx: Context, data: PresenceUpdateEvent) { // oofbot only handles guild presence updates if data.guild_id.is_none() { return; } // Dogebot is oofbot's greatest enemy. We got some checks in here just for him.
let is_dogebot = data.presence.user_id == 612070962913083405; // Should never be none because we check that up there let guild_id = data.guild_id.unwrap(); // Checks if dogebot is offline in this guild (the main development guild for dogebot and // oofbot) if is_dogebot && guild_id.0 == 561874457283657728 { dogebotno::dogebot_presence(&ctx, &data, &guild_id, self).await; } else if !is_dogebot && data.presence.status == OnlineStatus::Offline { // Inside joke, memeing on how tiny discord canary updates are and how often we get them let canary = ctx .data .read() .await .get::<CanaryUpdateHandler>() .cloned() .unwrap(); let mut lock = canary.lock().await; lock.add_canary_update(&data.presence.user_id).await; } else if !is_dogebot && data.presence.status == OnlineStatus::Online { canary_update::do_update(&ctx, &data).await; } } async fn resume(&self, _ctx: Context, _data: ResumedEvent)
async fn ready(&self, ctx: Context, _data: Ready) { log_timestamp!("INFO", format!("Shard {} ready", ctx.shard_id)); } async fn cache_ready(&self, ctx: Context, guilds: Vec<GuildId>) { let shard = ctx.shard_id; let rctx = &ctx; // Get all the guilds that this shard is connected to // Not that this bot will ever be big enough for me to bother sharding it let guild_info: Vec<_> = stream::iter(guilds) .filter_map(|guild_id| async move { if guild_id.shard_id(&rctx).await == rctx.shard_id { Some(( guild_id, guild_id.to_guild_cached(&rctx).await.unwrap().name.clone(), )) } else { None } }) .collect() .await; log_timestamp!( "INFO", format!("Shard {} connected to guilds\n{:#?}", shard, guild_info) ); } async fn message(&self, ctx: Context, msg: Message) { log_timestamp!("DEBUG", &msg.content); if msg.author.id == 612070962913083405 { dogebotno::dogebotno(ctx, msg).await; return; } if self.mention_regex.is_match(msg.content.as_str()) { let channel_id: ChannelId = msg.channel_id; channel_id .say( &ctx, "For thousands of years I lay dormant, who has disturbed my slumber", ) .await .log_err(); } if msg.content.contains("@someone") && !msg.author.bot { someone_ping(&ctx, &msg).await; } if (msg.content.contains("@everyone") || msg.content.contains("@here")) && msg.author.id.0 != 468928390917783553 { msg.channel_id .say(&ctx, "https://yeet.kikoho.xyz/files/ping.gif") .await .log_err(); } if msg.author.id == 266345279513427971 && msg.content.contains("https://www.twitch.tv/corporal_q") { msg.channel_id.say(&ctx, "sotp spamming").await.log_err(); } } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { log_timestamp!("INFO", "Starting oofbot"); log_timestamp!("INFO", "Getting client secret from file"); let mut framework = StandardFramework::new() .configure(|c| c.prefix("/")) .group(&GENERAL_GROUP) .group(&ADMIN_GROUP) .help(&HELP); voice::do_framework(&mut framework); permissions::do_framework(&mut framework); canary_update::do_framework(&mut framework); let secret = std::fs::read_to_string("client_secret") .expect("Client secret needs to be in a file called client_secret"); let mut client = Client::builder(secret) .add_intent(GatewayIntents::all()) .framework(framework) .event_handler(Handler::default()) .await .expect("Failed to create client"); // Voice initialization { // Lock the client's data let mut data = client.data.write().await; // Add the voice manager log_timestamp!("INFO", "Starting oofvoice"); data.insert::<OofVoice>(OofVoice::new(client.voice_manager.clone()).await); log_timestamp!("INFO", "Started oofvoice"); // Add canary update handler log_timestamp!("INFO", "Starting canary update handler"); let sql = permissions::SqlHandler::new(); data.insert::<CanaryUpdateHandler>(Arc::new(Mutex::new(CanaryUpdateHandler::new(sql)))); log_timestamp!("INFO", "Started canary update handler"); } let shard_manager = client.shard_manager.clone(); // Handle ctrl+c cross platform ctrlc::set_handler(move || { log_timestamp!("INFO", "Caught SIGINT, closing oofbot"); //let mut lock = shard_manager.lock().await; //let sm: &mut ShardManager = lock.deref_mut(); //sm.shutdown_all(); std::process::exit(0); }) .log_err(); // Hah you think this bot is big enough to be sharded? Nice joke // But if yours is, use .start_autosharded() client.start().await?; Ok(()) } /// Handles the @someone ping. Yes im evil.
async fn someone_ping(ctx: &Context, msg: &Message) { let guild_id: Option<GuildId> = msg.guild_id; let channel_id: ChannelId = msg.channel_id; match guild_id { Some(id) => { let mut message = MessageBuilder::new(); { let members = match get_guild_members(&ctx, id).await { Some(m) => m, None => { log_timestamp!("ERROR", format!("Failed to find guild {}", id)); msg.channel_id.say(&ctx, "Internal Error").await.log_err(); return; } }; let mut rng = rand::thread_rng(); message.mention(&msg.author); message.push(" has pinged: "); let someones = msg.content.split("@someone"); let c = someones.count(); if c > 1 { let r = rng.gen_range(0, members.len()); message.mention(&members[r]); } // Randomly select the @someones msg.content.split("@someone").skip(2).for_each(|_| { message.push(", "); let r = rng.gen_range(0, members.len()); message.mention(&members[r]); }); } channel_id.say(&ctx, message).await.log_err(); } None => { // If guild is none then this is a dm channel_id .say(&ctx.http, "Cannot @someone in dms") .await .log_err(); } } } #[help] async fn help( context: &Context, msg: &Message, args: Args, help_options: &'static HelpOptions, groups: &[&'static CommandGroup], owners: HashSet<UserId>, ) -> CommandResult { help_commands::with_embeds(context, msg, args, help_options, groups, owners).await; Ok(()) } #[check] #[name = "ManageMessages"] #[check_in_help(true)] #[display_in_help(true)] async fn manage_messages_check(ctx: &Context, msg: &Message) -> CheckResult { if msg.author.id == 453344368913547265 { return true.into(); } else if let Ok(member) = msg.member(&ctx).await { if let Ok(permissions) = member.permissions(&ctx.cache).await { return (permissions.administrator() || permissions.manage_messages()).into(); } } false.into() } #[check] #[name = "DVS"] #[check_in_help(true)] #[display_in_help(true)] async fn dvs_check(_ctx: &Context, msg: &Message) -> CheckResult { (msg.guild_id.unwrap_or(0.into()) == 693213312099287153).into() } #[group] #[commands(snapbm, snapping, snapbotcommands, snapspam, snapafter, setslowmode)] /// Admin command group /// Get this, it has admin commands, amazing right? struct Admin; #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Scan the last X messages and delete all that are from bots. Messages older than 2 weeks cannot be deleted with this command. Maximum of 500 messages async fn snapbm(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let count = match args.single::<u64>() { Ok(x) if x <= 500 => x, _ => { msg.channel_id .say(&ctx, "Usage: /snapbm <number>") .await .log_err(); return Ok(()); } }; let channel: GuildChannel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let messages = channel .messages(&ctx, |retriever| retriever.before(msg.id).limit(count)) .await?; let mut bot_messages: Vec<&Message> = messages .iter() .filter(|msg| { msg.author.bot && chrono::Utc::now().naive_utc() - msg.timestamp.naive_utc() < chrono::Duration::weeks(2) }) .collect(); bot_messages.push(msg); channel.delete_messages(&ctx, bot_messages).await.log_err(); Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Scan the last X messages and delete all that contain pings. Messages older than 2 weeks cannot be deleted with this command. 
Maximum of 500 messages async fn snapping(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let ping_regex = Regex::new("<@!?\\d*>").unwrap(); let count = match args.single::<u64>() { Ok(x) if x <= 500 => x, _ => { msg.channel_id .say(&ctx, "Usage: /snapping <number>") .await .log_err(); return Ok(()); } }; let channel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let messages = channel .messages(&ctx, |retriever| retriever.before(msg.id).limit(count)) .await?; let mut bot_messages: Vec<&Message> = messages .iter() .filter(|msg| { ping_regex.is_match(msg.content.as_str()) && chrono::Utc::now().naive_utc() - msg.timestamp.naive_utc() < chrono::Duration::weeks(2) }) .collect(); bot_messages.push(msg); channel.delete_messages(&ctx, bot_messages).await.log_err(); Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Scan the last X messages and delete all that start with /. Messages older than 2 weeks cannot be deleted with this command. Maximum of 500 messages async fn snapbotcommands(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let count = match args.single::<u64>() { Ok(x) if x <= 500 => x, _ => { msg.channel_id .say(&ctx, "Usage: /snapbotcommands <number>") .await .log_err(); return Ok(()); } }; let channel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let messages = channel .messages(&ctx, |retriever| retriever.before(msg.id).limit(count)) .await?; let mut bot_messages: Vec<&Message> = messages .iter() .filter(|msg| { (msg.content.starts_with('/') || msg.content.starts_with('!')) && chrono::Utc::now().naive_utc() - msg.timestamp.naive_utc() < chrono::Duration::weeks(2) }) .collect(); bot_messages.push(msg); channel.delete_messages(&ctx, bot_messages).await.log_err(); Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Murder the last X messages. Messages older than 2 weeks cannot be deleted with this command. Maximum of 500 messages async fn snapspam(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let count = match args.single::<u64>() { Ok(x) if x <= 500 => x, _ => { msg.channel_id .say(&ctx, "Usage: /snapspam <number>") .await .log_err(); return Ok(()); } }; let channel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let mut messages = channel .messages(&ctx, |retriever| retriever.before(msg.id).limit(count)) .await?; messages.push(msg.clone()); let messages = messages.into_iter().filter(|m| { chrono::Utc::now().naive_utc() - m.timestamp.naive_utc() < chrono::Duration::weeks(2) }); channel.delete_messages(&ctx, messages).await.log_err(); Ok(()) } #[command] #[only_in(guilds)] #[checks(DVS)] /// Gets the status of the DVS minecraft server async fn getdvsstatus(ctx: &Context, msg: &Message) -> CommandResult { msg.channel_id.broadcast_typing(&ctx).await.log_err(); let code = std::process::Command::new("sh") .args(&[ "-c", "nmap -4 applesthepi.com -Pn -p 25566 | rg '25566/tcp open'", ]) .status() .unwrap(); if code.success() { let message = MessageBuilder::new() .user(msg.author.id) .push(" Server port appears to be open, so it should be up.") .build(); msg.channel_id.say(&ctx, message).await.log_err(); } else { msg.channel_id .say( &ctx, "Server down indeed, <@324381278600298509> your server is on crack", ) .await .log_err(); } Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Murder all messages after the message with the given id.
Message ids can be obtained by enabling /// developer mode in Discord's settings and right click -> copy id /// Messages older than 2 weeks cannot be deleted with this. /// Usage: /snapafter <messageid> async fn snapafter(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let id = args.single::<u64>()?; let channel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let messages = channel .messages(&ctx, |retriever| retriever.after(id)) .await?; let messages = messages.into_iter().filter(|m| { chrono::Utc::now().naive_utc() - m.timestamp.naive_utc() < chrono::Duration::weeks(2) }); channel.delete_messages(&ctx, messages).await.log_err(); Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Sets the slowmode to any second value. This allows more specific slowmode values, like 1 second. /// Usage: /setslowmode <seconds> async fn setslowmode(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let arg: u64 = args.single()?; msg.channel_id .to_channel(&ctx) .await? .guild() .unwrap() .edit(&ctx, |c| c.slow_mode_rate(arg)) .await .log_err(); Ok(()) }
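// Hypothetical helper, not part of the original bot: Discord's bulk-delete endpoint accepts
// only 2..=100 message ids per call, so handing `delete_messages` up to 500 messages at once,
// as the snap* commands above do, can fail for large purges. A hedged sketch of chunking the
// work with the same serenity calls used elsewhere in this file:
#[allow(dead_code)]
async fn delete_in_batches(ctx: &Context, channel: &GuildChannel, messages: Vec<Message>) {
    for chunk in messages.chunks(100) {
        if chunk.len() < 2 {
            // The bulk endpoint rejects batches of one; delete those individually.
            for m in chunk {
                m.delete(ctx).await.log_err();
            }
        } else {
            // Each chunk stays inside the 2..=100 window the endpoint allows.
            channel.delete_messages(ctx, chunk.iter()).await.log_err();
        }
    }
}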
{ log_timestamp!("INFO", "Reconnected to discord"); }
identifier_body
main.rs
#![deny(unused_must_use)] #![type_length_limit = "1340885"] extern crate serenity; extern crate ctrlc; #[macro_use] pub mod logger; pub mod canary_update; pub mod dogebotno; pub mod permissions; pub mod servers; pub mod voice; use canary_update::*; use futures::{Stream, StreamExt}; use lazy_static::*; use logger::get_guild_members; use rand::Rng; use regex::Regex; use serenity::async_trait; use serenity::client::bridge::gateway::GatewayIntents; use serenity::model::channel::GuildChannel; use serenity::{ client::bridge::gateway::ShardManager, framework::standard::{macros::*, *}, model::{ channel::Message, event::{PresenceUpdateEvent, ResumedEvent}, gateway::Ready, id::{ChannelId, GuildId, UserId}, user::OnlineStatus, }, prelude::*, utils::MessageBuilder, Client, }; use std::{ collections::HashSet, ops::DerefMut, sync::{atomic::AtomicBool, Arc}, }; use tokio::{stream, sync::Mutex}; use voice::OofVoice; /// Unwrapping many of the errors in oofbot, mostly api calls, will result in a panic sometimes. /// This is bad. But I also cant ignore the errors in case theres something bad in there. So my /// solution is this trait, which logs the error. If I look in the logs and see something bad, then /// I know to recheck everything trait LogResult { /// If the result is an error, log the error. fn log_err(&self) where Self: std::fmt::Debug, { log_timestamp!("DEBUG", format!("{:?}", self)) } } impl<T: std::fmt::Debug, E: std::fmt::Debug> LogResult for Result<T, E> { /// If the result is an error, log the error. fn log_err(&self) { if self.is_err() { log_timestamp!("DEBUG", format!("{:?}", self)); } } } /// The general command group. May be deleted later #[group] #[commands(test, executeorder66, getdvsstatus)] struct General; /// A testing command that can only be run by me. #[command] async fn test(ctx: &Context, msg: &Message) -> CommandResult { if msg.author.id!= 453344368913547265 { msg.channel_id.say(&ctx, "No").await.log_err(); return Ok(()); } //let canary = ctx.data.read().get::<CanaryUpdateHandler>().cloned().unwrap(); //let lock = canary.lock()?; //let res = lock.create_db(); //res.log_err(); //if res.is_ok() { msg.channel_id.say(&ctx, "It seems to have worked").log_err(); //} //else { // msg.channel_id.say(&ctx, "killme").log_err(); //} msg.channel_id.say(&ctx, "@admin").await.log_err(); Ok(()) } #[command] async fn executeorder66(ctx: &Context, msg: &Message) -> CommandResult { msg.channel_id.say(&ctx, "not yet").await.log_err(); Ok(()) } /// The event handler for oofbot pub struct Handler { cancel_tyler_ping: Arc<AtomicBool>, mention_regex: Regex, } impl Default for Handler { fn default() -> Self { Self { cancel_tyler_ping: Arc::default(), mention_regex: Regex::new(r"<@!?468928390917783553>").unwrap(), } } } #[async_trait] impl EventHandler for Handler { async fn presence_update(&self, ctx: Context, data: PresenceUpdateEvent) { // oofbot only handles guild presence updates if data.guild_id.is_none() { return; } // Dogebot is oofbots greatest enemy. We got some checks in here just for him. 
let is_dogebot = data.presence.user_id == 612070962913083405; // Should never be none because we check that up there let guild_id = data.guild_id.unwrap(); // Checks if dogebot is offline in this guild (the main development guild for dogebot and // oofbot) if is_dogebot && guild_id.0 == 561874457283657728 { dogebotno::dogebot_presence(&ctx, &data, &guild_id, self).await; } else if!is_dogebot && data.presence.status == OnlineStatus::Offline { // Inside joke, memeing on how tiny discord canary updates are and how often we get them let canary = ctx .data .read() .await .get::<CanaryUpdateHandler>() .cloned() .unwrap(); let mut lock = canary.lock().await; lock.add_canary_update(&data.presence.user_id).await; } else if!is_dogebot && data.presence.status == OnlineStatus::Online { canary_update::do_update(&ctx, &data).await; } } async fn resume(&self, _ctx: Context, _data: ResumedEvent) { log_timestamp!("INFO", "Reconnected to discord"); } async fn ready(&self, ctx: Context, _data: Ready) { log_timestamp!("INFO", format!("Shard {} ready", ctx.shard_id)); } async fn cache_ready(&self, ctx: Context, guilds: Vec<GuildId>) { let shard = ctx.shard_id; let rctx = &ctx; // Get all the guilds that this shard is connected to // Not that this bot will ever be big enough for me to bother sharding it let guild_info: Vec<_> = stream::iter(guilds) .filter_map(|guild_id| async move { if guild_id.shard_id(&rctx).await == rctx.shard_id { Some(( guild_id, guild_id.to_guild_cached(&rctx).await.unwrap().name.clone(), )) } else { None } }) .collect() .await; log_timestamp!( "INFO", format!("Shard {} connected to guilds\n{:#?}", shard, guild_info) ); } async fn message(&self, ctx: Context, msg: Message) { log_timestamp!("DEBUG", &msg.content); if msg.author.id == 612070962913083405 { dogebotno::dogebotno(ctx, msg).await; return; } if self.mention_regex.is_match(msg.content.as_str()) { let channel_id: ChannelId = msg.channel_id; channel_id .say( &ctx, "For thousands of years I lay dormant, who has disturbed my slumber", ) .await .log_err(); } if msg.content.contains("@someone") &&!msg.author.bot { someone_ping(&ctx, &msg).await; } if (msg.content.contains("@everyone") || msg.content.contains("@here")) && msg.author.id.0!= 468928390917783553 { msg.channel_id .say(&ctx, "https://yeet.kikoho.xyz/files/ping.gif") .await .log_err(); } if msg.author.id == 266345279513427971 && msg.content.contains("https://www.twitch.tv/corporal_q") { msg.channel_id.say(&ctx, "sotp spamming").await.log_err(); } } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { log_timestamp!("INFO", "Starting oofbot"); log_timestamp!("INFO", "Getting client secret from file"); let mut framework = StandardFramework::new() .configure(|c| c.prefix("/")) .group(&GENERAL_GROUP) .group(&ADMIN_GROUP) .help(&HELP); voice::do_framework(&mut framework); permissions::do_framework(&mut framework); canary_update::do_framework(&mut framework); let secret = std::fs::read_to_string("client_secret") .expect("Client secret needs to be in a file called client_secret"); let mut client = Client::builder(secret) .add_intent(GatewayIntents::all()) .framework(framework) .event_handler(Handler::default()) .await .expect("Failed to create client"); // Voice initialization { // Lock the clients data let mut data = client.data.write().await; // Add the voice manager log_timestamp!("INFO", "Starting oofvoice"); data.insert::<OofVoice>(OofVoice::new(client.voice_manager.clone()).await); log_timestamp!("INFO", "Started oofvoice"); // Add canary update handler 
log_timestamp!("INFO", "Starting canary update handler"); let sql = permissions::SqlHandler::new(); data.insert::<CanaryUpdateHandler>(Arc::new(Mutex::new(CanaryUpdateHandler::new(sql)))); log_timestamp!("INFO", "Started canary update handler"); } let shard_manager = client.shard_manager.clone(); // Handle ctrl+c cross platform ctrlc::set_handler(move || { log_timestamp!("INFO", "Caught SIGINT, closing oofbot"); //let mut lock = shard_manager.lock().await; //let sm: &mut ShardManager = lock.deref_mut(); //sm.shutdown_all();
}) .log_err(); // Hah you think this bot is big enough to be sharded? Nice joke // But if yours is use.start_autosharded() client.start().await?; Ok(()) } /// Handles the @someone ping. Yes im evil. async fn someone_ping(ctx: &Context, msg: &Message) { let guild_id: Option<GuildId> = msg.guild_id; let channel_id: ChannelId = msg.channel_id; match guild_id { Some(id) => { let mut message = MessageBuilder::new(); { let members = match get_guild_members(&ctx, id).await { Some(m) => m, None => { log_timestamp!("ERROR", format!("Failed to find guild {}", id)); msg.channel_id.say(&ctx, "Internal Error").await.log_err(); return; } }; let mut rng = rand::thread_rng(); message.mention(&msg.author); message.push(" has pinged: "); let someones = msg.content.split("@someone"); let c = someones.count(); if c > 1 { let r = rng.gen_range(0, members.len()); message.mention(&members[r]); } // Randomly select the @someones msg.content.split("@someone").skip(2).for_each(|_| { message.push(", "); let r = rng.gen_range(0, members.len()); message.mention(&members[r]); }); } channel_id.say(&ctx, message).await.log_err(); } None => { // If guild is none then this is a dm channel_id .say(&ctx.http, "Cannot @someone in dms") .await .log_err(); } } } #[help] async fn help( context: &Context, msg: &Message, args: Args, help_options: &'static HelpOptions, groups: &[&'static CommandGroup], owners: HashSet<UserId>, ) -> CommandResult { help_commands::with_embeds(context, msg, args, help_options, groups, owners).await; Ok(()) } #[check] #[name = "ManageMessages"] #[check_in_help(true)] #[display_in_help(true)] async fn manage_messages_check(ctx: &Context, msg: &Message) -> CheckResult { if msg.author.id == 453344368913547265 { return true.into(); } else if let Ok(member) = msg.member(&ctx).await { if let Ok(permissions) = member.permissions(&ctx.cache).await { return (permissions.administrator() || permissions.manage_messages()).into(); } } false.into() } #[check] #[name = "DVS"] #[check_in_help(true)] #[display_in_help(true)] async fn dvs_check(_ctx: &Context, msg: &Message) -> CheckResult { (msg.guild_id.unwrap_or(0.into()) == 693213312099287153).into() } #[group] #[commands(snapbm, snapping, snapbotcommands, snapspam, snapafter, setslowmode)] /// Admin command group /// Get this, it has admin commands, amazing right? struct Admin; #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Scan the last X messages and delete all that are from bots. Messages older than 2 weeks cannot be deleted with this command. Maximum of 500 messages async fn snapbm(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let count = match args.single::<u64>() { Ok(x) if x <= 500 => x, _ => { msg.channel_id .say(&ctx, "Usage: /snapbm <number>") .await .log_err(); return Ok(()); } }; let channel: GuildChannel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let messages = channel .messages(&ctx, |retriever| retriever.before(msg.id).limit(count)) .await?; let mut bot_messages: Vec<&Message> = messages .iter() .filter(|msg| { msg.author.bot && chrono::Utc::now().naive_utc() - msg.timestamp.naive_utc() < chrono::Duration::weeks(2) }) .collect(); bot_messages.push(msg); channel.delete_messages(&ctx, bot_messages).await.log_err(); Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Scan the last X messages and delete all that contain pings. Messages older than 2 weeks cannot be deleted with this command. 
Maximum of 500 messages async fn snapping(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let ping_regex = Regex::new("<@!?\\d*>").unwrap(); let count = match args.single::<u64>() { Ok(x) if x <= 500 => x, _ => { msg.channel_id .say(&ctx, "Usage: /lazysnapping <number>") .await .log_err(); return Ok(()); } }; let channel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let messages = channel .messages(&ctx, |retriever| retriever.before(msg.id).limit(count)) .await?; let mut bot_messages: Vec<&Message> = messages .iter() .filter(|msg| { ping_regex.is_match(msg.content.as_str()) && chrono::Utc::now().naive_utc() - msg.timestamp.naive_utc() < chrono::Duration::weeks(2) }) .collect(); bot_messages.push(msg); channel.delete_messages(&ctx, bot_messages).await.log_err(); Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Scan the last X messages and delete all that start with /. Messages older than 2 weeks cannot be deleted with this command. Maximum of 500 messages async fn snapbotcommands(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let count = match args.single::<u64>() { Ok(x) if x <= 500 => x, _ => { msg.channel_id .say(&ctx, "Usage: /snapbotcommands <number>") .await .log_err(); return Ok(()); } }; let channel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let messages = channel .messages(&ctx, |retriever| retriever.before(msg.id).limit(count)) .await?; let mut bot_messages: Vec<&Message> = messages .iter() .filter(|msg| { (msg.content.starts_with('/') || msg.content.starts_with('!')) && chrono::Utc::now().naive_utc() - msg.timestamp.naive_utc() < chrono::Duration::weeks(2) }) .collect(); bot_messages.push(msg); channel.delete_messages(&ctx, bot_messages).await.log_err(); Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Murder the last X messages. Messages older than 2 weeks cannot be deleted with this command. Maximum of 500 messages async fn snapspam(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let count = match args.single::<u64>() { Ok(x) if x <= 500 => x, _ => { msg.channel_id .say(&ctx, "Usage: /snapspam <number>") .await .log_err(); return Ok(()); } }; let channel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let mut messages = channel .messages(&ctx, |retriever| retriever.before(msg.id).limit(count)) .await?; messages.push(msg.clone()); let messages = messages.into_iter().filter(|m| { chrono::Utc::now().naive_utc() - m.timestamp.naive_utc() < chrono::Duration::weeks(2) }); channel.delete_messages(&ctx, messages).await.log_err(); Ok(()) } #[command] #[only_in(guilds)] #[checks(DVS)] /// Gets the status of the DVS minecraft server async fn getdvsstatus(ctx: &Context, msg: &Message) -> CommandResult { msg.channel_id.broadcast_typing(&ctx).await.log_err(); let code = std::process::Command::new("sh") .args(&[ "-c", "nmap -4 applesthepi.com -Pn -p 25566 | rg '25566/tcp open'", ]) .status() .unwrap(); if code.success() { let message = MessageBuilder::new() .user(msg.author.id) .push(" Server port appears to be open, so it should be up.") .build(); msg.channel_id.say(&ctx, message).await.log_err(); } else { msg.channel_id .say( &ctx, "Server down indeed, <@324381278600298509> your server is on crack", ) .await .log_err(); } Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Murder all messages after the message with the given id. 
Message ids can be obtained by enabling /// developer mode in Discord's settings and right click -> copy id /// Messages older than 2 weeks cannot be deleted with this. /// Usage: /snapafter <messageid> async fn snapafter(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let id = args.single::<u64>()?; let channel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let messages = channel .messages(&ctx, |retriever| retriever.after(id)) .await?; let messages = messages.into_iter().filter(|m| { chrono::Utc::now().naive_utc() - m.timestamp.naive_utc() < chrono::Duration::weeks(2) }); channel.delete_messages(&ctx, messages).await.log_err(); Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Sets the slowmode to any second value. This allows more specific slowmode values, like 1 second. /// Usage: /setslowmode <seconds> async fn setslowmode(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let arg: u64 = args.single()?; msg.channel_id .to_channel(&ctx) .await? .guild() .unwrap() .edit(&ctx, |c| c.slow_mode_rate(arg)) .await .log_err(); Ok(()) }
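// Hypothetical helper, not part of the original bot: `someone_ping` indexes the member list
// with `rng.gen_range(0, members.len())` (the two-argument rand 0.7 form used throughout this
// file), which panics when the list is empty because the range 0..0 cannot be sampled. A
// hedged sketch of the same selection with that guard made explicit:
#[allow(dead_code)]
fn pick_member<T>(members: &[T]) -> Option<&T> {
    if members.is_empty() {
        return None; // avoid the empty-range panic instead of unwinding inside the handler
    }
    let mut rng = rand::thread_rng();
    Some(&members[rng.gen_range(0, members.len())])
}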
std::process::exit(0);
random_line_split
main.rs
#![deny(unused_must_use)] #![type_length_limit = "1340885"] extern crate serenity; extern crate ctrlc; #[macro_use] pub mod logger; pub mod canary_update; pub mod dogebotno; pub mod permissions; pub mod servers; pub mod voice; use canary_update::*; use futures::{Stream, StreamExt}; use lazy_static::*; use logger::get_guild_members; use rand::Rng; use regex::Regex; use serenity::async_trait; use serenity::client::bridge::gateway::GatewayIntents; use serenity::model::channel::GuildChannel; use serenity::{ client::bridge::gateway::ShardManager, framework::standard::{macros::*, *}, model::{ channel::Message, event::{PresenceUpdateEvent, ResumedEvent}, gateway::Ready, id::{ChannelId, GuildId, UserId}, user::OnlineStatus, }, prelude::*, utils::MessageBuilder, Client, }; use std::{ collections::HashSet, ops::DerefMut, sync::{atomic::AtomicBool, Arc}, }; use tokio::{stream, sync::Mutex}; use voice::OofVoice; /// Unwrapping many of the errors in oofbot, mostly api calls, will result in a panic sometimes. /// This is bad. But I also cant ignore the errors in case theres something bad in there. So my /// solution is this trait, which logs the error. If I look in the logs and see something bad, then /// I know to recheck everything trait LogResult { /// If the result is an error, log the error. fn log_err(&self) where Self: std::fmt::Debug, { log_timestamp!("DEBUG", format!("{:?}", self)) } } impl<T: std::fmt::Debug, E: std::fmt::Debug> LogResult for Result<T, E> { /// If the result is an error, log the error. fn log_err(&self) { if self.is_err() { log_timestamp!("DEBUG", format!("{:?}", self)); } } } /// The general command group. May be deleted later #[group] #[commands(test, executeorder66, getdvsstatus)] struct General; /// A testing command that can only be run by me. #[command] async fn
(ctx: &Context, msg: &Message) -> CommandResult { if msg.author.id!= 453344368913547265 { msg.channel_id.say(&ctx, "No").await.log_err(); return Ok(()); } //let canary = ctx.data.read().get::<CanaryUpdateHandler>().cloned().unwrap(); //let lock = canary.lock()?; //let res = lock.create_db(); //res.log_err(); //if res.is_ok() { msg.channel_id.say(&ctx, "It seems to have worked").log_err(); //} //else { // msg.channel_id.say(&ctx, "killme").log_err(); //} msg.channel_id.say(&ctx, "@admin").await.log_err(); Ok(()) } #[command] async fn executeorder66(ctx: &Context, msg: &Message) -> CommandResult { msg.channel_id.say(&ctx, "not yet").await.log_err(); Ok(()) } /// The event handler for oofbot pub struct Handler { cancel_tyler_ping: Arc<AtomicBool>, mention_regex: Regex, } impl Default for Handler { fn default() -> Self { Self { cancel_tyler_ping: Arc::default(), mention_regex: Regex::new(r"<@!?468928390917783553>").unwrap(), } } } #[async_trait] impl EventHandler for Handler { async fn presence_update(&self, ctx: Context, data: PresenceUpdateEvent) { // oofbot only handles guild presence updates if data.guild_id.is_none() { return; } // Dogebot is oofbots greatest enemy. We got some checks in here just for him. let is_dogebot = data.presence.user_id == 612070962913083405; // Should never be none because we check that up there let guild_id = data.guild_id.unwrap(); // Checks if dogebot is offline in this guild (the main development guild for dogebot and // oofbot) if is_dogebot && guild_id.0 == 561874457283657728 { dogebotno::dogebot_presence(&ctx, &data, &guild_id, self).await; } else if!is_dogebot && data.presence.status == OnlineStatus::Offline { // Inside joke, memeing on how tiny discord canary updates are and how often we get them let canary = ctx .data .read() .await .get::<CanaryUpdateHandler>() .cloned() .unwrap(); let mut lock = canary.lock().await; lock.add_canary_update(&data.presence.user_id).await; } else if!is_dogebot && data.presence.status == OnlineStatus::Online { canary_update::do_update(&ctx, &data).await; } } async fn resume(&self, _ctx: Context, _data: ResumedEvent) { log_timestamp!("INFO", "Reconnected to discord"); } async fn ready(&self, ctx: Context, _data: Ready) { log_timestamp!("INFO", format!("Shard {} ready", ctx.shard_id)); } async fn cache_ready(&self, ctx: Context, guilds: Vec<GuildId>) { let shard = ctx.shard_id; let rctx = &ctx; // Get all the guilds that this shard is connected to // Not that this bot will ever be big enough for me to bother sharding it let guild_info: Vec<_> = stream::iter(guilds) .filter_map(|guild_id| async move { if guild_id.shard_id(&rctx).await == rctx.shard_id { Some(( guild_id, guild_id.to_guild_cached(&rctx).await.unwrap().name.clone(), )) } else { None } }) .collect() .await; log_timestamp!( "INFO", format!("Shard {} connected to guilds\n{:#?}", shard, guild_info) ); } async fn message(&self, ctx: Context, msg: Message) { log_timestamp!("DEBUG", &msg.content); if msg.author.id == 612070962913083405 { dogebotno::dogebotno(ctx, msg).await; return; } if self.mention_regex.is_match(msg.content.as_str()) { let channel_id: ChannelId = msg.channel_id; channel_id .say( &ctx, "For thousands of years I lay dormant, who has disturbed my slumber", ) .await .log_err(); } if msg.content.contains("@someone") &&!msg.author.bot { someone_ping(&ctx, &msg).await; } if (msg.content.contains("@everyone") || msg.content.contains("@here")) && msg.author.id.0!= 468928390917783553 { msg.channel_id .say(&ctx, "https://yeet.kikoho.xyz/files/ping.gif") .await 
.log_err(); } if msg.author.id == 266345279513427971 && msg.content.contains("https://www.twitch.tv/corporal_q") { msg.channel_id.say(&ctx, "sotp spamming").await.log_err(); } } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { log_timestamp!("INFO", "Starting oofbot"); log_timestamp!("INFO", "Getting client secret from file"); let mut framework = StandardFramework::new() .configure(|c| c.prefix("/")) .group(&GENERAL_GROUP) .group(&ADMIN_GROUP) .help(&HELP); voice::do_framework(&mut framework); permissions::do_framework(&mut framework); canary_update::do_framework(&mut framework); let secret = std::fs::read_to_string("client_secret") .expect("Client secret needs to be in a file called client_secret"); let mut client = Client::builder(secret) .add_intent(GatewayIntents::all()) .framework(framework) .event_handler(Handler::default()) .await .expect("Failed to create client"); // Voice initialization { // Lock the clients data let mut data = client.data.write().await; // Add the voice manager log_timestamp!("INFO", "Starting oofvoice"); data.insert::<OofVoice>(OofVoice::new(client.voice_manager.clone()).await); log_timestamp!("INFO", "Started oofvoice"); // Add canary update handler log_timestamp!("INFO", "Starting canary update handler"); let sql = permissions::SqlHandler::new(); data.insert::<CanaryUpdateHandler>(Arc::new(Mutex::new(CanaryUpdateHandler::new(sql)))); log_timestamp!("INFO", "Started canary update handler"); } let shard_manager = client.shard_manager.clone(); // Handle ctrl+c cross platform ctrlc::set_handler(move || { log_timestamp!("INFO", "Caught SIGINT, closing oofbot"); //let mut lock = shard_manager.lock().await; //let sm: &mut ShardManager = lock.deref_mut(); //sm.shutdown_all(); std::process::exit(0); }) .log_err(); // Hah you think this bot is big enough to be sharded? Nice joke // But if yours is use.start_autosharded() client.start().await?; Ok(()) } /// Handles the @someone ping. Yes im evil. 
async fn someone_ping(ctx: &Context, msg: &Message) { let guild_id: Option<GuildId> = msg.guild_id; let channel_id: ChannelId = msg.channel_id; match guild_id { Some(id) => { let mut message = MessageBuilder::new(); { let members = match get_guild_members(&ctx, id).await { Some(m) => m, None => { log_timestamp!("ERROR", format!("Failed to find guild {}", id)); msg.channel_id.say(&ctx, "Internal Error").await.log_err(); return; } }; let mut rng = rand::thread_rng(); message.mention(&msg.author); message.push(" has pinged: "); let someones = msg.content.split("@someone"); let c = someones.count(); if c > 1 { let r = rng.gen_range(0, members.len()); message.mention(&members[r]); } // Randomly select the @someones msg.content.split("@someone").skip(2).for_each(|_| { message.push(", "); let r = rng.gen_range(0, members.len()); message.mention(&members[r]); }); } channel_id.say(&ctx, message).await.log_err(); } None => { // If guild is none then this is a dm channel_id .say(&ctx.http, "Cannot @someone in dms") .await .log_err(); } } } #[help] async fn help( context: &Context, msg: &Message, args: Args, help_options: &'static HelpOptions, groups: &[&'static CommandGroup], owners: HashSet<UserId>, ) -> CommandResult { help_commands::with_embeds(context, msg, args, help_options, groups, owners).await; Ok(()) } #[check] #[name = "ManageMessages"] #[check_in_help(true)] #[display_in_help(true)] async fn manage_messages_check(ctx: &Context, msg: &Message) -> CheckResult { if msg.author.id == 453344368913547265 { return true.into(); } else if let Ok(member) = msg.member(&ctx).await { if let Ok(permissions) = member.permissions(&ctx.cache).await { return (permissions.administrator() || permissions.manage_messages()).into(); } } false.into() } #[check] #[name = "DVS"] #[check_in_help(true)] #[display_in_help(true)] async fn dvs_check(_ctx: &Context, msg: &Message) -> CheckResult { (msg.guild_id.unwrap_or(0.into()) == 693213312099287153).into() } #[group] #[commands(snapbm, snapping, snapbotcommands, snapspam, snapafter, setslowmode)] /// Admin command group /// Get this, it has admin commands, amazing right? struct Admin; #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Scan the last X messages and delete all that are from bots. Messages older than 2 weeks cannot be deleted with this command. Maximum of 500 messages async fn snapbm(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let count = match args.single::<u64>() { Ok(x) if x <= 500 => x, _ => { msg.channel_id .say(&ctx, "Usage: /snapbm <number>") .await .log_err(); return Ok(()); } }; let channel: GuildChannel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let messages = channel .messages(&ctx, |retriever| retriever.before(msg.id).limit(count)) .await?; let mut bot_messages: Vec<&Message> = messages .iter() .filter(|msg| { msg.author.bot && chrono::Utc::now().naive_utc() - msg.timestamp.naive_utc() < chrono::Duration::weeks(2) }) .collect(); bot_messages.push(msg); channel.delete_messages(&ctx, bot_messages).await.log_err(); Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Scan the last X messages and delete all that contain pings. Messages older than 2 weeks cannot be deleted with this command. 
Maximum of 500 messages async fn snapping(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let ping_regex = Regex::new("<@!?\\d*>").unwrap(); let count = match args.single::<u64>() { Ok(x) if x <= 500 => x, _ => { msg.channel_id .say(&ctx, "Usage: /lazysnapping <number>") .await .log_err(); return Ok(()); } }; let channel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let messages = channel .messages(&ctx, |retriever| retriever.before(msg.id).limit(count)) .await?; let mut bot_messages: Vec<&Message> = messages .iter() .filter(|msg| { ping_regex.is_match(msg.content.as_str()) && chrono::Utc::now().naive_utc() - msg.timestamp.naive_utc() < chrono::Duration::weeks(2) }) .collect(); bot_messages.push(msg); channel.delete_messages(&ctx, bot_messages).await.log_err(); Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Scan the last X messages and delete all that start with /. Messages older than 2 weeks cannot be deleted with this command. Maximum of 500 messages async fn snapbotcommands(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let count = match args.single::<u64>() { Ok(x) if x <= 500 => x, _ => { msg.channel_id .say(&ctx, "Usage: /snapbotcommands <number>") .await .log_err(); return Ok(()); } }; let channel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let messages = channel .messages(&ctx, |retriever| retriever.before(msg.id).limit(count)) .await?; let mut bot_messages: Vec<&Message> = messages .iter() .filter(|msg| { (msg.content.starts_with('/') || msg.content.starts_with('!')) && chrono::Utc::now().naive_utc() - msg.timestamp.naive_utc() < chrono::Duration::weeks(2) }) .collect(); bot_messages.push(msg); channel.delete_messages(&ctx, bot_messages).await.log_err(); Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Murder the last X messages. Messages older than 2 weeks cannot be deleted with this command. Maximum of 500 messages async fn snapspam(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let count = match args.single::<u64>() { Ok(x) if x <= 500 => x, _ => { msg.channel_id .say(&ctx, "Usage: /snapspam <number>") .await .log_err(); return Ok(()); } }; let channel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let mut messages = channel .messages(&ctx, |retriever| retriever.before(msg.id).limit(count)) .await?; messages.push(msg.clone()); let messages = messages.into_iter().filter(|m| { chrono::Utc::now().naive_utc() - m.timestamp.naive_utc() < chrono::Duration::weeks(2) }); channel.delete_messages(&ctx, messages).await.log_err(); Ok(()) } #[command] #[only_in(guilds)] #[checks(DVS)] /// Gets the status of the DVS minecraft server async fn getdvsstatus(ctx: &Context, msg: &Message) -> CommandResult { msg.channel_id.broadcast_typing(&ctx).await.log_err(); let code = std::process::Command::new("sh") .args(&[ "-c", "nmap -4 applesthepi.com -Pn -p 25566 | rg '25566/tcp open'", ]) .status() .unwrap(); if code.success() { let message = MessageBuilder::new() .user(msg.author.id) .push(" Server port appears to be open, so it should be up.") .build(); msg.channel_id.say(&ctx, message).await.log_err(); } else { msg.channel_id .say( &ctx, "Server down indeed, <@324381278600298509> your server is on crack", ) .await .log_err(); } Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Murder all messages after the message with the given id. 
Message ids can be obtained by enabling /// developer mode in Discord's settings and right click -> copy id /// Messages older than 2 weeks cannot be deleted with this. /// Usage: /snapafter <messageid> async fn snapafter(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let id = args.single::<u64>()?; let channel = msg .channel_id .to_channel(&ctx) .await .unwrap() .guild() .unwrap(); let messages = channel .messages(&ctx, |retriever| retriever.after(id)) .await?; let messages = messages.into_iter().filter(|m| { chrono::Utc::now().naive_utc() - m.timestamp.naive_utc() < chrono::Duration::weeks(2) }); channel.delete_messages(&ctx, messages).await.log_err(); Ok(()) } #[command] #[checks(ManageMessages)] #[only_in(guilds)] /// Sets the slowmode to any second value. This allows more specific slowmode values, like 1 second. /// Usage: /setslowmode <seconds> async fn setslowmode(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let arg: u64 = args.single()?; msg.channel_id .to_channel(&ctx) .await? .guild() .unwrap() .edit(&ctx, |c| c.slow_mode_rate(arg)) .await .log_err(); Ok(()) }
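// Hedged alternative, not what the bot currently does: `snapping` recompiles its ping regex
// on every invocation. Since this file already imports `lazy_static`, the pattern could be
// compiled once up front (the static's name is ours):
lazy_static! {
    static ref PING_REGEX: Regex = Regex::new(r"<@!?\d*>").unwrap();
}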
test
identifier_name
backend.rs
use crate::intrinsics::Intrinsics; use inkwell::{ memory_buffer::MemoryBuffer, module::Module, targets::{CodeModel, FileType, InitializationConfig, RelocMode, Target, TargetMachine}, OptimizationLevel, }; use libc::{ c_char, mmap, mprotect, munmap, MAP_ANON, MAP_PRIVATE, PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE, }; use std::{ any::Any, ffi::{c_void, CString}, mem, ops::Deref, ptr::{self, NonNull}, slice, str, sync::{Arc, Once}, }; use wasmer_runtime_core::{ backend::{ sys::{Memory, Protect}, CacheGen, RunnableModule, }, cache::Error as CacheError, module::ModuleInfo, structures::TypedIndex, typed_func::{Wasm, WasmTrapInfo}, types::{LocalFuncIndex, SigIndex}, vm, vmcalls, }; #[repr(C)] struct LLVMModule { _private: [u8; 0], } #[allow(non_camel_case_types, dead_code)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[repr(C)] enum
{ NONE, READ, READ_WRITE, READ_EXECUTE, } #[allow(non_camel_case_types, dead_code)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[repr(C)] enum LLVMResult { OK, ALLOCATE_FAILURE, PROTECT_FAILURE, DEALLOC_FAILURE, OBJECT_LOAD_FAILURE, } #[repr(C)] struct Callbacks { alloc_memory: extern "C" fn(usize, MemProtect, &mut *mut u8, &mut usize) -> LLVMResult, protect_memory: extern "C" fn(*mut u8, usize, MemProtect) -> LLVMResult, dealloc_memory: extern "C" fn(*mut u8, usize) -> LLVMResult, lookup_vm_symbol: extern "C" fn(*const c_char, usize) -> *const vm::Func, visit_fde: extern "C" fn(*mut u8, usize, extern "C" fn(*mut u8)), } extern "C" { fn module_load( mem_ptr: *const u8, mem_size: usize, callbacks: Callbacks, module_out: &mut *mut LLVMModule, ) -> LLVMResult; fn module_delete(module: *mut LLVMModule); fn get_func_symbol(module: *mut LLVMModule, name: *const c_char) -> *const vm::Func; fn throw_trap(ty: i32); /// This should be the same as spliting up the fat pointer into two arguments, /// but this is cleaner, I think? #[cfg_attr(nightly, unwind(allowed))] #[allow(improper_ctypes)] fn throw_any(data: *mut dyn Any) ->!; #[allow(improper_ctypes)] fn invoke_trampoline( trampoline: unsafe extern "C" fn(*mut vm::Ctx, NonNull<vm::Func>, *const u64, *mut u64), vmctx_ptr: *mut vm::Ctx, func_ptr: NonNull<vm::Func>, params: *const u64, results: *mut u64, trap_out: *mut WasmTrapInfo, user_error: *mut Option<Box<dyn Any>>, invoke_env: Option<NonNull<c_void>>, ) -> bool; } fn get_callbacks() -> Callbacks { fn round_up_to_page_size(size: usize) -> usize { (size + (4096 - 1)) &!(4096 - 1) } extern "C" fn alloc_memory( size: usize, protect: MemProtect, ptr_out: &mut *mut u8, size_out: &mut usize, ) -> LLVMResult { let size = round_up_to_page_size(size); let ptr = unsafe { mmap( ptr::null_mut(), size, match protect { MemProtect::NONE => PROT_NONE, MemProtect::READ => PROT_READ, MemProtect::READ_WRITE => PROT_READ | PROT_WRITE, MemProtect::READ_EXECUTE => PROT_READ | PROT_EXEC, }, MAP_PRIVATE | MAP_ANON, -1, 0, ) }; if ptr as isize == -1 { return LLVMResult::ALLOCATE_FAILURE; } *ptr_out = ptr as _; *size_out = size; LLVMResult::OK } extern "C" fn protect_memory(ptr: *mut u8, size: usize, protect: MemProtect) -> LLVMResult { let res = unsafe { mprotect( ptr as _, round_up_to_page_size(size), match protect { MemProtect::NONE => PROT_NONE, MemProtect::READ => PROT_READ, MemProtect::READ_WRITE => PROT_READ | PROT_WRITE, MemProtect::READ_EXECUTE => PROT_READ | PROT_EXEC, }, ) }; if res == 0 { LLVMResult::OK } else { LLVMResult::PROTECT_FAILURE } } extern "C" fn dealloc_memory(ptr: *mut u8, size: usize) -> LLVMResult { let res = unsafe { munmap(ptr as _, round_up_to_page_size(size)) }; if res == 0 { LLVMResult::OK } else { LLVMResult::DEALLOC_FAILURE } } extern "C" fn lookup_vm_symbol(name_ptr: *const c_char, length: usize) -> *const vm::Func { #[cfg(target_os = "macos")] macro_rules! fn_name { ($s:literal) => { concat!("_", $s) }; } #[cfg(not(target_os = "macos"))] macro_rules! 
fn_name { ($s:literal) => { $s }; } let name_slice = unsafe { slice::from_raw_parts(name_ptr as *const u8, length) }; let name = str::from_utf8(name_slice).unwrap(); match name { fn_name!("vm.memory.grow.dynamic.local") => vmcalls::local_dynamic_memory_grow as _, fn_name!("vm.memory.size.dynamic.local") => vmcalls::local_dynamic_memory_size as _, fn_name!("vm.memory.grow.static.local") => vmcalls::local_static_memory_grow as _, fn_name!("vm.memory.size.static.local") => vmcalls::local_static_memory_size as _, fn_name!("vm.exception.trap") => throw_trap as _, _ => ptr::null(), } } extern "C" fn visit_fde(fde: *mut u8, size: usize, visitor: extern "C" fn(*mut u8)) { unsafe { crate::platform::visit_fde(fde, size, visitor); } } Callbacks { alloc_memory, protect_memory, dealloc_memory, lookup_vm_symbol, visit_fde, } } pub enum Buffer { LlvmMemory(MemoryBuffer), Memory(Memory), } impl Deref for Buffer { type Target = [u8]; fn deref(&self) -> &[u8] { match self { Buffer::LlvmMemory(mem_buffer) => mem_buffer.as_slice(), Buffer::Memory(memory) => unsafe { memory.as_slice() }, } } } unsafe impl Send for LLVMBackend {} unsafe impl Sync for LLVMBackend {} pub struct LLVMBackend { module: *mut LLVMModule, #[allow(dead_code)] buffer: Arc<Buffer>, } impl LLVMBackend { pub fn new(module: Module, _intrinsics: Intrinsics) -> (Self, LLVMCache) { Target::initialize_x86(&InitializationConfig { asm_parser: true, asm_printer: true, base: true, disassembler: true, info: true, machine_code: true, }); let triple = TargetMachine::get_default_triple().to_string(); let target = Target::from_triple(&triple).unwrap(); let target_machine = target .create_target_machine( &triple, &TargetMachine::get_host_cpu_name().to_string(), &TargetMachine::get_host_cpu_features().to_string(), OptimizationLevel::Aggressive, RelocMode::PIC, CodeModel::Default, ) .unwrap(); let memory_buffer = target_machine .write_to_memory_buffer(&module, FileType::Object) .unwrap(); let mem_buf_slice = memory_buffer.as_slice(); let callbacks = get_callbacks(); let mut module: *mut LLVMModule = ptr::null_mut(); let res = unsafe { module_load( mem_buf_slice.as_ptr(), mem_buf_slice.len(), callbacks, &mut module, ) }; static SIGNAL_HANDLER_INSTALLED: Once = Once::new(); SIGNAL_HANDLER_INSTALLED.call_once(|| unsafe { crate::platform::install_signal_handler(); }); if res!= LLVMResult::OK { panic!("failed to load object") } let buffer = Arc::new(Buffer::LlvmMemory(memory_buffer)); ( Self { module, buffer: Arc::clone(&buffer), }, LLVMCache { buffer }, ) } pub unsafe fn from_buffer(memory: Memory) -> Result<(Self, LLVMCache), String> { let callbacks = get_callbacks(); let mut module: *mut LLVMModule = ptr::null_mut(); let slice = memory.as_slice(); let res = module_load(slice.as_ptr(), slice.len(), callbacks, &mut module); if res!= LLVMResult::OK { return Err("failed to load object".to_string()); } static SIGNAL_HANDLER_INSTALLED: Once = Once::new(); SIGNAL_HANDLER_INSTALLED.call_once(|| { crate::platform::install_signal_handler(); }); let buffer = Arc::new(Buffer::Memory(memory)); Ok(( Self { module, buffer: Arc::clone(&buffer), }, LLVMCache { buffer }, )) } } impl Drop for LLVMBackend { fn drop(&mut self) { unsafe { module_delete(self.module) } } } impl RunnableModule for LLVMBackend { fn get_func( &self, info: &ModuleInfo, local_func_index: LocalFuncIndex, ) -> Option<NonNull<vm::Func>> { let index = info.imported_functions.len() + local_func_index.index(); let name = if cfg!(target_os = "macos") { format!("_fn{}", index) } else { format!("fn{}", index) }; 
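        // Note: `name` comes from `format!`, so it contains no interior NUL bytes and the
        // `CString::new(name).ok()?` below is effectively infallible; the `?` simply makes
        // this lookup return None instead of panicking if that ever changes.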
let c_str = CString::new(name).ok()?; let ptr = unsafe { get_func_symbol(self.module, c_str.as_ptr()) }; NonNull::new(ptr as _) } fn get_trampoline(&self, _: &ModuleInfo, sig_index: SigIndex) -> Option<Wasm> { let trampoline: unsafe extern "C" fn( *mut vm::Ctx, NonNull<vm::Func>, *const u64, *mut u64, ) = unsafe { let name = if cfg!(target_os = "macos") { format!("_trmp{}", sig_index.index()) } else { format!("trmp{}", sig_index.index()) }; let c_str = CString::new(name).unwrap(); let symbol = get_func_symbol(self.module, c_str.as_ptr()); assert!(!symbol.is_null()); mem::transmute(symbol) }; Some(unsafe { Wasm::from_raw_parts(trampoline, invoke_trampoline, None) }) } unsafe fn do_early_trap(&self, data: Box<dyn Any>) ->! { throw_any(Box::leak(data)) } } unsafe impl Send for LLVMCache {} unsafe impl Sync for LLVMCache {} pub struct LLVMCache { buffer: Arc<Buffer>, } impl CacheGen for LLVMCache { fn generate_cache(&self) -> Result<(Box<[u8]>, Memory), CacheError> { let mut memory = Memory::with_size_protect(self.buffer.len(), Protect::ReadWrite) .map_err(CacheError::SerializeError)?; let buffer = self.buffer.deref(); unsafe { memory.as_slice_mut()[..buffer.len()].copy_from_slice(buffer); } Ok(([].as_ref().into(), memory)) } } #[cfg(feature = "disasm")] unsafe fn disass_ptr(ptr: *const u8, size: usize, inst_count: usize) { use capstone::arch::BuildsCapstone; let mut cs = capstone::Capstone::new() // Call builder-pattern .x86() // X86 architecture .mode(capstone::arch::x86::ArchMode::Mode64) // 64-bit mode .detail(true) // Generate extra instruction details .build() .expect("Failed to create Capstone object"); // Get disassembled instructions let insns = cs .disasm_count( std::slice::from_raw_parts(ptr, size), ptr as u64, inst_count, ) .expect("Failed to disassemble"); println!("count = {}", insns.len()); for insn in insns.iter() { println!( "0x{:x}: {:6} {}", insn.address(), insn.mnemonic().unwrap_or(""), insn.op_str().unwrap_or("") ); } }
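// Hypothetical sanity test, assuming the same hard-coded 4096-byte pages as `get_callbacks`
// above: `round_up_to_page_size` works because the page size is a power of two, so
// `(size + 4095) & !4095` clears the low bits after stepping past the boundary.
#[cfg(test)]
mod page_rounding_tests {
    fn round_up_to_page_size(size: usize) -> usize {
        (size + (4096 - 1)) & !(4096 - 1)
    }

    #[test]
    fn rounds_to_4k_boundaries() {
        assert_eq!(round_up_to_page_size(0), 0);
        assert_eq!(round_up_to_page_size(1), 4096);
        assert_eq!(round_up_to_page_size(4096), 4096);
        assert_eq!(round_up_to_page_size(4097), 8192);
    }
}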
MemProtect
identifier_name
backend.rs
use crate::intrinsics::Intrinsics; use inkwell::{ memory_buffer::MemoryBuffer, module::Module, targets::{CodeModel, FileType, InitializationConfig, RelocMode, Target, TargetMachine}, OptimizationLevel, }; use libc::{ c_char, mmap, mprotect, munmap, MAP_ANON, MAP_PRIVATE, PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE, }; use std::{ any::Any, ffi::{c_void, CString}, mem, ops::Deref, ptr::{self, NonNull}, slice, str, sync::{Arc, Once}, }; use wasmer_runtime_core::{ backend::{ sys::{Memory, Protect}, CacheGen, RunnableModule, }, cache::Error as CacheError, module::ModuleInfo, structures::TypedIndex, typed_func::{Wasm, WasmTrapInfo}, types::{LocalFuncIndex, SigIndex}, vm, vmcalls, }; #[repr(C)] struct LLVMModule { _private: [u8; 0], } #[allow(non_camel_case_types, dead_code)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[repr(C)] enum MemProtect { NONE, READ, READ_WRITE, READ_EXECUTE, } #[allow(non_camel_case_types, dead_code)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[repr(C)] enum LLVMResult { OK, ALLOCATE_FAILURE, PROTECT_FAILURE, DEALLOC_FAILURE, OBJECT_LOAD_FAILURE, } #[repr(C)] struct Callbacks { alloc_memory: extern "C" fn(usize, MemProtect, &mut *mut u8, &mut usize) -> LLVMResult, protect_memory: extern "C" fn(*mut u8, usize, MemProtect) -> LLVMResult, dealloc_memory: extern "C" fn(*mut u8, usize) -> LLVMResult, lookup_vm_symbol: extern "C" fn(*const c_char, usize) -> *const vm::Func, visit_fde: extern "C" fn(*mut u8, usize, extern "C" fn(*mut u8)), } extern "C" { fn module_load( mem_ptr: *const u8, mem_size: usize, callbacks: Callbacks, module_out: &mut *mut LLVMModule, ) -> LLVMResult; fn module_delete(module: *mut LLVMModule); fn get_func_symbol(module: *mut LLVMModule, name: *const c_char) -> *const vm::Func; fn throw_trap(ty: i32); /// This should be the same as spliting up the fat pointer into two arguments, /// but this is cleaner, I think? 
#[cfg_attr(nightly, unwind(allowed))] #[allow(improper_ctypes)] fn throw_any(data: *mut dyn Any) ->!; #[allow(improper_ctypes)] fn invoke_trampoline( trampoline: unsafe extern "C" fn(*mut vm::Ctx, NonNull<vm::Func>, *const u64, *mut u64), vmctx_ptr: *mut vm::Ctx, func_ptr: NonNull<vm::Func>, params: *const u64, results: *mut u64, trap_out: *mut WasmTrapInfo, user_error: *mut Option<Box<dyn Any>>, invoke_env: Option<NonNull<c_void>>, ) -> bool; } fn get_callbacks() -> Callbacks { fn round_up_to_page_size(size: usize) -> usize { (size + (4096 - 1)) &!(4096 - 1) } extern "C" fn alloc_memory( size: usize, protect: MemProtect, ptr_out: &mut *mut u8, size_out: &mut usize, ) -> LLVMResult { let size = round_up_to_page_size(size); let ptr = unsafe { mmap( ptr::null_mut(), size, match protect { MemProtect::NONE => PROT_NONE, MemProtect::READ => PROT_READ, MemProtect::READ_WRITE => PROT_READ | PROT_WRITE, MemProtect::READ_EXECUTE => PROT_READ | PROT_EXEC, }, MAP_PRIVATE | MAP_ANON, -1, 0, ) }; if ptr as isize == -1 { return LLVMResult::ALLOCATE_FAILURE; } *ptr_out = ptr as _; *size_out = size; LLVMResult::OK } extern "C" fn protect_memory(ptr: *mut u8, size: usize, protect: MemProtect) -> LLVMResult { let res = unsafe { mprotect( ptr as _, round_up_to_page_size(size), match protect { MemProtect::NONE => PROT_NONE, MemProtect::READ => PROT_READ, MemProtect::READ_WRITE => PROT_READ | PROT_WRITE, MemProtect::READ_EXECUTE => PROT_READ | PROT_EXEC, }, ) }; if res == 0 { LLVMResult::OK } else { LLVMResult::PROTECT_FAILURE } } extern "C" fn dealloc_memory(ptr: *mut u8, size: usize) -> LLVMResult { let res = unsafe { munmap(ptr as _, round_up_to_page_size(size)) }; if res == 0 { LLVMResult::OK } else { LLVMResult::DEALLOC_FAILURE } } extern "C" fn lookup_vm_symbol(name_ptr: *const c_char, length: usize) -> *const vm::Func { #[cfg(target_os = "macos")] macro_rules! fn_name { ($s:literal) => { concat!("_", $s) }; } #[cfg(not(target_os = "macos"))] macro_rules! fn_name { ($s:literal) => { $s }; }
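        // Note: Mach-O (the macOS object format) prefixes C symbol names with an underscore,
        // which is why `fn_name!` prepends "_" on macOS and passes names through unchanged
        // elsewhere; `get_func` and `get_trampoline` below follow the same convention with
        // their "_fn{}" / "_trmp{}" formats.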
let name_slice = unsafe { slice::from_raw_parts(name_ptr as *const u8, length) }; let name = str::from_utf8(name_slice).unwrap(); match name { fn_name!("vm.memory.grow.dynamic.local") => vmcalls::local_dynamic_memory_grow as _, fn_name!("vm.memory.size.dynamic.local") => vmcalls::local_dynamic_memory_size as _, fn_name!("vm.memory.grow.static.local") => vmcalls::local_static_memory_grow as _, fn_name!("vm.memory.size.static.local") => vmcalls::local_static_memory_size as _, fn_name!("vm.exception.trap") => throw_trap as _, _ => ptr::null(), } } extern "C" fn visit_fde(fde: *mut u8, size: usize, visitor: extern "C" fn(*mut u8)) { unsafe { crate::platform::visit_fde(fde, size, visitor); } } Callbacks { alloc_memory, protect_memory, dealloc_memory, lookup_vm_symbol, visit_fde, } } pub enum Buffer { LlvmMemory(MemoryBuffer), Memory(Memory), } impl Deref for Buffer { type Target = [u8]; fn deref(&self) -> &[u8] { match self { Buffer::LlvmMemory(mem_buffer) => mem_buffer.as_slice(), Buffer::Memory(memory) => unsafe { memory.as_slice() }, } } } unsafe impl Send for LLVMBackend {} unsafe impl Sync for LLVMBackend {} pub struct LLVMBackend { module: *mut LLVMModule, #[allow(dead_code)] buffer: Arc<Buffer>, } impl LLVMBackend { pub fn new(module: Module, _intrinsics: Intrinsics) -> (Self, LLVMCache) { Target::initialize_x86(&InitializationConfig { asm_parser: true, asm_printer: true, base: true, disassembler: true, info: true, machine_code: true, }); let triple = TargetMachine::get_default_triple().to_string(); let target = Target::from_triple(&triple).unwrap(); let target_machine = target .create_target_machine( &triple, &TargetMachine::get_host_cpu_name().to_string(), &TargetMachine::get_host_cpu_features().to_string(), OptimizationLevel::Aggressive, RelocMode::PIC, CodeModel::Default, ) .unwrap(); let memory_buffer = target_machine .write_to_memory_buffer(&module, FileType::Object) .unwrap(); let mem_buf_slice = memory_buffer.as_slice(); let callbacks = get_callbacks(); let mut module: *mut LLVMModule = ptr::null_mut(); let res = unsafe { module_load( mem_buf_slice.as_ptr(), mem_buf_slice.len(), callbacks, &mut module, ) }; static SIGNAL_HANDLER_INSTALLED: Once = Once::new(); SIGNAL_HANDLER_INSTALLED.call_once(|| unsafe { crate::platform::install_signal_handler(); }); if res!= LLVMResult::OK { panic!("failed to load object") } let buffer = Arc::new(Buffer::LlvmMemory(memory_buffer)); ( Self { module, buffer: Arc::clone(&buffer), }, LLVMCache { buffer }, ) } pub unsafe fn from_buffer(memory: Memory) -> Result<(Self, LLVMCache), String> { let callbacks = get_callbacks(); let mut module: *mut LLVMModule = ptr::null_mut(); let slice = memory.as_slice(); let res = module_load(slice.as_ptr(), slice.len(), callbacks, &mut module); if res!= LLVMResult::OK { return Err("failed to load object".to_string()); } static SIGNAL_HANDLER_INSTALLED: Once = Once::new(); SIGNAL_HANDLER_INSTALLED.call_once(|| { crate::platform::install_signal_handler(); }); let buffer = Arc::new(Buffer::Memory(memory)); Ok(( Self { module, buffer: Arc::clone(&buffer), }, LLVMCache { buffer }, )) } } impl Drop for LLVMBackend { fn drop(&mut self) { unsafe { module_delete(self.module) } } } impl RunnableModule for LLVMBackend { fn get_func( &self, info: &ModuleInfo, local_func_index: LocalFuncIndex, ) -> Option<NonNull<vm::Func>> { let index = info.imported_functions.len() + local_func_index.index(); let name = if cfg!(target_os = "macos") { format!("_fn{}", index) } else { format!("fn{}", index) }; let c_str = 
CString::new(name).ok()?; let ptr = unsafe { get_func_symbol(self.module, c_str.as_ptr()) }; NonNull::new(ptr as _) } fn get_trampoline(&self, _: &ModuleInfo, sig_index: SigIndex) -> Option<Wasm> { let trampoline: unsafe extern "C" fn( *mut vm::Ctx, NonNull<vm::Func>, *const u64, *mut u64, ) = unsafe { let name = if cfg!(target_os = "macos") { format!("_trmp{}", sig_index.index()) } else { format!("trmp{}", sig_index.index()) }; let c_str = CString::new(name).unwrap(); let symbol = get_func_symbol(self.module, c_str.as_ptr()); assert!(!symbol.is_null()); mem::transmute(symbol) }; Some(unsafe { Wasm::from_raw_parts(trampoline, invoke_trampoline, None) }) } unsafe fn do_early_trap(&self, data: Box<dyn Any>) ->! { throw_any(Box::leak(data)) } } unsafe impl Send for LLVMCache {} unsafe impl Sync for LLVMCache {} pub struct LLVMCache { buffer: Arc<Buffer>, } impl CacheGen for LLVMCache { fn generate_cache(&self) -> Result<(Box<[u8]>, Memory), CacheError> { let mut memory = Memory::with_size_protect(self.buffer.len(), Protect::ReadWrite) .map_err(CacheError::SerializeError)?; let buffer = self.buffer.deref(); unsafe { memory.as_slice_mut()[..buffer.len()].copy_from_slice(buffer); } Ok(([].as_ref().into(), memory)) } } #[cfg(feature = "disasm")] unsafe fn disass_ptr(ptr: *const u8, size: usize, inst_count: usize) { use capstone::arch::BuildsCapstone; let mut cs = capstone::Capstone::new() // Call builder-pattern .x86() // X86 architecture .mode(capstone::arch::x86::ArchMode::Mode64) // 64-bit mode .detail(true) // Generate extra instruction details .build() .expect("Failed to create Capstone object"); // Get disassembled instructions let insns = cs .disasm_count( std::slice::from_raw_parts(ptr, size), ptr as u64, inst_count, ) .expect("Failed to disassemble"); println!("count = {}", insns.len()); for insn in insns.iter() { println!( "0x{:x}: {:6} {}", insn.address(), insn.mnemonic().unwrap_or(""), insn.op_str().unwrap_or("") ); } }
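// Hypothetical refactor, not present in the original: the MemProtect -> PROT_* translation is
// spelled out twice above (in `alloc_memory` and in `protect_memory`) and could live in one
// helper. A hedged sketch:
#[allow(dead_code)]
fn prot_flags(protect: MemProtect) -> libc::c_int {
    match protect {
        MemProtect::NONE => PROT_NONE,
        MemProtect::READ => PROT_READ,
        MemProtect::READ_WRITE => PROT_READ | PROT_WRITE,
        MemProtect::READ_EXECUTE => PROT_READ | PROT_EXEC,
    }
}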
random_line_split
kernel.rs
use crate::core::kernel::*; use crate::core::program::*; use mesa_rust_util::ptr::*; use mesa_rust_util::string::*; use rusticl_opencl_gen::*; use std::collections::HashSet; use std::os::raw::c_void; use std::ptr; use std::slice; use std::sync::Arc; impl CLInfo<cl_kernel_info> for cl_kernel { fn query(&self, q: cl_kernel_info, _: &[u8]) -> CLResult<Vec<u8>> { let kernel = self.get_ref()?; Ok(match q { CL_KERNEL_ATTRIBUTES => cl_prop::<&str>(&kernel.attributes_string), CL_KERNEL_CONTEXT => { let ptr = Arc::as_ptr(&kernel.prog.context); cl_prop::<cl_context>(cl_context::from_ptr(ptr)) } CL_KERNEL_FUNCTION_NAME => cl_prop::<&str>(&kernel.name), CL_KERNEL_NUM_ARGS => cl_prop::<cl_uint>(kernel.args.len() as cl_uint), CL_KERNEL_PROGRAM => { let ptr = Arc::as_ptr(&kernel.prog); cl_prop::<cl_program>(cl_program::from_ptr(ptr)) } CL_KERNEL_REFERENCE_COUNT => cl_prop::<cl_uint>(self.refcnt()?), // CL_INVALID_VALUE if param_name is not one of the supported values _ => return Err(CL_INVALID_VALUE), }) } } impl CLInfoObj<cl_kernel_arg_info, cl_uint> for cl_kernel { fn query(&self, idx: cl_uint, q: cl_kernel_arg_info) -> CLResult<Vec<u8>> { let kernel = self.get_ref()?; // CL_INVALID_ARG_INDEX if arg_index is not a valid argument index. if idx as usize >= kernel.args.len() { return Err(CL_INVALID_ARG_INDEX); } Ok(match *q { CL_KERNEL_ARG_ACCESS_QUALIFIER => { cl_prop::<cl_kernel_arg_access_qualifier>(kernel.access_qualifier(idx)) } CL_KERNEL_ARG_ADDRESS_QUALIFIER => { cl_prop::<cl_kernel_arg_address_qualifier>(kernel.address_qualifier(idx)) } CL_KERNEL_ARG_NAME => cl_prop::<&str>(kernel.arg_name(idx)), CL_KERNEL_ARG_TYPE_NAME => cl_prop::<&str>(kernel.arg_type_name(idx)), CL_KERNEL_ARG_TYPE_QUALIFIER => { cl_prop::<cl_kernel_arg_type_qualifier>(kernel.type_qualifier(idx)) } // CL_INVALID_VALUE if param_name is not one of the supported values _ => return Err(CL_INVALID_VALUE), }) } } impl CLInfoObj<cl_kernel_work_group_info, cl_device_id> for cl_kernel { fn query(&self, dev: cl_device_id, q: cl_kernel_work_group_info) -> CLResult<Vec<u8>> { let kernel = self.get_ref()?; // CL_INVALID_DEVICE [..] if device is NULL but there is more than one device associated with kernel. let dev = if dev.is_null() { if kernel.prog.devs.len() > 1 { return Err(CL_INVALID_DEVICE); } else { kernel.prog.devs[0].clone() } } else { dev.get_arc()? 
}; // CL_INVALID_DEVICE if device is not in the list of devices associated with kernel if !kernel.prog.devs.contains(&dev) { return Err(CL_INVALID_DEVICE); } Ok(match *q { CL_KERNEL_COMPILE_WORK_GROUP_SIZE => cl_prop::<[usize; 3]>(kernel.work_group_size), CL_KERNEL_LOCAL_MEM_SIZE => cl_prop::<cl_ulong>(kernel.local_mem_size(&dev)), CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE => { cl_prop::<usize>(dev.subgroups() as usize) } CL_KERNEL_PRIVATE_MEM_SIZE => cl_prop::<cl_ulong>(kernel.priv_mem_size(&dev)), // TODO CL_KERNEL_WORK_GROUP_SIZE => cl_prop::<usize>(dev.subgroups() as usize), // CL_INVALID_VALUE if param_name is not one of the supported values _ => return Err(CL_INVALID_VALUE), }) } } impl CLInfoObj<cl_kernel_sub_group_info, (cl_device_id, usize, *const c_void)> for cl_kernel { fn query( &self, (d, _input_value_size, _input_value): (cl_device_id, usize, *const c_void), _q: cl_program_build_info, ) -> CLResult<Vec<u8>> { let _kernel = self.get_ref()?; let _dev = d.get_arc()?; Err(CL_INVALID_OPERATION) } } const ZERO_ARR: [usize; 3] = [0; 3]; /// # Safety /// /// This function is only safe when called on an array of `work_dim` length unsafe fn kernel_work_arr_or_default<'a>(arr: *const usize, work_dim: cl_uint) -> &'a [usize] { if !arr.is_null() { slice::from_raw_parts(arr, work_dim as usize) } else { &ZERO_ARR } } fn get_devices_with_valid_build(p: &Arc<Program>) -> CLResult<Vec<&Arc<Device>>> { // CL_INVALID_PROGRAM_EXECUTABLE if there is no successfully built executable for program. let devs: Vec<_> = p .devs .iter() .filter(|d| p.status(d) == CL_BUILD_SUCCESS as cl_build_status) .collect(); if devs.is_empty() { return Err(CL_INVALID_PROGRAM_EXECUTABLE); } Ok(devs) } pub fn create_kernel( program: cl_program, kernel_name: *const ::std::os::raw::c_char, ) -> CLResult<cl_kernel> { let p = program.get_arc()?; let name = c_string_to_string(kernel_name); // CL_INVALID_VALUE if kernel_name is NULL. if kernel_name.is_null() { return Err(CL_INVALID_VALUE); } // CL_INVALID_PROGRAM_EXECUTABLE if there is no successfully built executable for program. if p.kernels().is_empty() { return Err(CL_INVALID_PROGRAM_EXECUTABLE); } // CL_INVALID_KERNEL_NAME if kernel_name is not found in program. if !p.kernels().contains(&name) { return Err(CL_INVALID_KERNEL_NAME); } // CL_INVALID_KERNEL_DEFINITION if the function definition for __kernel function given by // kernel_name such as the number of arguments, the argument types are not the same for all // devices for which the program executable has been built. let devs = get_devices_with_valid_build(&p)?; let kernel_args: HashSet<_> = devs.iter().map(|d| p.args(d, &name)).collect(); if kernel_args.len() != 1 { return Err(CL_INVALID_KERNEL_DEFINITION); } Ok(cl_kernel::from_arc(Kernel::new( name, p, kernel_args.into_iter().next().unwrap(), ))) } pub fn create_kernels_in_program( program: cl_program, num_kernels: cl_uint, kernels: *mut cl_kernel, num_kernels_ret: *mut cl_uint, ) -> CLResult<()> { let p = program.get_arc()?; let devs = get_devices_with_valid_build(&p)?; // CL_INVALID_VALUE if kernels is not NULL and num_kernels is less than the number of kernels // in program.
if !kernels.is_null() && p.kernels().len() > num_kernels as usize { return Err(CL_INVALID_VALUE); } let mut num_kernels = 0; for name in p.kernels() { let kernel_args: HashSet<_> = devs.iter().map(|d| p.args(d, &name)).collect(); // Kernel objects are not created for any __kernel functions in program that do not have the // same function definition across all devices for which a program executable has been // successfully built. if kernel_args.len() != 1 { continue; } if !kernels.is_null() { // we just assume the client isn't stupid unsafe { kernels .add(num_kernels as usize) .write(cl_kernel::from_arc(Kernel::new( name, p.clone(), kernel_args.into_iter().next().unwrap(), ))); } } num_kernels += 1; } num_kernels_ret.write_checked(num_kernels); Ok(()) } pub fn set_kernel_arg( kernel: cl_kernel, arg_index: cl_uint, arg_size: usize, arg_value: *const ::std::os::raw::c_void, ) -> CLResult<()> { let k = kernel.get_arc()?; // CL_INVALID_ARG_INDEX if arg_index is not a valid argument index. if let Some(arg) = k.args.get(arg_index as usize) { // CL_INVALID_ARG_SIZE if arg_size does not match the size of the data type for an argument // that is not a memory object or if the argument is a memory object and // arg_size != sizeof(cl_mem) or if arg_size is zero and the argument is declared with the // local qualifier or if the argument is a sampler and arg_size != sizeof(cl_sampler). match arg.kind { KernelArgType::MemLocal => { if arg_size == 0 { return Err(CL_INVALID_ARG_SIZE); } } KernelArgType::MemGlobal | KernelArgType::MemConstant | KernelArgType::Image | KernelArgType::RWImage | KernelArgType::Texture => { if arg_size != std::mem::size_of::<cl_mem>() { return Err(CL_INVALID_ARG_SIZE); } } _ => { if arg.size != arg_size { return Err(CL_INVALID_ARG_SIZE); } } } // CL_INVALID_ARG_VALUE if arg_value specified is not a valid value. match arg.kind { // If the argument is declared with the local qualifier, the arg_value entry must be // NULL. KernelArgType::MemLocal => { if !arg_value.is_null() { return Err(CL_INVALID_ARG_VALUE); }
                if arg_value.is_null() {
                    return Err(CL_INVALID_ARG_VALUE);
                }
            }
            _ => {}
        };

        // let's create the arg now
        let arg = unsafe {
            if arg.dead {
                KernelArgValue::None
            } else {
                match arg.kind {
                    KernelArgType::Constant => KernelArgValue::Constant(
                        slice::from_raw_parts(arg_value.cast(), arg_size).to_vec(),
                    ),
                    KernelArgType::MemConstant | KernelArgType::MemGlobal => {
                        let ptr: *const cl_mem = arg_value.cast();
                        if ptr.is_null() || (*ptr).is_null() {
                            KernelArgValue::None
                        } else {
                            KernelArgValue::MemObject((*ptr).get_arc()?)
                        }
                    }
                    KernelArgType::MemLocal => KernelArgValue::LocalMem(arg_size),
                    KernelArgType::Image | KernelArgType::RWImage | KernelArgType::Texture => {
                        let img: *const cl_mem = arg_value.cast();
                        KernelArgValue::MemObject((*img).get_arc()?)
                    }
                    KernelArgType::Sampler => {
                        let ptr: *const cl_sampler = arg_value.cast();
                        KernelArgValue::Sampler((*ptr).get_arc()?)
                    }
                }
            }
        };
        k.values.get(arg_index as usize).unwrap().replace(Some(arg));
        Ok(())
    } else {
        Err(CL_INVALID_ARG_INDEX)
    }

    //• CL_INVALID_DEVICE_QUEUE for an argument declared to be of type queue_t when the
    //  specified arg_value is not a valid device queue object. This error code is missing
    //  before version 2.0.
    //• CL_INVALID_ARG_VALUE if the argument is an image declared with the read_only qualifier
    //  and arg_value refers to an image object created with cl_mem_flags of CL_MEM_WRITE_ONLY,
    //  or if the image argument is declared with the write_only qualifier and arg_value refers
    //  to an image object created with cl_mem_flags of CL_MEM_READ_ONLY.
    //• CL_MAX_SIZE_RESTRICTION_EXCEEDED if the size in bytes of the memory object (if the
    //  argument is a memory object) or arg_size (if the argument is declared with the local
    //  qualifier) exceeds a language-specified maximum size restriction for this argument,
    //  such as the MaxByteOffset SPIR-V decoration. This error code is missing before
    //  version 2.2.
}
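// For memory-object and sampler arguments, set_kernel_arg above receives a pointer *to*
// the handle (e.g. a `*const cl_mem`), not the handle itself, so the value is read
// through one extra indirection. A self-contained model of that read, using a plain
// usize as a stand-in for the handle type (assumed sketch, not part of the original
// source):
#[cfg(test)]
mod arg_value_indirection_sketch {
    use std::ffi::c_void;

    #[test]
    fn arg_value_points_at_the_handle() {
        let handle: usize = 0xdead_beef; // stands in for a cl_mem handle
        let arg_value: *const c_void = (&handle as *const usize).cast();
        // Mirrors `let ptr: *const cl_mem = arg_value.cast(); ... *ptr` above.
        let read = unsafe { *(arg_value as *const usize) };
        assert_eq!(read, handle);
    }
}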
pub fn enqueue_ndrange_kernel(
    command_queue: cl_command_queue,
    kernel: cl_kernel,
    work_dim: cl_uint,
    global_work_offset: *const usize,
    global_work_size: *const usize,
    local_work_size: *const usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = command_queue.get_arc()?;
    let k = kernel.get_arc()?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_CONTEXT if context associated with command_queue and kernel are not the same
    if q.context != k.prog.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_PROGRAM_EXECUTABLE if there is no successfully built program executable
    // available for device associated with command_queue.
    if k.prog.status(&q.device) != CL_BUILD_SUCCESS as cl_build_status {
        return Err(CL_INVALID_PROGRAM_EXECUTABLE);
    }

    // CL_INVALID_KERNEL_ARGS if the kernel argument values have not been specified.
    if k.values.iter().any(|v| v.borrow().is_none()) {
        return Err(CL_INVALID_KERNEL_ARGS);
    }

    // CL_INVALID_WORK_DIMENSION if work_dim is not a valid value (i.e. a value between 1 and
    // CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS).
    if work_dim == 0 || work_dim > q.device.max_grid_dimensions() {
        return Err(CL_INVALID_WORK_DIMENSION);
    }

    // we assume the application gets it right and doesn't pass shorter arrays than actually
    // needed.
    let global_work_size = unsafe { kernel_work_arr_or_default(global_work_size, work_dim) };
    let local_work_size = unsafe { kernel_work_arr_or_default(local_work_size, work_dim) };
    let global_work_offset = unsafe { kernel_work_arr_or_default(global_work_offset, work_dim) };

    let device_bits = q.device.address_bits();
    let device_max = u64::MAX >> (u64::BITS - device_bits);

    for i in 0..work_dim as usize {
        let lws = local_work_size[i];
        let gws = global_work_size[i];
        let gwo = global_work_offset[i];

        // CL_INVALID_WORK_ITEM_SIZE if the number of work-items specified in any of
        // local_work_size[0], … local_work_size[work_dim - 1] is greater than the
        // corresponding values specified by
        // CL_DEVICE_MAX_WORK_ITEM_SIZES[0], …, CL_DEVICE_MAX_WORK_ITEM_SIZES[work_dim - 1].
        if lws > q.device.max_block_sizes()[i] {
            return Err(CL_INVALID_WORK_ITEM_SIZE);
        }

        // CL_INVALID_WORK_GROUP_SIZE if the work-group size must be uniform and the
        // local_work_size is not NULL, [...] if the global_work_size is not evenly divisible
        // by the local_work_size.
        if lws != 0 && gws % lws != 0 {
            return Err(CL_INVALID_WORK_GROUP_SIZE);
        }

        // CL_INVALID_WORK_GROUP_SIZE if local_work_size is specified and does not match the
        // required work-group size for kernel in the program source.
        if lws != 0 && k.work_group_size[i] != 0 && lws != k.work_group_size[i] {
            return Err(CL_INVALID_WORK_GROUP_SIZE);
        }

        // CL_INVALID_GLOBAL_WORK_SIZE if any of the values specified in global_work_size[0], …
        // global_work_size[work_dim - 1] exceed the maximum value representable by size_t on
        // the device on which the kernel-instance will be enqueued.
        if gws as u64 > device_max {
            return Err(CL_INVALID_GLOBAL_WORK_SIZE);
        }

        // CL_INVALID_GLOBAL_OFFSET if the value specified in global_work_size + the
        // corresponding values in global_work_offset for any dimensions is greater than the
        // maximum value representable by size_t on the device on which the kernel-instance
        // will be enqueued
        if u64::checked_add(gws as u64, gwo as u64)
            .filter(|&x| x <= device_max)
            .is_none()
        {
            return Err(CL_INVALID_GLOBAL_OFFSET);
        }
    }

    // If global_work_size is NULL, or the value in any passed dimension is 0, then the kernel
    // command will trivially succeed after its event dependencies are satisfied and
    // subsequently update its completion event.
    let cb: EventSig = if global_work_size.contains(&0) {
        Box::new(|_, _| Ok(()))
    } else {
        k.launch(
            &q,
            work_dim,
            local_work_size,
            global_work_size,
            global_work_offset,
        )?
    };

    create_and_queue(q, CL_COMMAND_NDRANGE_KERNEL, evs, event, false, cb)

    //• CL_INVALID_WORK_GROUP_SIZE if local_work_size is specified and is not consistent with
    //  the required number of sub-groups for kernel in the program source.
    //• CL_INVALID_WORK_GROUP_SIZE if local_work_size is specified and the total number of
    //  work-items in the work-group computed as
    //  local_work_size[0] × … local_work_size[work_dim - 1] is greater than the value
    //  specified by CL_KERNEL_WORK_GROUP_SIZE in the Kernel Object Device Queries table.
    //• CL_MISALIGNED_SUB_BUFFER_OFFSET if a sub-buffer object is specified as the value for an
    //  argument that is a buffer object and the offset specified when the sub-buffer object is
    //  created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated
    //  with queue. This error code
    //• CL_INVALID_IMAGE_SIZE if an image object is specified as an argument value and the
    //  image dimensions (image width, height, specified or compute row and/or slice pitch) are
    //  not supported by device associated with queue.
    //• CL_IMAGE_FORMAT_NOT_SUPPORTED if an image object is specified as an argument value and
    //  the image format (image channel order and data type) is not supported by device
    //  associated with queue.
    //• CL_OUT_OF_RESOURCES if there is a failure to queue the execution instance of kernel on
    //  the command-queue because of insufficient resources needed to execute the kernel. For
    //  example, the explicitly specified local_work_size causes a failure to execute the
    //  kernel because of insufficient resources such as registers or local memory. Another
    //  example would be the number of read-only image args used in kernel exceed the
    //  CL_DEVICE_MAX_READ_IMAGE_ARGS value for device or the number of write-only and
    //  read-write image args used in kernel exceed the CL_DEVICE_MAX_READ_WRITE_IMAGE_ARGS
    //  value for device or the number of samplers used in kernel exceed
    //  CL_DEVICE_MAX_SAMPLERS for device.
    //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data
    //  store associated with image or buffer objects specified as arguments to kernel.
    //• CL_INVALID_OPERATION if SVM pointers are passed as arguments to a kernel and the device
    //  does not support SVM or if system pointers are passed as arguments to a kernel and/or
    //  stored inside SVM allocations passed as kernel arguments and the device does not
    //  support fine grain system SVM allocations.
}
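// The bounds checks above derive the device's size_t range from its address width:
// `u64::MAX >> (u64::BITS - address_bits)` is the largest value a device-side size_t can
// hold. A small sketch verifying that shift for common widths (illustrative only, not
// part of the original source):
#[cfg(test)]
mod device_max_sketch {
    #[test]
    fn device_max_for_common_address_widths() {
        let device_max = |bits: u32| u64::MAX >> (u64::BITS - bits);
        assert_eq!(device_max(32), u32::MAX as u64); // 0xFFFF_FFFF
        assert_eq!(device_max(64), u64::MAX);
    }
}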
pub fn enqueue_task(
    command_queue: cl_command_queue,
    kernel: cl_kernel,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    // clEnqueueTask is equivalent to calling clEnqueueNDRangeKernel with work_dim set to 1,
    // global_work_offset set to NULL, global_work_size[0] set to 1, and local_work_size[0]
    // set to 1.
    enqueue_ndrange_kernel(
        command_queue,
        kernel,
        1,
        ptr::null(),
        [1, 1, 1].as_ptr(),
        [1, 0, 0].as_ptr(),
        num
            }
            // If the argument is of type sampler_t, the arg_value entry must be a pointer to
            // the sampler object.
            KernelArgType::Constant | KernelArgType::Sampler => {
random_line_split
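// Standalone sketch recapping the uniform work-group rule enforced in
// enqueue_ndrange_kernel in the kernel.rs sample above: each global dimension must
// divide evenly by the corresponding local dimension (illustrative only, not part of
// either sample):
#[cfg(test)]
mod uniform_work_group_sketch {
    #[test]
    fn global_size_must_be_divisible_by_local_size() {
        let (gws, lws) = (1024usize, 64usize);
        assert_eq!(gws % lws, 0); // a valid launch configuration
        let (gws, lws) = (1000usize, 64usize);
        assert_ne!(gws % lws, 0); // would be rejected with CL_INVALID_WORK_GROUP_SIZE
    }
}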
kernel.rs
use crate::core::kernel::*; use crate::core::program::*; use mesa_rust_util::ptr::*; use mesa_rust_util::string::*; use rusticl_opencl_gen::*; use std::collections::HashSet; use std::os::raw::c_void; use std::ptr; use std::slice; use std::sync::Arc; impl CLInfo<cl_kernel_info> for cl_kernel { fn query(&self, q: cl_kernel_info, _: &[u8]) -> CLResult<Vec<u8>> { let kernel = self.get_ref()?; Ok(match q { CL_KERNEL_ATTRIBUTES => cl_prop::<&str>(&kernel.attributes_string), CL_KERNEL_CONTEXT => { let ptr = Arc::as_ptr(&kernel.prog.context); cl_prop::<cl_context>(cl_context::from_ptr(ptr)) } CL_KERNEL_FUNCTION_NAME => cl_prop::<&str>(&kernel.name), CL_KERNEL_NUM_ARGS => cl_prop::<cl_uint>(kernel.args.len() as cl_uint), CL_KERNEL_PROGRAM => { let ptr = Arc::as_ptr(&kernel.prog); cl_prop::<cl_program>(cl_program::from_ptr(ptr)) } CL_KERNEL_REFERENCE_COUNT => cl_prop::<cl_uint>(self.refcnt()?), // CL_INVALID_VALUE if param_name is not one of the supported values _ => return Err(CL_INVALID_VALUE), }) } } impl CLInfoObj<cl_kernel_arg_info, cl_uint> for cl_kernel { fn query(&self, idx: cl_uint, q: cl_kernel_arg_info) -> CLResult<Vec<u8>> { let kernel = self.get_ref()?; // CL_INVALID_ARG_INDEX if arg_index is not a valid argument index. if idx as usize >= kernel.args.len() { return Err(CL_INVALID_ARG_INDEX); } Ok(match *q { CL_KERNEL_ARG_ACCESS_QUALIFIER => { cl_prop::<cl_kernel_arg_access_qualifier>(kernel.access_qualifier(idx)) } CL_KERNEL_ARG_ADDRESS_QUALIFIER => { cl_prop::<cl_kernel_arg_address_qualifier>(kernel.address_qualifier(idx)) } CL_KERNEL_ARG_NAME => cl_prop::<&str>(kernel.arg_name(idx)), CL_KERNEL_ARG_TYPE_NAME => cl_prop::<&str>(kernel.arg_type_name(idx)), CL_KERNEL_ARG_TYPE_QUALIFIER => { cl_prop::<cl_kernel_arg_type_qualifier>(kernel.type_qualifier(idx)) } // CL_INVALID_VALUE if param_name is not one of the supported values _ => return Err(CL_INVALID_VALUE), }) } } impl CLInfoObj<cl_kernel_work_group_info, cl_device_id> for cl_kernel { fn query(&self, dev: cl_device_id, q: cl_kernel_work_group_info) -> CLResult<Vec<u8>> { let kernel = self.get_ref()?; // CL_INVALID_DEVICE [..] if device is NULL but there is more than one device associated with kernel. let dev = if dev.is_null() { if kernel.prog.devs.len() > 1 { return Err(CL_INVALID_DEVICE); } else { kernel.prog.devs[0].clone() } } else { dev.get_arc()? 
}; // CL_INVALID_DEVICE if device is not in the list of devices associated with kernel if!kernel.prog.devs.contains(&dev) { return Err(CL_INVALID_DEVICE); } Ok(match *q { CL_KERNEL_COMPILE_WORK_GROUP_SIZE => cl_prop::<[usize; 3]>(kernel.work_group_size), CL_KERNEL_LOCAL_MEM_SIZE => cl_prop::<cl_ulong>(kernel.local_mem_size(&dev)), CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE => { cl_prop::<usize>(dev.subgroups() as usize) } CL_KERNEL_PRIVATE_MEM_SIZE => cl_prop::<cl_ulong>(kernel.priv_mem_size(&dev)), // TODO CL_KERNEL_WORK_GROUP_SIZE => cl_prop::<usize>(dev.subgroups() as usize), // CL_INVALID_VALUE if param_name is not one of the supported values _ => return Err(CL_INVALID_VALUE), }) } } impl CLInfoObj<cl_kernel_sub_group_info, (cl_device_id, usize, *const c_void)> for cl_kernel { fn query( &self, (d, _input_value_size, _input_value): (cl_device_id, usize, *const c_void), _q: cl_program_build_info, ) -> CLResult<Vec<u8>> { let _kernel = self.get_ref()?; let _dev = d.get_arc()?; Err(CL_INVALID_OPERATION) } } const ZERO_ARR: [usize; 3] = [0; 3]; /// # Safety /// /// This function is only safe when called on an array of `work_dim` length unsafe fn kernel_work_arr_or_default<'a>(arr: *const usize, work_dim: cl_uint) -> &'a [usize] { if!arr.is_null() { slice::from_raw_parts(arr, work_dim as usize) } else { &ZERO_ARR } } fn get_devices_with_valid_build(p: &Arc<Program>) -> CLResult<Vec<&Arc<Device>>> { // CL_INVALID_PROGRAM_EXECUTABLE if there is no successfully built executable for program. let devs: Vec<_> = p .devs .iter() .filter(|d| p.status(d) == CL_BUILD_SUCCESS as cl_build_status) .collect(); if devs.is_empty() { return Err(CL_INVALID_PROGRAM_EXECUTABLE); } Ok(devs) } pub fn create_kernel( program: cl_program, kernel_name: *const ::std::os::raw::c_char, ) -> CLResult<cl_kernel> { let p = program.get_arc()?; let name = c_string_to_string(kernel_name); // CL_INVALID_VALUE if kernel_name is NULL. if kernel_name.is_null() { return Err(CL_INVALID_VALUE); } // CL_INVALID_PROGRAM_EXECUTABLE if there is no successfully built executable for program. if p.kernels().is_empty() { return Err(CL_INVALID_PROGRAM_EXECUTABLE); } // CL_INVALID_KERNEL_NAME if kernel_name is not found in program. if!p.kernels().contains(&name) { return Err(CL_INVALID_KERNEL_NAME); } // CL_INVALID_KERNEL_DEFINITION if the function definition for __kernel function given by // kernel_name such as the number of arguments, the argument types are not the same for all // devices for which the program executable has been built. let devs = get_devices_with_valid_build(&p)?; let kernel_args: HashSet<_> = devs.iter().map(|d| p.args(d, &name)).collect(); if kernel_args.len()!= 1 { return Err(CL_INVALID_KERNEL_DEFINITION); } Ok(cl_kernel::from_arc(Kernel::new( name, p, kernel_args.into_iter().next().unwrap(), ))) } pub fn create_kernels_in_program( program: cl_program, num_kernels: cl_uint, kernels: *mut cl_kernel, num_kernels_ret: *mut cl_uint, ) -> CLResult<()> { let p = program.get_arc()?; let devs = get_devices_with_valid_build(&p)?; // CL_INVALID_VALUE if kernels is not NULL and num_kernels is less than the number of kernels // in program. 
if!kernels.is_null() && p.kernels().len() > num_kernels as usize { return Err(CL_INVALID_VALUE); } let mut num_kernels = 0; for name in p.kernels() { let kernel_args: HashSet<_> = devs.iter().map(|d| p.args(d, &name)).collect(); // Kernel objects are not created for any __kernel functions in program that do not have the // same function definition across all devices for which a program executable has been // successfully built. if kernel_args.len()!= 1 { continue; } if!kernels.is_null() { // we just assume the client isn't stupid unsafe { kernels .add(num_kernels as usize) .write(cl_kernel::from_arc(Kernel::new( name, p.clone(), kernel_args.into_iter().next().unwrap(), ))); } } num_kernels += 1; } num_kernels_ret.write_checked(num_kernels); Ok(()) } pub fn
( kernel: cl_kernel, arg_index: cl_uint, arg_size: usize, arg_value: *const ::std::os::raw::c_void, ) -> CLResult<()> { let k = kernel.get_arc()?; // CL_INVALID_ARG_INDEX if arg_index is not a valid argument index. if let Some(arg) = k.args.get(arg_index as usize) { // CL_INVALID_ARG_SIZE if arg_size does not match the size of the data type for an argument // that is not a memory object or if the argument is a memory object and // arg_size!= sizeof(cl_mem) or if arg_size is zero and the argument is declared with the // local qualifier or if the argument is a sampler and arg_size!= sizeof(cl_sampler). match arg.kind { KernelArgType::MemLocal => { if arg_size == 0 { return Err(CL_INVALID_ARG_SIZE); } } KernelArgType::MemGlobal | KernelArgType::MemConstant | KernelArgType::Image | KernelArgType::RWImage | KernelArgType::Texture => { if arg_size!= std::mem::size_of::<cl_mem>() { return Err(CL_INVALID_ARG_SIZE); } } _ => { if arg.size!= arg_size { return Err(CL_INVALID_ARG_SIZE); } } } // CL_INVALID_ARG_VALUE if arg_value specified is not a valid value. match arg.kind { // If the argument is declared with the local qualifier, the arg_value entry must be // NULL. KernelArgType::MemLocal => { if!arg_value.is_null() { return Err(CL_INVALID_ARG_VALUE); } } // If the argument is of type sampler_t, the arg_value entry must be a pointer to the // sampler object. KernelArgType::Constant | KernelArgType::Sampler => { if arg_value.is_null() { return Err(CL_INVALID_ARG_VALUE); } } _ => {} }; // let's create the arg now let arg = unsafe { if arg.dead { KernelArgValue::None } else { match arg.kind { KernelArgType::Constant => KernelArgValue::Constant( slice::from_raw_parts(arg_value.cast(), arg_size).to_vec(), ), KernelArgType::MemConstant | KernelArgType::MemGlobal => { let ptr: *const cl_mem = arg_value.cast(); if ptr.is_null() || (*ptr).is_null() { KernelArgValue::None } else { KernelArgValue::MemObject((*ptr).get_arc()?) } } KernelArgType::MemLocal => KernelArgValue::LocalMem(arg_size), KernelArgType::Image | KernelArgType::RWImage | KernelArgType::Texture => { let img: *const cl_mem = arg_value.cast(); KernelArgValue::MemObject((*img).get_arc()?) } KernelArgType::Sampler => { let ptr: *const cl_sampler = arg_value.cast(); KernelArgValue::Sampler((*ptr).get_arc()?) } } } }; k.values.get(arg_index as usize).unwrap().replace(Some(arg)); Ok(()) } else { Err(CL_INVALID_ARG_INDEX) } //• CL_INVALID_DEVICE_QUEUE for an argument declared to be of type queue_t when the specified arg_value is not a valid device queue object. This error code is missing before version 2.0. //• CL_INVALID_ARG_VALUE if the argument is an image declared with the read_only qualifier and arg_value refers to an image object created with cl_mem_flags of CL_MEM_WRITE_ONLY or if the image argument is declared with the write_only qualifier and arg_value refers to an image object created with cl_mem_flags of CL_MEM_READ_ONLY. //• CL_MAX_SIZE_RESTRICTION_EXCEEDED if the size in bytes of the memory object (if the argument is a memory object) or arg_size (if the argument is declared with local qualifier) exceeds a language- specified maximum size restriction for this argument, such as the MaxByteOffset SPIR-V decoration. This error code is missing before version 2.2. 
} pub fn enqueue_ndrange_kernel( command_queue: cl_command_queue, kernel: cl_kernel, work_dim: cl_uint, global_work_offset: *const usize, global_work_size: *const usize, local_work_size: *const usize, num_events_in_wait_list: cl_uint, event_wait_list: *const cl_event, event: *mut cl_event, ) -> CLResult<()> { let q = command_queue.get_arc()?; let k = kernel.get_arc()?; let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?; // CL_INVALID_CONTEXT if context associated with command_queue and kernel are not the same if q.context!= k.prog.context { return Err(CL_INVALID_CONTEXT); } // CL_INVALID_PROGRAM_EXECUTABLE if there is no successfully built program executable available // for device associated with command_queue. if k.prog.status(&q.device)!= CL_BUILD_SUCCESS as cl_build_status { return Err(CL_INVALID_PROGRAM_EXECUTABLE); } // CL_INVALID_KERNEL_ARGS if the kernel argument values have not been specified. if k.values.iter().any(|v| v.borrow().is_none()) { return Err(CL_INVALID_KERNEL_ARGS); } // CL_INVALID_WORK_DIMENSION if work_dim is not a valid value (i.e. a value between 1 and // CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS). if work_dim == 0 || work_dim > q.device.max_grid_dimensions() { return Err(CL_INVALID_WORK_DIMENSION); } // we assume the application gets it right and doesn't pass shorter arrays then actually needed. let global_work_size = unsafe { kernel_work_arr_or_default(global_work_size, work_dim) }; let local_work_size = unsafe { kernel_work_arr_or_default(local_work_size, work_dim) }; let global_work_offset = unsafe { kernel_work_arr_or_default(global_work_offset, work_dim) }; let device_bits = q.device.address_bits(); let device_max = u64::MAX >> (u64::BITS - device_bits); for i in 0..work_dim as usize { let lws = local_work_size[i]; let gws = global_work_size[i]; let gwo = global_work_offset[i]; // CL_INVALID_WORK_ITEM_SIZE if the number of work-items specified in any of // local_work_size[0], … local_work_size[work_dim - 1] is greater than the corresponding // values specified by // CL_DEVICE_MAX_WORK_ITEM_SIZES[0], …, CL_DEVICE_MAX_WORK_ITEM_SIZES[work_dim - 1]. if lws > q.device.max_block_sizes()[i] { return Err(CL_INVALID_WORK_ITEM_SIZE); } // CL_INVALID_WORK_GROUP_SIZE if the work-group size must be uniform and the // local_work_size is not NULL, [...] if the global_work_size is not evenly divisible by // the local_work_size. if lws!= 0 && gws % lws!= 0 { return Err(CL_INVALID_WORK_GROUP_SIZE); } // CL_INVALID_WORK_GROUP_SIZE if local_work_size is specified and does not match the // required work-group size for kernel in the program source. if lws!= 0 && k.work_group_size[i]!= 0 && lws!= k.work_group_size[i] { return Err(CL_INVALID_WORK_GROUP_SIZE); } // CL_INVALID_GLOBAL_WORK_SIZE if any of the values specified in global_work_size[0], … // global_work_size[work_dim - 1] exceed the maximum value representable by size_t on // the device on which the kernel-instance will be enqueued. 
if gws as u64 > device_max { return Err(CL_INVALID_GLOBAL_WORK_SIZE); } // CL_INVALID_GLOBAL_OFFSET if the value specified in global_work_size + the // corresponding values in global_work_offset for any dimensions is greater than the // maximum value representable by size t on the device on which the kernel-instance // will be enqueued if u64::checked_add(gws as u64, gwo as u64) .filter(|&x| x <= device_max) .is_none() { return Err(CL_INVALID_GLOBAL_OFFSET); } } // If global_work_size is NULL, or the value in any passed dimension is 0 then the kernel // command will trivially succeed after its event dependencies are satisfied and subsequently // update its completion event. let cb: EventSig = if global_work_size.contains(&0) { Box::new(|_, _| Ok(())) } else { k.launch( &q, work_dim, local_work_size, global_work_size, global_work_offset, )? }; create_and_queue(q, CL_COMMAND_NDRANGE_KERNEL, evs, event, false, cb) //• CL_INVALID_WORK_GROUP_SIZE if local_work_size is specified and is not consistent with the required number of sub-groups for kernel in the program source. //• CL_INVALID_WORK_GROUP_SIZE if local_work_size is specified and the total number of work-items in the work-group computed as local_work_size[0] × … local_work_size[work_dim - 1] is greater than the value specified by CL_KERNEL_WORK_GROUP_SIZE in the Kernel Object Device Queries table. //• CL_MISALIGNED_SUB_BUFFER_OFFSET if a sub-buffer object is specified as the value for an argument that is a buffer object and the offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue. This error code //• CL_INVALID_IMAGE_SIZE if an image object is specified as an argument value and the image dimensions (image width, height, specified or compute row and/or slice pitch) are not supported by device associated with queue. //• CL_IMAGE_FORMAT_NOT_SUPPORTED if an image object is specified as an argument value and the image format (image channel order and data type) is not supported by device associated with queue. //• CL_OUT_OF_RESOURCES if there is a failure to queue the execution instance of kernel on the command-queue because of insufficient resources needed to execute the kernel. For example, the explicitly specified local_work_size causes a failure to execute the kernel because of insufficient resources such as registers or local memory. Another example would be the number of read-only image args used in kernel exceed the CL_DEVICE_MAX_READ_IMAGE_ARGS value for device or the number of write-only and read-write image args used in kernel exceed the CL_DEVICE_MAX_READ_WRITE_IMAGE_ARGS value for device or the number of samplers used in kernel exceed CL_DEVICE_MAX_SAMPLERS for device. //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with image or buffer objects specified as arguments to kernel. //• CL_INVALID_OPERATION if SVM pointers are passed as arguments to a kernel and the device does not support SVM or if system pointers are passed as arguments to a kernel and/or stored inside SVM allocations passed as kernel arguments and the device does not support fine grain system SVM allocations. 
} pub fn enqueue_task( command_queue: cl_command_queue, kernel: cl_kernel, num_events_in_wait_list: cl_uint, event_wait_list: *const cl_event, event: *mut cl_event, ) -> CLResult<()> { // clEnqueueTask is equivalent to calling clEnqueueNDRangeKernel with work_dim set to 1, // global_work_offset set to NULL, global_work_size[0] set to 1, and local_work_size[0] set to // 1. enqueue_ndrange_kernel( command_queue, kernel, 1, ptr::null(), [1, 1, 1].as_ptr(), [1, 0, 0].as_ptr(),
set_kernel_arg
identifier_name
kernel.rs
use crate::core::kernel::*; use crate::core::program::*; use mesa_rust_util::ptr::*; use mesa_rust_util::string::*; use rusticl_opencl_gen::*; use std::collections::HashSet; use std::os::raw::c_void; use std::ptr; use std::slice; use std::sync::Arc; impl CLInfo<cl_kernel_info> for cl_kernel { fn query(&self, q: cl_kernel_info, _: &[u8]) -> CLResult<Vec<u8>> { let kernel = self.get_ref()?; Ok(match q { CL_KERNEL_ATTRIBUTES => cl_prop::<&str>(&kernel.attributes_string), CL_KERNEL_CONTEXT => { let ptr = Arc::as_ptr(&kernel.prog.context); cl_prop::<cl_context>(cl_context::from_ptr(ptr)) } CL_KERNEL_FUNCTION_NAME => cl_prop::<&str>(&kernel.name), CL_KERNEL_NUM_ARGS => cl_prop::<cl_uint>(kernel.args.len() as cl_uint), CL_KERNEL_PROGRAM => { let ptr = Arc::as_ptr(&kernel.prog); cl_prop::<cl_program>(cl_program::from_ptr(ptr)) } CL_KERNEL_REFERENCE_COUNT => cl_prop::<cl_uint>(self.refcnt()?), // CL_INVALID_VALUE if param_name is not one of the supported values _ => return Err(CL_INVALID_VALUE), }) } } impl CLInfoObj<cl_kernel_arg_info, cl_uint> for cl_kernel { fn query(&self, idx: cl_uint, q: cl_kernel_arg_info) -> CLResult<Vec<u8>> { let kernel = self.get_ref()?; // CL_INVALID_ARG_INDEX if arg_index is not a valid argument index. if idx as usize >= kernel.args.len() { return Err(CL_INVALID_ARG_INDEX); } Ok(match *q { CL_KERNEL_ARG_ACCESS_QUALIFIER => { cl_prop::<cl_kernel_arg_access_qualifier>(kernel.access_qualifier(idx)) } CL_KERNEL_ARG_ADDRESS_QUALIFIER => { cl_prop::<cl_kernel_arg_address_qualifier>(kernel.address_qualifier(idx)) } CL_KERNEL_ARG_NAME => cl_prop::<&str>(kernel.arg_name(idx)), CL_KERNEL_ARG_TYPE_NAME => cl_prop::<&str>(kernel.arg_type_name(idx)), CL_KERNEL_ARG_TYPE_QUALIFIER => { cl_prop::<cl_kernel_arg_type_qualifier>(kernel.type_qualifier(idx)) } // CL_INVALID_VALUE if param_name is not one of the supported values _ => return Err(CL_INVALID_VALUE), }) } } impl CLInfoObj<cl_kernel_work_group_info, cl_device_id> for cl_kernel { fn query(&self, dev: cl_device_id, q: cl_kernel_work_group_info) -> CLResult<Vec<u8>> { let kernel = self.get_ref()?; // CL_INVALID_DEVICE [..] if device is NULL but there is more than one device associated with kernel. let dev = if dev.is_null() { if kernel.prog.devs.len() > 1 { return Err(CL_INVALID_DEVICE); } else { kernel.prog.devs[0].clone() } } else { dev.get_arc()? 
}; // CL_INVALID_DEVICE if device is not in the list of devices associated with kernel if!kernel.prog.devs.contains(&dev) { return Err(CL_INVALID_DEVICE); } Ok(match *q { CL_KERNEL_COMPILE_WORK_GROUP_SIZE => cl_prop::<[usize; 3]>(kernel.work_group_size), CL_KERNEL_LOCAL_MEM_SIZE => cl_prop::<cl_ulong>(kernel.local_mem_size(&dev)), CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE => { cl_prop::<usize>(dev.subgroups() as usize) } CL_KERNEL_PRIVATE_MEM_SIZE => cl_prop::<cl_ulong>(kernel.priv_mem_size(&dev)), // TODO CL_KERNEL_WORK_GROUP_SIZE => cl_prop::<usize>(dev.subgroups() as usize), // CL_INVALID_VALUE if param_name is not one of the supported values _ => return Err(CL_INVALID_VALUE), }) } } impl CLInfoObj<cl_kernel_sub_group_info, (cl_device_id, usize, *const c_void)> for cl_kernel { fn query( &self, (d, _input_value_size, _input_value): (cl_device_id, usize, *const c_void), _q: cl_program_build_info, ) -> CLResult<Vec<u8>> { let _kernel = self.get_ref()?; let _dev = d.get_arc()?; Err(CL_INVALID_OPERATION) } } const ZERO_ARR: [usize; 3] = [0; 3]; /// # Safety /// /// This function is only safe when called on an array of `work_dim` length unsafe fn kernel_work_arr_or_default<'a>(arr: *const usize, work_dim: cl_uint) -> &'a [usize] { if!arr.is_null() { slice::from_raw_parts(arr, work_dim as usize) } else { &ZERO_ARR } } fn get_devices_with_valid_build(p: &Arc<Program>) -> CLResult<Vec<&Arc<Device>>> { // CL_INVALID_PROGRAM_EXECUTABLE if there is no successfully built executable for program. let devs: Vec<_> = p .devs .iter() .filter(|d| p.status(d) == CL_BUILD_SUCCESS as cl_build_status) .collect(); if devs.is_empty() { return Err(CL_INVALID_PROGRAM_EXECUTABLE); } Ok(devs) } pub fn create_kernel( program: cl_program, kernel_name: *const ::std::os::raw::c_char, ) -> CLResult<cl_kernel>
// kernel_name such as the number of arguments, the argument types are not the same for all // devices for which the program executable has been built. let devs = get_devices_with_valid_build(&p)?; let kernel_args: HashSet<_> = devs.iter().map(|d| p.args(d, &name)).collect(); if kernel_args.len()!= 1 { return Err(CL_INVALID_KERNEL_DEFINITION); } Ok(cl_kernel::from_arc(Kernel::new( name, p, kernel_args.into_iter().next().unwrap(), ))) } pub fn create_kernels_in_program( program: cl_program, num_kernels: cl_uint, kernels: *mut cl_kernel, num_kernels_ret: *mut cl_uint, ) -> CLResult<()> { let p = program.get_arc()?; let devs = get_devices_with_valid_build(&p)?; // CL_INVALID_VALUE if kernels is not NULL and num_kernels is less than the number of kernels // in program. if!kernels.is_null() && p.kernels().len() > num_kernels as usize { return Err(CL_INVALID_VALUE); } let mut num_kernels = 0; for name in p.kernels() { let kernel_args: HashSet<_> = devs.iter().map(|d| p.args(d, &name)).collect(); // Kernel objects are not created for any __kernel functions in program that do not have the // same function definition across all devices for which a program executable has been // successfully built. if kernel_args.len()!= 1 { continue; } if!kernels.is_null() { // we just assume the client isn't stupid unsafe { kernels .add(num_kernels as usize) .write(cl_kernel::from_arc(Kernel::new( name, p.clone(), kernel_args.into_iter().next().unwrap(), ))); } } num_kernels += 1; } num_kernels_ret.write_checked(num_kernels); Ok(()) } pub fn set_kernel_arg( kernel: cl_kernel, arg_index: cl_uint, arg_size: usize, arg_value: *const ::std::os::raw::c_void, ) -> CLResult<()> { let k = kernel.get_arc()?; // CL_INVALID_ARG_INDEX if arg_index is not a valid argument index. if let Some(arg) = k.args.get(arg_index as usize) { // CL_INVALID_ARG_SIZE if arg_size does not match the size of the data type for an argument // that is not a memory object or if the argument is a memory object and // arg_size!= sizeof(cl_mem) or if arg_size is zero and the argument is declared with the // local qualifier or if the argument is a sampler and arg_size!= sizeof(cl_sampler). match arg.kind { KernelArgType::MemLocal => { if arg_size == 0 { return Err(CL_INVALID_ARG_SIZE); } } KernelArgType::MemGlobal | KernelArgType::MemConstant | KernelArgType::Image | KernelArgType::RWImage | KernelArgType::Texture => { if arg_size!= std::mem::size_of::<cl_mem>() { return Err(CL_INVALID_ARG_SIZE); } } _ => { if arg.size!= arg_size { return Err(CL_INVALID_ARG_SIZE); } } } // CL_INVALID_ARG_VALUE if arg_value specified is not a valid value. match arg.kind { // If the argument is declared with the local qualifier, the arg_value entry must be // NULL. KernelArgType::MemLocal => { if!arg_value.is_null() { return Err(CL_INVALID_ARG_VALUE); } } // If the argument is of type sampler_t, the arg_value entry must be a pointer to the // sampler object. KernelArgType::Constant | KernelArgType::Sampler => { if arg_value.is_null() { return Err(CL_INVALID_ARG_VALUE); } } _ => {} }; // let's create the arg now let arg = unsafe { if arg.dead { KernelArgValue::None } else { match arg.kind { KernelArgType::Constant => KernelArgValue::Constant( slice::from_raw_parts(arg_value.cast(), arg_size).to_vec(), ), KernelArgType::MemConstant | KernelArgType::MemGlobal => { let ptr: *const cl_mem = arg_value.cast(); if ptr.is_null() || (*ptr).is_null() { KernelArgValue::None } else { KernelArgValue::MemObject((*ptr).get_arc()?) 
} } KernelArgType::MemLocal => KernelArgValue::LocalMem(arg_size), KernelArgType::Image | KernelArgType::RWImage | KernelArgType::Texture => { let img: *const cl_mem = arg_value.cast(); KernelArgValue::MemObject((*img).get_arc()?) } KernelArgType::Sampler => { let ptr: *const cl_sampler = arg_value.cast(); KernelArgValue::Sampler((*ptr).get_arc()?) } } } }; k.values.get(arg_index as usize).unwrap().replace(Some(arg)); Ok(()) } else { Err(CL_INVALID_ARG_INDEX) } //• CL_INVALID_DEVICE_QUEUE for an argument declared to be of type queue_t when the specified arg_value is not a valid device queue object. This error code is missing before version 2.0. //• CL_INVALID_ARG_VALUE if the argument is an image declared with the read_only qualifier and arg_value refers to an image object created with cl_mem_flags of CL_MEM_WRITE_ONLY or if the image argument is declared with the write_only qualifier and arg_value refers to an image object created with cl_mem_flags of CL_MEM_READ_ONLY. //• CL_MAX_SIZE_RESTRICTION_EXCEEDED if the size in bytes of the memory object (if the argument is a memory object) or arg_size (if the argument is declared with local qualifier) exceeds a language- specified maximum size restriction for this argument, such as the MaxByteOffset SPIR-V decoration. This error code is missing before version 2.2. } pub fn enqueue_ndrange_kernel( command_queue: cl_command_queue, kernel: cl_kernel, work_dim: cl_uint, global_work_offset: *const usize, global_work_size: *const usize, local_work_size: *const usize, num_events_in_wait_list: cl_uint, event_wait_list: *const cl_event, event: *mut cl_event, ) -> CLResult<()> { let q = command_queue.get_arc()?; let k = kernel.get_arc()?; let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?; // CL_INVALID_CONTEXT if context associated with command_queue and kernel are not the same if q.context!= k.prog.context { return Err(CL_INVALID_CONTEXT); } // CL_INVALID_PROGRAM_EXECUTABLE if there is no successfully built program executable available // for device associated with command_queue. if k.prog.status(&q.device)!= CL_BUILD_SUCCESS as cl_build_status { return Err(CL_INVALID_PROGRAM_EXECUTABLE); } // CL_INVALID_KERNEL_ARGS if the kernel argument values have not been specified. if k.values.iter().any(|v| v.borrow().is_none()) { return Err(CL_INVALID_KERNEL_ARGS); } // CL_INVALID_WORK_DIMENSION if work_dim is not a valid value (i.e. a value between 1 and // CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS). if work_dim == 0 || work_dim > q.device.max_grid_dimensions() { return Err(CL_INVALID_WORK_DIMENSION); } // we assume the application gets it right and doesn't pass shorter arrays then actually needed. let global_work_size = unsafe { kernel_work_arr_or_default(global_work_size, work_dim) }; let local_work_size = unsafe { kernel_work_arr_or_default(local_work_size, work_dim) }; let global_work_offset = unsafe { kernel_work_arr_or_default(global_work_offset, work_dim) }; let device_bits = q.device.address_bits(); let device_max = u64::MAX >> (u64::BITS - device_bits); for i in 0..work_dim as usize { let lws = local_work_size[i]; let gws = global_work_size[i]; let gwo = global_work_offset[i]; // CL_INVALID_WORK_ITEM_SIZE if the number of work-items specified in any of // local_work_size[0], … local_work_size[work_dim - 1] is greater than the corresponding // values specified by // CL_DEVICE_MAX_WORK_ITEM_SIZES[0], …, CL_DEVICE_MAX_WORK_ITEM_SIZES[work_dim - 1]. 
if lws > q.device.max_block_sizes()[i] { return Err(CL_INVALID_WORK_ITEM_SIZE); } // CL_INVALID_WORK_GROUP_SIZE if the work-group size must be uniform and the // local_work_size is not NULL, [...] if the global_work_size is not evenly divisible by // the local_work_size. if lws!= 0 && gws % lws!= 0 { return Err(CL_INVALID_WORK_GROUP_SIZE); } // CL_INVALID_WORK_GROUP_SIZE if local_work_size is specified and does not match the // required work-group size for kernel in the program source. if lws!= 0 && k.work_group_size[i]!= 0 && lws!= k.work_group_size[i] { return Err(CL_INVALID_WORK_GROUP_SIZE); } // CL_INVALID_GLOBAL_WORK_SIZE if any of the values specified in global_work_size[0], … // global_work_size[work_dim - 1] exceed the maximum value representable by size_t on // the device on which the kernel-instance will be enqueued. if gws as u64 > device_max { return Err(CL_INVALID_GLOBAL_WORK_SIZE); } // CL_INVALID_GLOBAL_OFFSET if the value specified in global_work_size + the // corresponding values in global_work_offset for any dimensions is greater than the // maximum value representable by size t on the device on which the kernel-instance // will be enqueued if u64::checked_add(gws as u64, gwo as u64) .filter(|&x| x <= device_max) .is_none() { return Err(CL_INVALID_GLOBAL_OFFSET); } } // If global_work_size is NULL, or the value in any passed dimension is 0 then the kernel // command will trivially succeed after its event dependencies are satisfied and subsequently // update its completion event. let cb: EventSig = if global_work_size.contains(&0) { Box::new(|_, _| Ok(())) } else { k.launch( &q, work_dim, local_work_size, global_work_size, global_work_offset, )? }; create_and_queue(q, CL_COMMAND_NDRANGE_KERNEL, evs, event, false, cb) //• CL_INVALID_WORK_GROUP_SIZE if local_work_size is specified and is not consistent with the required number of sub-groups for kernel in the program source. //• CL_INVALID_WORK_GROUP_SIZE if local_work_size is specified and the total number of work-items in the work-group computed as local_work_size[0] × … local_work_size[work_dim - 1] is greater than the value specified by CL_KERNEL_WORK_GROUP_SIZE in the Kernel Object Device Queries table. //• CL_MISALIGNED_SUB_BUFFER_OFFSET if a sub-buffer object is specified as the value for an argument that is a buffer object and the offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue. This error code //• CL_INVALID_IMAGE_SIZE if an image object is specified as an argument value and the image dimensions (image width, height, specified or compute row and/or slice pitch) are not supported by device associated with queue. //• CL_IMAGE_FORMAT_NOT_SUPPORTED if an image object is specified as an argument value and the image format (image channel order and data type) is not supported by device associated with queue. //• CL_OUT_OF_RESOURCES if there is a failure to queue the execution instance of kernel on the command-queue because of insufficient resources needed to execute the kernel. For example, the explicitly specified local_work_size causes a failure to execute the kernel because of insufficient resources such as registers or local memory. 
Another example would be the number of read-only image args used in kernel exceed the CL_DEVICE_MAX_READ_IMAGE_ARGS value for device or the number of write-only and read-write image args used in kernel exceed the CL_DEVICE_MAX_READ_WRITE_IMAGE_ARGS value for device or the number of samplers used in kernel exceed CL_DEVICE_MAX_SAMPLERS for device. //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with image or buffer objects specified as arguments to kernel. //• CL_INVALID_OPERATION if SVM pointers are passed as arguments to a kernel and the device does not support SVM or if system pointers are passed as arguments to a kernel and/or stored inside SVM allocations passed as kernel arguments and the device does not support fine grain system SVM allocations. } pub fn enqueue_task( command_queue: cl_command_queue, kernel: cl_kernel, num_events_in_wait_list: cl_uint, event_wait_list: *const cl_event, event: *mut cl_event, ) -> CLResult<()> { // clEnqueueTask is equivalent to calling clEnqueueNDRangeKernel with work_dim set to 1, // global_work_offset set to NULL, global_work_size[0] set to 1, and local_work_size[0] set to // 1. enqueue_ndrange_kernel( command_queue, kernel, 1, ptr::null(), [1, 1, 1].as_ptr(), [1, 0, 0].as_ptr(),
{ let p = program.get_arc()?; let name = c_string_to_string(kernel_name); // CL_INVALID_VALUE if kernel_name is NULL. if kernel_name.is_null() { return Err(CL_INVALID_VALUE); } // CL_INVALID_PROGRAM_EXECUTABLE if there is no successfully built executable for program. if p.kernels().is_empty() { return Err(CL_INVALID_PROGRAM_EXECUTABLE); } // CL_INVALID_KERNEL_NAME if kernel_name is not found in program. if !p.kernels().contains(&name) { return Err(CL_INVALID_KERNEL_NAME); } // CL_INVALID_KERNEL_DEFINITION if the function definition for __kernel function given by
identifier_body
kernel.rs
use crate::core::kernel::*; use crate::core::program::*; use mesa_rust_util::ptr::*; use mesa_rust_util::string::*; use rusticl_opencl_gen::*; use std::collections::HashSet; use std::os::raw::c_void; use std::ptr; use std::slice; use std::sync::Arc; impl CLInfo<cl_kernel_info> for cl_kernel { fn query(&self, q: cl_kernel_info, _: &[u8]) -> CLResult<Vec<u8>> { let kernel = self.get_ref()?; Ok(match q { CL_KERNEL_ATTRIBUTES => cl_prop::<&str>(&kernel.attributes_string), CL_KERNEL_CONTEXT => { let ptr = Arc::as_ptr(&kernel.prog.context); cl_prop::<cl_context>(cl_context::from_ptr(ptr)) } CL_KERNEL_FUNCTION_NAME => cl_prop::<&str>(&kernel.name), CL_KERNEL_NUM_ARGS => cl_prop::<cl_uint>(kernel.args.len() as cl_uint), CL_KERNEL_PROGRAM => { let ptr = Arc::as_ptr(&kernel.prog); cl_prop::<cl_program>(cl_program::from_ptr(ptr)) } CL_KERNEL_REFERENCE_COUNT => cl_prop::<cl_uint>(self.refcnt()?), // CL_INVALID_VALUE if param_name is not one of the supported values _ => return Err(CL_INVALID_VALUE), }) } } impl CLInfoObj<cl_kernel_arg_info, cl_uint> for cl_kernel { fn query(&self, idx: cl_uint, q: cl_kernel_arg_info) -> CLResult<Vec<u8>> { let kernel = self.get_ref()?; // CL_INVALID_ARG_INDEX if arg_index is not a valid argument index. if idx as usize >= kernel.args.len() { return Err(CL_INVALID_ARG_INDEX); } Ok(match *q { CL_KERNEL_ARG_ACCESS_QUALIFIER => { cl_prop::<cl_kernel_arg_access_qualifier>(kernel.access_qualifier(idx)) } CL_KERNEL_ARG_ADDRESS_QUALIFIER => { cl_prop::<cl_kernel_arg_address_qualifier>(kernel.address_qualifier(idx)) } CL_KERNEL_ARG_NAME => cl_prop::<&str>(kernel.arg_name(idx)), CL_KERNEL_ARG_TYPE_NAME => cl_prop::<&str>(kernel.arg_type_name(idx)), CL_KERNEL_ARG_TYPE_QUALIFIER => { cl_prop::<cl_kernel_arg_type_qualifier>(kernel.type_qualifier(idx)) } // CL_INVALID_VALUE if param_name is not one of the supported values _ => return Err(CL_INVALID_VALUE), }) } } impl CLInfoObj<cl_kernel_work_group_info, cl_device_id> for cl_kernel { fn query(&self, dev: cl_device_id, q: cl_kernel_work_group_info) -> CLResult<Vec<u8>> { let kernel = self.get_ref()?; // CL_INVALID_DEVICE [..] if device is NULL but there is more than one device associated with kernel. let dev = if dev.is_null() { if kernel.prog.devs.len() > 1 { return Err(CL_INVALID_DEVICE); } else { kernel.prog.devs[0].clone() } } else { dev.get_arc()? 
}; // CL_INVALID_DEVICE if device is not in the list of devices associated with kernel if!kernel.prog.devs.contains(&dev) { return Err(CL_INVALID_DEVICE); } Ok(match *q { CL_KERNEL_COMPILE_WORK_GROUP_SIZE => cl_prop::<[usize; 3]>(kernel.work_group_size), CL_KERNEL_LOCAL_MEM_SIZE => cl_prop::<cl_ulong>(kernel.local_mem_size(&dev)), CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE => { cl_prop::<usize>(dev.subgroups() as usize) } CL_KERNEL_PRIVATE_MEM_SIZE => cl_prop::<cl_ulong>(kernel.priv_mem_size(&dev)), // TODO CL_KERNEL_WORK_GROUP_SIZE => cl_prop::<usize>(dev.subgroups() as usize), // CL_INVALID_VALUE if param_name is not one of the supported values _ => return Err(CL_INVALID_VALUE), }) } } impl CLInfoObj<cl_kernel_sub_group_info, (cl_device_id, usize, *const c_void)> for cl_kernel { fn query( &self, (d, _input_value_size, _input_value): (cl_device_id, usize, *const c_void), _q: cl_program_build_info, ) -> CLResult<Vec<u8>> { let _kernel = self.get_ref()?; let _dev = d.get_arc()?; Err(CL_INVALID_OPERATION) } } const ZERO_ARR: [usize; 3] = [0; 3]; /// # Safety /// /// This function is only safe when called on an array of `work_dim` length unsafe fn kernel_work_arr_or_default<'a>(arr: *const usize, work_dim: cl_uint) -> &'a [usize] { if!arr.is_null() { slice::from_raw_parts(arr, work_dim as usize) } else { &ZERO_ARR } } fn get_devices_with_valid_build(p: &Arc<Program>) -> CLResult<Vec<&Arc<Device>>> { // CL_INVALID_PROGRAM_EXECUTABLE if there is no successfully built executable for program. let devs: Vec<_> = p .devs .iter() .filter(|d| p.status(d) == CL_BUILD_SUCCESS as cl_build_status) .collect(); if devs.is_empty() { return Err(CL_INVALID_PROGRAM_EXECUTABLE); } Ok(devs) } pub fn create_kernel( program: cl_program, kernel_name: *const ::std::os::raw::c_char, ) -> CLResult<cl_kernel> { let p = program.get_arc()?; let name = c_string_to_string(kernel_name); // CL_INVALID_VALUE if kernel_name is NULL. if kernel_name.is_null() { return Err(CL_INVALID_VALUE); } // CL_INVALID_PROGRAM_EXECUTABLE if there is no successfully built executable for program. if p.kernels().is_empty() { return Err(CL_INVALID_PROGRAM_EXECUTABLE); } // CL_INVALID_KERNEL_NAME if kernel_name is not found in program. if!p.kernels().contains(&name) { return Err(CL_INVALID_KERNEL_NAME); } // CL_INVALID_KERNEL_DEFINITION if the function definition for __kernel function given by // kernel_name such as the number of arguments, the argument types are not the same for all // devices for which the program executable has been built. let devs = get_devices_with_valid_build(&p)?; let kernel_args: HashSet<_> = devs.iter().map(|d| p.args(d, &name)).collect(); if kernel_args.len()!= 1 { return Err(CL_INVALID_KERNEL_DEFINITION); } Ok(cl_kernel::from_arc(Kernel::new( name, p, kernel_args.into_iter().next().unwrap(), ))) } pub fn create_kernels_in_program( program: cl_program, num_kernels: cl_uint, kernels: *mut cl_kernel, num_kernels_ret: *mut cl_uint, ) -> CLResult<()> { let p = program.get_arc()?; let devs = get_devices_with_valid_build(&p)?; // CL_INVALID_VALUE if kernels is not NULL and num_kernels is less than the number of kernels // in program. 
if!kernels.is_null() && p.kernels().len() > num_kernels as usize { return Err(CL_INVALID_VALUE); } let mut num_kernels = 0; for name in p.kernels() { let kernel_args: HashSet<_> = devs.iter().map(|d| p.args(d, &name)).collect(); // Kernel objects are not created for any __kernel functions in program that do not have the // same function definition across all devices for which a program executable has been // successfully built. if kernel_args.len()!= 1 { continue; } if!kernels.is_null() { // we just assume the client isn't stupid unsafe { kernels .add(num_kernels as usize) .write(cl_kernel::from_arc(Kernel::new( name, p.clone(), kernel_args.into_iter().next().unwrap(), ))); } } num_kernels += 1; } num_kernels_ret.write_checked(num_kernels); Ok(()) } pub fn set_kernel_arg( kernel: cl_kernel, arg_index: cl_uint, arg_size: usize, arg_value: *const ::std::os::raw::c_void, ) -> CLResult<()> { let k = kernel.get_arc()?; // CL_INVALID_ARG_INDEX if arg_index is not a valid argument index. if let Some(arg) = k.args.get(arg_index as usize) { // CL_INVALID_ARG_SIZE if arg_size does not match the size of the data type for an argument // that is not a memory object or if the argument is a memory object and // arg_size!= sizeof(cl_mem) or if arg_size is zero and the argument is declared with the // local qualifier or if the argument is a sampler and arg_size!= sizeof(cl_sampler). match arg.kind { KernelArgType::MemLocal => { if arg_size == 0 { return Err(CL_INVALID_ARG_SIZE); } } KernelArgType::MemGlobal | KernelArgType::MemConstant | KernelArgType::Image | KernelArgType::RWImage | KernelArgType::Texture =>
_ => { if arg.size!= arg_size { return Err(CL_INVALID_ARG_SIZE); } } } // CL_INVALID_ARG_VALUE if arg_value specified is not a valid value. match arg.kind { // If the argument is declared with the local qualifier, the arg_value entry must be // NULL. KernelArgType::MemLocal => { if!arg_value.is_null() { return Err(CL_INVALID_ARG_VALUE); } } // If the argument is of type sampler_t, the arg_value entry must be a pointer to the // sampler object. KernelArgType::Constant | KernelArgType::Sampler => { if arg_value.is_null() { return Err(CL_INVALID_ARG_VALUE); } } _ => {} }; // let's create the arg now let arg = unsafe { if arg.dead { KernelArgValue::None } else { match arg.kind { KernelArgType::Constant => KernelArgValue::Constant( slice::from_raw_parts(arg_value.cast(), arg_size).to_vec(), ), KernelArgType::MemConstant | KernelArgType::MemGlobal => { let ptr: *const cl_mem = arg_value.cast(); if ptr.is_null() || (*ptr).is_null() { KernelArgValue::None } else { KernelArgValue::MemObject((*ptr).get_arc()?) } } KernelArgType::MemLocal => KernelArgValue::LocalMem(arg_size), KernelArgType::Image | KernelArgType::RWImage | KernelArgType::Texture => { let img: *const cl_mem = arg_value.cast(); KernelArgValue::MemObject((*img).get_arc()?) } KernelArgType::Sampler => { let ptr: *const cl_sampler = arg_value.cast(); KernelArgValue::Sampler((*ptr).get_arc()?) } } } }; k.values.get(arg_index as usize).unwrap().replace(Some(arg)); Ok(()) } else { Err(CL_INVALID_ARG_INDEX) } //• CL_INVALID_DEVICE_QUEUE for an argument declared to be of type queue_t when the specified arg_value is not a valid device queue object. This error code is missing before version 2.0. //• CL_INVALID_ARG_VALUE if the argument is an image declared with the read_only qualifier and arg_value refers to an image object created with cl_mem_flags of CL_MEM_WRITE_ONLY or if the image argument is declared with the write_only qualifier and arg_value refers to an image object created with cl_mem_flags of CL_MEM_READ_ONLY. //• CL_MAX_SIZE_RESTRICTION_EXCEEDED if the size in bytes of the memory object (if the argument is a memory object) or arg_size (if the argument is declared with local qualifier) exceeds a language- specified maximum size restriction for this argument, such as the MaxByteOffset SPIR-V decoration. This error code is missing before version 2.2. } pub fn enqueue_ndrange_kernel( command_queue: cl_command_queue, kernel: cl_kernel, work_dim: cl_uint, global_work_offset: *const usize, global_work_size: *const usize, local_work_size: *const usize, num_events_in_wait_list: cl_uint, event_wait_list: *const cl_event, event: *mut cl_event, ) -> CLResult<()> { let q = command_queue.get_arc()?; let k = kernel.get_arc()?; let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?; // CL_INVALID_CONTEXT if context associated with command_queue and kernel are not the same if q.context!= k.prog.context { return Err(CL_INVALID_CONTEXT); } // CL_INVALID_PROGRAM_EXECUTABLE if there is no successfully built program executable available // for device associated with command_queue. if k.prog.status(&q.device)!= CL_BUILD_SUCCESS as cl_build_status { return Err(CL_INVALID_PROGRAM_EXECUTABLE); } // CL_INVALID_KERNEL_ARGS if the kernel argument values have not been specified. if k.values.iter().any(|v| v.borrow().is_none()) { return Err(CL_INVALID_KERNEL_ARGS); } // CL_INVALID_WORK_DIMENSION if work_dim is not a valid value (i.e. a value between 1 and // CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS). 
if work_dim == 0 || work_dim > q.device.max_grid_dimensions() { return Err(CL_INVALID_WORK_DIMENSION); } // we assume the application gets it right and doesn't pass shorter arrays then actually needed. let global_work_size = unsafe { kernel_work_arr_or_default(global_work_size, work_dim) }; let local_work_size = unsafe { kernel_work_arr_or_default(local_work_size, work_dim) }; let global_work_offset = unsafe { kernel_work_arr_or_default(global_work_offset, work_dim) }; let device_bits = q.device.address_bits(); let device_max = u64::MAX >> (u64::BITS - device_bits); for i in 0..work_dim as usize { let lws = local_work_size[i]; let gws = global_work_size[i]; let gwo = global_work_offset[i]; // CL_INVALID_WORK_ITEM_SIZE if the number of work-items specified in any of // local_work_size[0], … local_work_size[work_dim - 1] is greater than the corresponding // values specified by // CL_DEVICE_MAX_WORK_ITEM_SIZES[0], …, CL_DEVICE_MAX_WORK_ITEM_SIZES[work_dim - 1]. if lws > q.device.max_block_sizes()[i] { return Err(CL_INVALID_WORK_ITEM_SIZE); } // CL_INVALID_WORK_GROUP_SIZE if the work-group size must be uniform and the // local_work_size is not NULL, [...] if the global_work_size is not evenly divisible by // the local_work_size. if lws!= 0 && gws % lws!= 0 { return Err(CL_INVALID_WORK_GROUP_SIZE); } // CL_INVALID_WORK_GROUP_SIZE if local_work_size is specified and does not match the // required work-group size for kernel in the program source. if lws!= 0 && k.work_group_size[i]!= 0 && lws!= k.work_group_size[i] { return Err(CL_INVALID_WORK_GROUP_SIZE); } // CL_INVALID_GLOBAL_WORK_SIZE if any of the values specified in global_work_size[0], … // global_work_size[work_dim - 1] exceed the maximum value representable by size_t on // the device on which the kernel-instance will be enqueued. if gws as u64 > device_max { return Err(CL_INVALID_GLOBAL_WORK_SIZE); } // CL_INVALID_GLOBAL_OFFSET if the value specified in global_work_size + the // corresponding values in global_work_offset for any dimensions is greater than the // maximum value representable by size t on the device on which the kernel-instance // will be enqueued if u64::checked_add(gws as u64, gwo as u64) .filter(|&x| x <= device_max) .is_none() { return Err(CL_INVALID_GLOBAL_OFFSET); } } // If global_work_size is NULL, or the value in any passed dimension is 0 then the kernel // command will trivially succeed after its event dependencies are satisfied and subsequently // update its completion event. let cb: EventSig = if global_work_size.contains(&0) { Box::new(|_, _| Ok(())) } else { k.launch( &q, work_dim, local_work_size, global_work_size, global_work_offset, )? }; create_and_queue(q, CL_COMMAND_NDRANGE_KERNEL, evs, event, false, cb) //• CL_INVALID_WORK_GROUP_SIZE if local_work_size is specified and is not consistent with the required number of sub-groups for kernel in the program source. //• CL_INVALID_WORK_GROUP_SIZE if local_work_size is specified and the total number of work-items in the work-group computed as local_work_size[0] × … local_work_size[work_dim - 1] is greater than the value specified by CL_KERNEL_WORK_GROUP_SIZE in the Kernel Object Device Queries table. //• CL_MISALIGNED_SUB_BUFFER_OFFSET if a sub-buffer object is specified as the value for an argument that is a buffer object and the offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue. 
This error code //• CL_INVALID_IMAGE_SIZE if an image object is specified as an argument value and the image dimensions (image width, height, specified or compute row and/or slice pitch) are not supported by device associated with queue. //• CL_IMAGE_FORMAT_NOT_SUPPORTED if an image object is specified as an argument value and the image format (image channel order and data type) is not supported by device associated with queue. //• CL_OUT_OF_RESOURCES if there is a failure to queue the execution instance of kernel on the command-queue because of insufficient resources needed to execute the kernel. For example, the explicitly specified local_work_size causes a failure to execute the kernel because of insufficient resources such as registers or local memory. Another example would be the number of read-only image args used in kernel exceed the CL_DEVICE_MAX_READ_IMAGE_ARGS value for device or the number of write-only and read-write image args used in kernel exceed the CL_DEVICE_MAX_READ_WRITE_IMAGE_ARGS value for device or the number of samplers used in kernel exceed CL_DEVICE_MAX_SAMPLERS for device. //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with image or buffer objects specified as arguments to kernel. //• CL_INVALID_OPERATION if SVM pointers are passed as arguments to a kernel and the device does not support SVM or if system pointers are passed as arguments to a kernel and/or stored inside SVM allocations passed as kernel arguments and the device does not support fine grain system SVM allocations. } pub fn enqueue_task( command_queue: cl_command_queue, kernel: cl_kernel, num_events_in_wait_list: cl_uint, event_wait_list: *const cl_event, event: *mut cl_event, ) -> CLResult<()> { // clEnqueueTask is equivalent to calling clEnqueueNDRangeKernel with work_dim set to 1, // global_work_offset set to NULL, global_work_size[0] set to 1, and local_work_size[0] set to // 1. enqueue_ndrange_kernel( command_queue, kernel, 1, ptr::null(), [1, 1, 1].as_ptr(), [1, 0, 0].as_ptr(),
{ if arg_size != std::mem::size_of::<cl_mem>() { return Err(CL_INVALID_ARG_SIZE); } }
conditional_block
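The enqueue_ndrange_kernel record above validates every NDRange dimension before launching: a non-zero local size must evenly divide the global size, and the global size plus offset must stay representable in the device's size_t. A minimal, self-contained sketch of those per-dimension checks; ClError, check_dimension, and the device limits are stand-in names, not the real rusticl types:

// Hypothetical sketch of the per-dimension NDRange checks described above.
#[derive(Debug, PartialEq)]
enum ClError {
    InvalidWorkGroupSize,
    InvalidGlobalWorkSize,
    InvalidGlobalOffset,
}

/// `device_max` plays the role of the largest value representable by the
/// device's size_t, i.e. u64::MAX >> (64 - address_bits).
fn check_dimension(gws: u64, lws: u64, gwo: u64, device_max: u64) -> Result<(), ClError> {
    // A non-zero local size must evenly divide the global size.
    if lws != 0 && gws % lws != 0 {
        return Err(ClError::InvalidWorkGroupSize);
    }
    // The global size itself must fit in the device's size_t.
    if gws > device_max {
        return Err(ClError::InvalidGlobalWorkSize);
    }
    // global size + offset must not overflow past device_max either.
    if gws.checked_add(gwo).filter(|&x| x <= device_max).is_none() {
        return Err(ClError::InvalidGlobalOffset);
    }
    Ok(())
}

fn main() {
    let device_max = u64::MAX >> (u64::BITS - 32); // a 32-bit device
    assert_eq!(check_dimension(64, 16, 0, device_max), Ok(()));
    assert_eq!(check_dimension(65, 16, 0, device_max), Err(ClError::InvalidWorkGroupSize));
    assert_eq!(check_dimension(u64::from(u32::MAX), 0, 1, device_max), Err(ClError::InvalidGlobalOffset));
}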
view.rs
, &dbopts, )?; { let txn = db.transaction()?; let mut maybe_guesser = if opts.guess_ranges { Some(SegmentRangeGuesser::new(&txn, "")?) } else { None }; let mut tag_editor = |segment_id: i64, tags: &mut json::JsonValue| -> Result<()> { if let Some(ref mut guesser) = maybe_guesser { if let Some(gr) = guesser.get(segment_id)? { tags.insert("gr:Z", gr).unwrap() } } Ok(()) }; if opts.output_gfa == "-" &&!opts.bandage && atty::is(atty::Stream::Stdout) { // interactive mode: pipe into less -S less(|less_in| { write_header(&txn, less_in) .and_then(|_| { write_segments(&txn, "",!opts.no_sequences, &mut tag_editor, less_in) }) .and_then(|_| write_links(&txn, "", less_in)) .and_then(|_| write_paths(&txn, "", less_in)) })? } else { let mut output_gfa = String::from(&opts.output_gfa); if opts.bandage && output_gfa == "-" { output_gfa = bandage_temp_filename()? } { let mut writer_box = writer(&output_gfa)?; let out = &mut *writer_box; write_header(&txn, out)?; write_segments(&txn, "",!opts.no_sequences, &mut tag_editor, out)?; write_links(&txn, "", out)?; write_paths(&txn, "", out)?; write_walks(&txn, "", out)? } if opts.bandage { if let Some(ref mut guesser) = maybe_guesser { guesser.write_bandage_csv(&output_gfa)? } bandage(&output_gfa)? } } } Ok(()) } pub fn writer(gfa_filename: &str) -> Result<Box<dyn io::Write>> { if gfa_filename.is_empty() || gfa_filename == "-" { return Ok(Box::new(io::BufWriter::new(io::stdout()))); } if!gfa_filename.ends_with(".gfa") { warn!("output filename should end with.gfa") } Ok(Box::new(io::BufWriter::new(fs::File::create( gfa_filename, )?))) } /// Start `less -S` and call `write` with its standard input pipe. /// Tolerate BrokenPipe errors (user exited before viewing all data) pub fn less<F>(write: F) -> Result<()> where F: FnOnce(&mut dyn io::Write) -> Result<()>, { if which::which("less").is_err() { return write(&mut io::stdout()); } let mut child = process::Command::new("less") .arg("-S") .stdin(process::Stdio::piped()) .spawn()?; { let mut less_in = child.stdin.take().unwrap(); match write(&mut less_in) { Ok(()) => (), Err(util::Error::IoError(err)) if err.kind() == io::ErrorKind::BrokenPipe => (), Err(e) => return Err(e), } } child.wait()?; Ok(()) } pub fn bandage(gfa: &str) -> Result<()> { info!("Bandage load {} --draw", gfa); if process::Command::new("Bandage") .arg("load") .arg(gfa) .arg("--draw") .spawn() .is_err() { bad_command!("failed to launch Bandage; make sure it's in PATH") } Ok(()) } pub fn bandage_temp_filename() -> Result<String> { if!atty::is(atty::Stream::Stdout) { bad_command!("supply -o filename.gfa on which to launch Bandage") } Ok(String::from( path::Path::new(&env::var("TMPDIR").unwrap_or(String::from("/tmp"))) .join(format!( "gfabase-bandage-{}.gfa", chrono::Local::now().to_rfc3339_opts(chrono::SecondsFormat::Micros, true) )) .to_str() .unwrap(), )) } pub fn write_header(db: &rusqlite::Connection, writer: &mut dyn io::Write) -> Result<()> { let tags_json: String = db.query_row( "SELECT tags_json FROM gfa1_header WHERE _rowid_ = 1", [], |row| row.get(0), )?; writer.write(b"H")?; write_tags("gfa1_header", 1, &tags_json, writer)?; writer.write(b"\n")?; Ok(()) } pub fn write_segments( db: &rusqlite::Connection, where_clause: &str, with_sequences: bool, mut tag_editor: impl FnMut(i64, &mut json::JsonValue) -> Result<()>, writer: &mut dyn io::Write, ) -> Result<()> { let segments_query_sql = String::from(if with_sequences { "SELECT segment_id, coalesce(name, cast(segment_id AS TEXT)), sequence_length, coalesce(tags_json, '{}'), sequence FROM 
gfa1_segment " } else { "SELECT segment_id, coalesce(name, cast(segment_id AS TEXT)), sequence_length, coalesce(tags_json, '{}') FROM gfa1_segment_meta " }) + where_clause; let mut segments_query = db.prepare(&segments_query_sql)?; let mut segments_cursor = segments_query.query([])?; while let Some(segrow) = segments_cursor.next()? { let rowid: i64 = segrow.get(0)?; let name: String = segrow.get(1)?; let maybe_sequence_length: Option<i64> = segrow.get(2)?; let tags_json: String = segrow.get(3)?; writer.write_fmt(format_args!("S\t{}\t", name))?; if with_sequences { match segrow.get_ref(4)? { ValueRef::Text(sequence) => writer.write(sequence)?, ValueRef::Null => writer.write(b"*")?, _ => { return Err(util::Error::InvalidGfab { message: String::from("segment row has invalid sequence value type"), table: String::from("gfa1_segment_sequence"), rowid: rowid, }) } }; } else { writer.write(b"*")?; } if let Some(sequence_length) = maybe_sequence_length { writer.write_fmt(format_args!("\tLN:i:{}", sequence_length))?; } write_tags_with_editor( "gfa1_segments_meta", rowid, &tags_json, &mut tag_editor, writer, )?; writer.write(b"\n")?; } Ok(()) } pub fn write_links( db: &rusqlite::Connection, where_clause: &str, writer: &mut dyn io::Write, ) -> Result<()> { let links_query_sql = format!( // this two-layer join resolves the two segment IDs to names (if any) "SELECT link_id, from_segment_name, from_reverse, coalesce(gfa1_segment_meta.name, cast(to_segment AS TEXT)) AS to_segment_name, to_reverse, cigar, link_tags_json FROM (SELECT gfa1_link._rowid_ AS link_id, coalesce(gfa1_segment_meta.name, cast(from_segment AS TEXT)) AS from_segment_name, from_reverse, to_segment, to_reverse, coalesce(cigar, '*') AS cigar, coalesce(gfa1_link.tags_json, '{{}}') AS link_tags_json FROM gfa1_link LEFT JOIN gfa1_segment_meta ON from_segment = segment_id {} ORDER BY from_segment, to_segment) LEFT JOIN gfa1_segment_meta ON to_segment = segment_id", where_clause ); let mut links_query = db.prepare(&links_query_sql)?; let mut links_cursor = links_query.query([])?; while let Some(linkrow) = links_cursor.next()? { let link_id: i64 = linkrow.get(0)?; let from_segment: String = linkrow.get(1)?; let from_reverse: i8 = linkrow.get(2)?; let to_segment: String = linkrow.get(3)?; let to_reverse: i8 = linkrow.get(4)?; let cigar: String = linkrow.get(5)?; let tags_json: String = linkrow.get(6)?; writer.write_fmt(format_args!( "L\t{}\t{}\t{}\t{}\t{}", from_segment, if from_reverse == 0 { '+' } else { '-' }, to_segment, if to_reverse == 0 { '+' } else { '-' }, cigar ))?; write_tags("gfa1_link", link_id, &tags_json, writer)?; writer.write(b"\n")?; } Ok(()) } pub fn write_paths( db: &rusqlite::Connection, where_clause: &str, writer: &mut dyn io::Write, ) -> Result<()> { let paths_query_sql = format!( "SELECT path_id, coalesce(name, cast(path_id AS TEXT)), coalesce(tags_json, '{{}}') FROM gfa1_path {} ORDER BY path_id", where_clause ); let mut paths_query = db.prepare(&paths_query_sql)?; let mut elements_query = db.prepare( "SELECT coalesce(name, cast(segment_id AS TEXT)) AS segment_name, reverse, cigar_vs_previous FROM gfa1_path_element LEFT JOIN gfa1_segment_meta USING(segment_id) WHERE path_id=? ORDER BY path_id, ordinal", )?; let mut paths_cursor = paths_query.query([])?; while let Some(pathrow) = paths_cursor.next()? 
{ let path_id: i64 = pathrow.get(0)?; let name: String = pathrow.get(1)?; let tags_json: String = pathrow.get(2)?; let mut elts_csv = Vec::new(); let mut cigars_csv = Vec::new(); let mut elts_cursor = elements_query.query(params![path_id])?; while let Some(eltrow) = elts_cursor.next()? { let segment_name: String = eltrow.get(0)?; let reverse: i64 = eltrow.get(1)?; let maybe_cigar: Option<String> = eltrow.get(2)?; elts_csv.push(segment_name + if reverse == 0 { "+" } else { "-" }); if let Some(cigar) = maybe_cigar { cigars_csv.push(cigar); } } writer.write_fmt(format_args!( "P\t{}\t{}\t{}", &name, &elts_csv.join(","), if cigars_csv.len() > 0 { cigars_csv.join(",") } else { String::from("*") } ))?; write_tags("gfa1_path", path_id, &tags_json, writer)?; writer.write(b"\n")?; } Ok(()) } pub fn write_walks( db: &rusqlite::Connection, where_clause: &str, writer: &mut dyn io::Write, ) -> Result<()> { let fwd = ">".as_bytes(); let rev = "<".as_bytes(); let mut iter_walk_query = prepare_iter_walk(db)?; let walks_query_sql = format!( "SELECT walk_id, sample, hap_idx, refseq_name, refseq_begin, refseq_end, coalesce(tags_json, '{{}}') FROM gfa1_walk {} ORDER BY sample, refseq_name, hap_idx, refseq_begin", where_clause); let mut walks_query = db.prepare(&walks_query_sql)?; let mut walks_cursor = walks_query.query([])?; while let Some(row) = walks_cursor.next()? { let walk_id: i64 = row.get(0)?; let sample: String = row.get(1)?; let hap_idx: i64 = row.get(2)?; let refseq_name: String = row.get(3)?; let refseq_begin: i64 = row.get(4)?; let refseq_end: i64 = row.get(5)?; let tags_json: String = row.get(6)?; writer.write_fmt(format_args!( "W\t{}\t{}\t{}\t{}\t{}\t", sample, hap_idx, refseq_name, refseq_begin, refseq_end ))?; iter_walk(&mut iter_walk_query, walk_id, |segment_id, reverse| { writer.write(if reverse { rev } else { fwd })?; writer.write(segment_id.to_string().as_bytes())?; Ok(true) })?; write_tags("gfa1_walk", walk_id, &tags_json, writer)?; writer.write(b"\n")?; } Ok(()) } fn write_tags_with_editor( table: &str, rowid: i64, tags_json: &str, mut editor: impl FnMut(i64, &mut json::JsonValue) -> Result<()>, writer: &mut dyn io::Write, ) -> Result<()> { let invalid = || util::Error::InvalidGfab { message: String::from("invalid tags_json"), table: String::from(table), rowid: rowid, }; let mut tags = json::parse(&tags_json).map_err(|_| invalid())?; editor(rowid, &mut tags)?; for (k, v) in tags.entries() { let kfields: Vec<&str> = k.split(':').collect(); if kfields.len()!= 2 { return Err(invalid()); } let vstr = match kfields[1] { "A" | "Z" | "H" => JsonValue::as_str(v).ok_or_else(invalid)?.to_string(), "i" => JsonValue::as_i64(v).ok_or_else(invalid)?.to_string(), "f" => JsonValue::as_f64(v).ok_or_else(invalid)?.to_string(), // TODO: B & J _ => return Err(invalid()), }; writer.write_fmt(format_args!("\t{}:{}", k, vstr))?; } Ok(()) } fn write_tags(table: &str, rowid: i64, tags_json: &str, writer: &mut dyn io::Write) -> Result<()> { write_tags_with_editor(table, rowid, tags_json, |_, _| Ok(()), writer) } // Helpers roughly guessing a genomic range for a segment based on its PAF mappings. Selects the // chromosome with the most coverage in the mappings, then the min and max mapped position on that // chromosome. 
pub struct SegmentRangeGuesser<'a> { getter: rusqlite::Statement<'a>, csv_query: rusqlite::Statement<'a>, } impl<'a> SegmentRangeGuesser<'_> { pub fn new( db: &'a rusqlite::Connection, where_clause: &str, ) -> Result<SegmentRangeGuesser<'a>> { // analyze mappings to generate temp.segment_range_guess db.execute( "CREATE TABLE temp.segment_range_guess( segment_id INTEGER PRIMARY KEY, refseq_name TEXT NOT NULL, refseq_begin INTEGER NOT NULL, refseq_end INTEGER NOT NULL)", [], )?; let sql = format!( "WITH summary AS (SELECT segment_id, refseq_name, min(refseq_begin) AS min_begin, max(refseq_end) AS max_end, max(refseq_end) - min(refseq_begin) AS coverage, sum(refseq_end - refseq_begin) AS coverage2 FROM gfa1_segment_mapping {} GROUP BY segment_id, refseq_name) INSERT INTO temp.segment_range_guess(segment_id, refseq_name, refseq_begin, refseq_end) SELECT segment_id, refseq_name, min_begin, max_end FROM (SELECT segment_id, refseq_name, min_begin, max_end, row_number() OVER (PARTITION BY segment_id ORDER BY coverage DESC, coverage2 DESC) AS coverage_rank FROM summary) WHERE coverage_rank = 1", where_clause ); let n = db.execute(&sql, [])?; info!("guessed ranges for {} segments", n); // prepare queries on temp.segment_range_guess Ok(SegmentRangeGuesser { getter: db.prepare( "SELECT refseq_name, refseq_begin, refseq_end FROM temp.segment_range_guess WHERE segment_id =?", )?, csv_query: db.prepare( "SELECT coalesce(name, cast(segment_id AS TEXT)), refseq_name, refseq_begin, refseq_end FROM temp.segment_range_guess LEFT JOIN gfa1_segment_meta USING(segment_id)", )?, }) } pub fn get(&mut self, segment_id: i64) -> Result<Option<String>> { let maybe_row: Option<(String, i64, i64)> = self .getter .query_row(params![segment_id], |row| { Ok((row.get(0)?, row.get(1)?, row.get(2)?)) }) .optional()?; if let Some((refseq_name, refseq_begin, refseq_end)) = maybe_row { return Ok(Some(format!( "~{}:{}-{}", refseq_name, (refseq_begin + 1).to_formatted_string(&Locale::en), refseq_end.to_formatted_string(&Locale::en) ))); } Ok(None) } pub fn write_bandage_csv(&mut self, gfa_filename: &str) -> Result<()> { // write a CSV file with the guessed ranges that Bandage can show as labels let csv_filename = String::from(gfa_filename.strip_suffix(".gfa").unwrap_or(gfa_filename)) + ".guessed_ranges.csv"; { let mut writer = io::BufWriter::new(fs::File::create(&csv_filename)?); writer.write_fmt(format_args!("Name,Guessed range\n"))?; let mut cursor = self.csv_query.query([])?; while let Some(row) = cursor.next()? { let name: String = row.get(0)?; let refseq_name: String = row.get(1)?; let refseq_begin: i64 = row.get(2)?; let refseq_end: i64 = row.get(3)?;
writer.write_fmt(format_args!( "\"{}\",\"~{}:{}-{}\"\n", name,
random_line_split
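The write_tags_with_editor helper in the record above turns each JSON tag key of the form NAME:TYPE into a tab-separated GFA tag column, rejecting malformed keys and unsupported types. A dependency-free sketch of that key validation and type dispatch, using plain string pairs instead of the json crate (format_tags is a made-up name for illustration):

use std::fmt::Write;

/// Serialize ("gr:Z", "~chr1:1-100")-style pairs as GFA tag columns,
/// mirroring the key format checked by write_tags_with_editor above.
fn format_tags(tags: &[(&str, &str)]) -> Result<String, String> {
    let mut out = String::new();
    for (k, v) in tags {
        // A valid key is exactly NAME:TYPE, e.g. "LN:i" or "gr:Z".
        let fields: Vec<&str> = k.split(':').collect();
        if fields.len() != 2 {
            return Err(format!("invalid tag key: {}", k));
        }
        match fields[1] {
            // String-like and numeric types pass through; the real code also
            // validates the JSON value's type before printing it.
            "A" | "Z" | "H" | "i" | "f" => {
                write!(out, "\t{}:{}", k, v).unwrap();
            }
            _ => return Err(format!("unsupported tag type: {}", fields[1])),
        }
    }
    Ok(out)
}

fn main() {
    let line = format_tags(&[("LN:i", "42"), ("gr:Z", "~chr1:1,001-2,000")]).unwrap();
    assert_eq!(line, "\tLN:i:42\tgr:Z:~chr1:1,001-2,000");
    assert!(format_tags(&[("bad", "x")]).is_err());
}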
view.rs
NO_MUTEX, &dbopts, )?; { let txn = db.transaction()?; let mut maybe_guesser = if opts.guess_ranges { Some(SegmentRangeGuesser::new(&txn, "")?) } else { None }; let mut tag_editor = |segment_id: i64, tags: &mut json::JsonValue| -> Result<()> { if let Some(ref mut guesser) = maybe_guesser { if let Some(gr) = guesser.get(segment_id)? { tags.insert("gr:Z", gr).unwrap() } } Ok(()) }; if opts.output_gfa == "-" &&!opts.bandage && atty::is(atty::Stream::Stdout) { // interactive mode: pipe into less -S less(|less_in| { write_header(&txn, less_in) .and_then(|_| { write_segments(&txn, "",!opts.no_sequences, &mut tag_editor, less_in) }) .and_then(|_| write_links(&txn, "", less_in)) .and_then(|_| write_paths(&txn, "", less_in)) })? } else { let mut output_gfa = String::from(&opts.output_gfa); if opts.bandage && output_gfa == "-" { output_gfa = bandage_temp_filename()? } { let mut writer_box = writer(&output_gfa)?; let out = &mut *writer_box; write_header(&txn, out)?; write_segments(&txn, "",!opts.no_sequences, &mut tag_editor, out)?; write_links(&txn, "", out)?; write_paths(&txn, "", out)?; write_walks(&txn, "", out)? } if opts.bandage { if let Some(ref mut guesser) = maybe_guesser { guesser.write_bandage_csv(&output_gfa)? } bandage(&output_gfa)? } } } Ok(()) } pub fn writer(gfa_filename: &str) -> Result<Box<dyn io::Write>> { if gfa_filename.is_empty() || gfa_filename == "-" { return Ok(Box::new(io::BufWriter::new(io::stdout()))); } if!gfa_filename.ends_with(".gfa") { warn!("output filename should end with.gfa") } Ok(Box::new(io::BufWriter::new(fs::File::create( gfa_filename, )?))) } /// Start `less -S` and call `write` with its standard input pipe. /// Tolerate BrokenPipe errors (user exited before viewing all data) pub fn less<F>(write: F) -> Result<()> where F: FnOnce(&mut dyn io::Write) -> Result<()>, { if which::which("less").is_err() { return write(&mut io::stdout()); } let mut child = process::Command::new("less") .arg("-S") .stdin(process::Stdio::piped()) .spawn()?; { let mut less_in = child.stdin.take().unwrap(); match write(&mut less_in) { Ok(()) => (), Err(util::Error::IoError(err)) if err.kind() == io::ErrorKind::BrokenPipe => (), Err(e) => return Err(e), } } child.wait()?; Ok(()) } pub fn bandage(gfa: &str) -> Result<()> { info!("Bandage load {} --draw", gfa); if process::Command::new("Bandage") .arg("load") .arg(gfa) .arg("--draw") .spawn() .is_err() { bad_command!("failed to launch Bandage; make sure it's in PATH") } Ok(()) } pub fn bandage_temp_filename() -> Result<String> { if!atty::is(atty::Stream::Stdout) { bad_command!("supply -o filename.gfa on which to launch Bandage") } Ok(String::from( path::Path::new(&env::var("TMPDIR").unwrap_or(String::from("/tmp"))) .join(format!( "gfabase-bandage-{}.gfa", chrono::Local::now().to_rfc3339_opts(chrono::SecondsFormat::Micros, true) )) .to_str() .unwrap(), )) } pub fn
(db: &rusqlite::Connection, writer: &mut dyn io::Write) -> Result<()> { let tags_json: String = db.query_row( "SELECT tags_json FROM gfa1_header WHERE _rowid_ = 1", [], |row| row.get(0), )?; writer.write(b"H")?; write_tags("gfa1_header", 1, &tags_json, writer)?; writer.write(b"\n")?; Ok(()) } pub fn write_segments( db: &rusqlite::Connection, where_clause: &str, with_sequences: bool, mut tag_editor: impl FnMut(i64, &mut json::JsonValue) -> Result<()>, writer: &mut dyn io::Write, ) -> Result<()> { let segments_query_sql = String::from(if with_sequences { "SELECT segment_id, coalesce(name, cast(segment_id AS TEXT)), sequence_length, coalesce(tags_json, '{}'), sequence FROM gfa1_segment " } else { "SELECT segment_id, coalesce(name, cast(segment_id AS TEXT)), sequence_length, coalesce(tags_json, '{}') FROM gfa1_segment_meta " }) + where_clause; let mut segments_query = db.prepare(&segments_query_sql)?; let mut segments_cursor = segments_query.query([])?; while let Some(segrow) = segments_cursor.next()? { let rowid: i64 = segrow.get(0)?; let name: String = segrow.get(1)?; let maybe_sequence_length: Option<i64> = segrow.get(2)?; let tags_json: String = segrow.get(3)?; writer.write_fmt(format_args!("S\t{}\t", name))?; if with_sequences { match segrow.get_ref(4)? { ValueRef::Text(sequence) => writer.write(sequence)?, ValueRef::Null => writer.write(b"*")?, _ => { return Err(util::Error::InvalidGfab { message: String::from("segment row has invalid sequence value type"), table: String::from("gfa1_segment_sequence"), rowid: rowid, }) } }; } else { writer.write(b"*")?; } if let Some(sequence_length) = maybe_sequence_length { writer.write_fmt(format_args!("\tLN:i:{}", sequence_length))?; } write_tags_with_editor( "gfa1_segments_meta", rowid, &tags_json, &mut tag_editor, writer, )?; writer.write(b"\n")?; } Ok(()) } pub fn write_links( db: &rusqlite::Connection, where_clause: &str, writer: &mut dyn io::Write, ) -> Result<()> { let links_query_sql = format!( // this two-layer join resolves the two segment IDs to names (if any) "SELECT link_id, from_segment_name, from_reverse, coalesce(gfa1_segment_meta.name, cast(to_segment AS TEXT)) AS to_segment_name, to_reverse, cigar, link_tags_json FROM (SELECT gfa1_link._rowid_ AS link_id, coalesce(gfa1_segment_meta.name, cast(from_segment AS TEXT)) AS from_segment_name, from_reverse, to_segment, to_reverse, coalesce(cigar, '*') AS cigar, coalesce(gfa1_link.tags_json, '{{}}') AS link_tags_json FROM gfa1_link LEFT JOIN gfa1_segment_meta ON from_segment = segment_id {} ORDER BY from_segment, to_segment) LEFT JOIN gfa1_segment_meta ON to_segment = segment_id", where_clause ); let mut links_query = db.prepare(&links_query_sql)?; let mut links_cursor = links_query.query([])?; while let Some(linkrow) = links_cursor.next()? 
{ let link_id: i64 = linkrow.get(0)?; let from_segment: String = linkrow.get(1)?; let from_reverse: i8 = linkrow.get(2)?; let to_segment: String = linkrow.get(3)?; let to_reverse: i8 = linkrow.get(4)?; let cigar: String = linkrow.get(5)?; let tags_json: String = linkrow.get(6)?; writer.write_fmt(format_args!( "L\t{}\t{}\t{}\t{}\t{}", from_segment, if from_reverse == 0 { '+' } else { '-' }, to_segment, if to_reverse == 0 { '+' } else { '-' }, cigar ))?; write_tags("gfa1_link", link_id, &tags_json, writer)?; writer.write(b"\n")?; } Ok(()) } pub fn write_paths( db: &rusqlite::Connection, where_clause: &str, writer: &mut dyn io::Write, ) -> Result<()> { let paths_query_sql = format!( "SELECT path_id, coalesce(name, cast(path_id AS TEXT)), coalesce(tags_json, '{{}}') FROM gfa1_path {} ORDER BY path_id", where_clause ); let mut paths_query = db.prepare(&paths_query_sql)?; let mut elements_query = db.prepare( "SELECT coalesce(name, cast(segment_id AS TEXT)) AS segment_name, reverse, cigar_vs_previous FROM gfa1_path_element LEFT JOIN gfa1_segment_meta USING(segment_id) WHERE path_id=? ORDER BY path_id, ordinal", )?; let mut paths_cursor = paths_query.query([])?; while let Some(pathrow) = paths_cursor.next()? { let path_id: i64 = pathrow.get(0)?; let name: String = pathrow.get(1)?; let tags_json: String = pathrow.get(2)?; let mut elts_csv = Vec::new(); let mut cigars_csv = Vec::new(); let mut elts_cursor = elements_query.query(params![path_id])?; while let Some(eltrow) = elts_cursor.next()? { let segment_name: String = eltrow.get(0)?; let reverse: i64 = eltrow.get(1)?; let maybe_cigar: Option<String> = eltrow.get(2)?; elts_csv.push(segment_name + if reverse == 0 { "+" } else { "-" }); if let Some(cigar) = maybe_cigar { cigars_csv.push(cigar); } } writer.write_fmt(format_args!( "P\t{}\t{}\t{}", &name, &elts_csv.join(","), if cigars_csv.len() > 0 { cigars_csv.join(",") } else { String::from("*") } ))?; write_tags("gfa1_path", path_id, &tags_json, writer)?; writer.write(b"\n")?; } Ok(()) } pub fn write_walks( db: &rusqlite::Connection, where_clause: &str, writer: &mut dyn io::Write, ) -> Result<()> { let fwd = ">".as_bytes(); let rev = "<".as_bytes(); let mut iter_walk_query = prepare_iter_walk(db)?; let walks_query_sql = format!( "SELECT walk_id, sample, hap_idx, refseq_name, refseq_begin, refseq_end, coalesce(tags_json, '{{}}') FROM gfa1_walk {} ORDER BY sample, refseq_name, hap_idx, refseq_begin", where_clause); let mut walks_query = db.prepare(&walks_query_sql)?; let mut walks_cursor = walks_query.query([])?; while let Some(row) = walks_cursor.next()? 
{ let walk_id: i64 = row.get(0)?; let sample: String = row.get(1)?; let hap_idx: i64 = row.get(2)?; let refseq_name: String = row.get(3)?; let refseq_begin: i64 = row.get(4)?; let refseq_end: i64 = row.get(5)?; let tags_json: String = row.get(6)?; writer.write_fmt(format_args!( "W\t{}\t{}\t{}\t{}\t{}\t", sample, hap_idx, refseq_name, refseq_begin, refseq_end ))?; iter_walk(&mut iter_walk_query, walk_id, |segment_id, reverse| { writer.write(if reverse { rev } else { fwd })?; writer.write(segment_id.to_string().as_bytes())?; Ok(true) })?; write_tags("gfa1_walk", walk_id, &tags_json, writer)?; writer.write(b"\n")?; } Ok(()) } fn write_tags_with_editor( table: &str, rowid: i64, tags_json: &str, mut editor: impl FnMut(i64, &mut json::JsonValue) -> Result<()>, writer: &mut dyn io::Write, ) -> Result<()> { let invalid = || util::Error::InvalidGfab { message: String::from("invalid tags_json"), table: String::from(table), rowid: rowid, }; let mut tags = json::parse(&tags_json).map_err(|_| invalid())?; editor(rowid, &mut tags)?; for (k, v) in tags.entries() { let kfields: Vec<&str> = k.split(':').collect(); if kfields.len()!= 2 { return Err(invalid()); } let vstr = match kfields[1] { "A" | "Z" | "H" => JsonValue::as_str(v).ok_or_else(invalid)?.to_string(), "i" => JsonValue::as_i64(v).ok_or_else(invalid)?.to_string(), "f" => JsonValue::as_f64(v).ok_or_else(invalid)?.to_string(), // TODO: B & J _ => return Err(invalid()), }; writer.write_fmt(format_args!("\t{}:{}", k, vstr))?; } Ok(()) } fn write_tags(table: &str, rowid: i64, tags_json: &str, writer: &mut dyn io::Write) -> Result<()> { write_tags_with_editor(table, rowid, tags_json, |_, _| Ok(()), writer) } // Helpers roughly guessing a genomic range for a segment based on its PAF mappings. Selects the // chromosome with the most coverage in the mappings, then the min and max mapped position on that // chromosome. 
pub struct SegmentRangeGuesser<'a> { getter: rusqlite::Statement<'a>, csv_query: rusqlite::Statement<'a>, } impl<'a> SegmentRangeGuesser<'_> { pub fn new( db: &'a rusqlite::Connection, where_clause: &str, ) -> Result<SegmentRangeGuesser<'a>> { // analyze mappings to generate temp.segment_range_guess db.execute( "CREATE TABLE temp.segment_range_guess( segment_id INTEGER PRIMARY KEY, refseq_name TEXT NOT NULL, refseq_begin INTEGER NOT NULL, refseq_end INTEGER NOT NULL)", [], )?; let sql = format!( "WITH summary AS (SELECT segment_id, refseq_name, min(refseq_begin) AS min_begin, max(refseq_end) AS max_end, max(refseq_end) - min(refseq_begin) AS coverage, sum(refseq_end - refseq_begin) AS coverage2 FROM gfa1_segment_mapping {} GROUP BY segment_id, refseq_name) INSERT INTO temp.segment_range_guess(segment_id, refseq_name, refseq_begin, refseq_end) SELECT segment_id, refseq_name, min_begin, max_end FROM (SELECT segment_id, refseq_name, min_begin, max_end, row_number() OVER (PARTITION BY segment_id ORDER BY coverage DESC, coverage2 DESC) AS coverage_rank FROM summary) WHERE coverage_rank = 1", where_clause ); let n = db.execute(&sql, [])?; info!("guessed ranges for {} segments", n); // prepare queries on temp.segment_range_guess Ok(SegmentRangeGuesser { getter: db.prepare( "SELECT refseq_name, refseq_begin, refseq_end FROM temp.segment_range_guess WHERE segment_id =?", )?, csv_query: db.prepare( "SELECT coalesce(name, cast(segment_id AS TEXT)), refseq_name, refseq_begin, refseq_end FROM temp.segment_range_guess LEFT JOIN gfa1_segment_meta USING(segment_id)", )?, }) } pub fn get(&mut self, segment_id: i64) -> Result<Option<String>> { let maybe_row: Option<(String, i64, i64)> = self .getter .query_row(params![segment_id], |row| { Ok((row.get(0)?, row.get(1)?, row.get(2)?)) }) .optional()?; if let Some((refseq_name, refseq_begin, refseq_end)) = maybe_row { return Ok(Some(format!( "~{}:{}-{}", refseq_name, (refseq_begin + 1).to_formatted_string(&Locale::en), refseq_end.to_formatted_string(&Locale::en) ))); } Ok(None) } pub fn write_bandage_csv(&mut self, gfa_filename: &str) -> Result<()> { // write a CSV file with the guessed ranges that Bandage can show as labels let csv_filename = String::from(gfa_filename.strip_suffix(".gfa").unwrap_or(gfa_filename)) + ".guessed_ranges.csv"; { let mut writer = io::BufWriter::new(fs::File::create(&csv_filename)?); writer.write_fmt(format_args!("Name,Guessed range\n"))?; let mut cursor = self.csv_query.query([])?; while let Some(row) = cursor.next()? { let name: String = row.get(0)?; let refseq_name: String = row.get(1)?; let refseq_begin: i64 = row.get(2)?; let refseq_end: i64 = row.get(3)?; writer.write_fmt(format_args!( "\"{}\",\"~{}:{}-{}\"\n",
write_header
identifier_name
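The less helper in the record above pages long GFA output by spawning `less -S`, handing the writer its stdin, and treating BrokenPipe as a normal early exit by the user. The same pattern in isolation, assuming `less` is on PATH and simplifying the error type to io::Result (page is a hypothetical name):

use std::io::{self, Write};
use std::process::{Command, Stdio};

/// Pipe the output of `write` through `less -S`, tolerating the BrokenPipe
/// that occurs when the user quits before reading everything.
fn page<F>(write: F) -> io::Result<()>
where
    F: FnOnce(&mut dyn Write) -> io::Result<()>,
{
    let mut child = Command::new("less")
        .arg("-S")
        .stdin(Stdio::piped())
        .spawn()?;
    {
        let mut less_in = child.stdin.take().expect("stdin was piped");
        match write(&mut less_in) {
            Ok(()) => {}
            // User quit the pager early; not an error for our purposes.
            Err(e) if e.kind() == io::ErrorKind::BrokenPipe => {}
            Err(e) => return Err(e),
        }
    } // dropping less_in closes the pipe so less sees EOF
    child.wait()?;
    Ok(())
}

fn main() -> io::Result<()> {
    page(|out| {
        for i in 0..10_000 {
            writeln!(out, "line {}", i)?;
        }
        Ok(())
    })
}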
view.rs
NO_MUTEX, &dbopts, )?; { let txn = db.transaction()?; let mut maybe_guesser = if opts.guess_ranges { Some(SegmentRangeGuesser::new(&txn, "")?) } else { None }; let mut tag_editor = |segment_id: i64, tags: &mut json::JsonValue| -> Result<()> { if let Some(ref mut guesser) = maybe_guesser { if let Some(gr) = guesser.get(segment_id)? { tags.insert("gr:Z", gr).unwrap() } } Ok(()) }; if opts.output_gfa == "-" &&!opts.bandage && atty::is(atty::Stream::Stdout) { // interactive mode: pipe into less -S less(|less_in| { write_header(&txn, less_in) .and_then(|_| { write_segments(&txn, "",!opts.no_sequences, &mut tag_editor, less_in) }) .and_then(|_| write_links(&txn, "", less_in)) .and_then(|_| write_paths(&txn, "", less_in)) })? } else { let mut output_gfa = String::from(&opts.output_gfa); if opts.bandage && output_gfa == "-" { output_gfa = bandage_temp_filename()? } { let mut writer_box = writer(&output_gfa)?; let out = &mut *writer_box; write_header(&txn, out)?; write_segments(&txn, "",!opts.no_sequences, &mut tag_editor, out)?; write_links(&txn, "", out)?; write_paths(&txn, "", out)?; write_walks(&txn, "", out)? } if opts.bandage { if let Some(ref mut guesser) = maybe_guesser { guesser.write_bandage_csv(&output_gfa)? } bandage(&output_gfa)? } } } Ok(()) } pub fn writer(gfa_filename: &str) -> Result<Box<dyn io::Write>> { if gfa_filename.is_empty() || gfa_filename == "-"
if!gfa_filename.ends_with(".gfa") { warn!("output filename should end with.gfa") } Ok(Box::new(io::BufWriter::new(fs::File::create( gfa_filename, )?))) } /// Start `less -S` and call `write` with its standard input pipe. /// Tolerate BrokenPipe errors (user exited before viewing all data) pub fn less<F>(write: F) -> Result<()> where F: FnOnce(&mut dyn io::Write) -> Result<()>, { if which::which("less").is_err() { return write(&mut io::stdout()); } let mut child = process::Command::new("less") .arg("-S") .stdin(process::Stdio::piped()) .spawn()?; { let mut less_in = child.stdin.take().unwrap(); match write(&mut less_in) { Ok(()) => (), Err(util::Error::IoError(err)) if err.kind() == io::ErrorKind::BrokenPipe => (), Err(e) => return Err(e), } } child.wait()?; Ok(()) } pub fn bandage(gfa: &str) -> Result<()> { info!("Bandage load {} --draw", gfa); if process::Command::new("Bandage") .arg("load") .arg(gfa) .arg("--draw") .spawn() .is_err() { bad_command!("failed to launch Bandage; make sure it's in PATH") } Ok(()) } pub fn bandage_temp_filename() -> Result<String> { if!atty::is(atty::Stream::Stdout) { bad_command!("supply -o filename.gfa on which to launch Bandage") } Ok(String::from( path::Path::new(&env::var("TMPDIR").unwrap_or(String::from("/tmp"))) .join(format!( "gfabase-bandage-{}.gfa", chrono::Local::now().to_rfc3339_opts(chrono::SecondsFormat::Micros, true) )) .to_str() .unwrap(), )) } pub fn write_header(db: &rusqlite::Connection, writer: &mut dyn io::Write) -> Result<()> { let tags_json: String = db.query_row( "SELECT tags_json FROM gfa1_header WHERE _rowid_ = 1", [], |row| row.get(0), )?; writer.write(b"H")?; write_tags("gfa1_header", 1, &tags_json, writer)?; writer.write(b"\n")?; Ok(()) } pub fn write_segments( db: &rusqlite::Connection, where_clause: &str, with_sequences: bool, mut tag_editor: impl FnMut(i64, &mut json::JsonValue) -> Result<()>, writer: &mut dyn io::Write, ) -> Result<()> { let segments_query_sql = String::from(if with_sequences { "SELECT segment_id, coalesce(name, cast(segment_id AS TEXT)), sequence_length, coalesce(tags_json, '{}'), sequence FROM gfa1_segment " } else { "SELECT segment_id, coalesce(name, cast(segment_id AS TEXT)), sequence_length, coalesce(tags_json, '{}') FROM gfa1_segment_meta " }) + where_clause; let mut segments_query = db.prepare(&segments_query_sql)?; let mut segments_cursor = segments_query.query([])?; while let Some(segrow) = segments_cursor.next()? { let rowid: i64 = segrow.get(0)?; let name: String = segrow.get(1)?; let maybe_sequence_length: Option<i64> = segrow.get(2)?; let tags_json: String = segrow.get(3)?; writer.write_fmt(format_args!("S\t{}\t", name))?; if with_sequences { match segrow.get_ref(4)? 
{ ValueRef::Text(sequence) => writer.write(sequence)?, ValueRef::Null => writer.write(b"*")?, _ => { return Err(util::Error::InvalidGfab { message: String::from("segment row has invalid sequence value type"), table: String::from("gfa1_segment_sequence"), rowid: rowid, }) } }; } else { writer.write(b"*")?; } if let Some(sequence_length) = maybe_sequence_length { writer.write_fmt(format_args!("\tLN:i:{}", sequence_length))?; } write_tags_with_editor( "gfa1_segments_meta", rowid, &tags_json, &mut tag_editor, writer, )?; writer.write(b"\n")?; } Ok(()) } pub fn write_links( db: &rusqlite::Connection, where_clause: &str, writer: &mut dyn io::Write, ) -> Result<()> { let links_query_sql = format!( // this two-layer join resolves the two segment IDs to names (if any) "SELECT link_id, from_segment_name, from_reverse, coalesce(gfa1_segment_meta.name, cast(to_segment AS TEXT)) AS to_segment_name, to_reverse, cigar, link_tags_json FROM (SELECT gfa1_link._rowid_ AS link_id, coalesce(gfa1_segment_meta.name, cast(from_segment AS TEXT)) AS from_segment_name, from_reverse, to_segment, to_reverse, coalesce(cigar, '*') AS cigar, coalesce(gfa1_link.tags_json, '{{}}') AS link_tags_json FROM gfa1_link LEFT JOIN gfa1_segment_meta ON from_segment = segment_id {} ORDER BY from_segment, to_segment) LEFT JOIN gfa1_segment_meta ON to_segment = segment_id", where_clause ); let mut links_query = db.prepare(&links_query_sql)?; let mut links_cursor = links_query.query([])?; while let Some(linkrow) = links_cursor.next()? { let link_id: i64 = linkrow.get(0)?; let from_segment: String = linkrow.get(1)?; let from_reverse: i8 = linkrow.get(2)?; let to_segment: String = linkrow.get(3)?; let to_reverse: i8 = linkrow.get(4)?; let cigar: String = linkrow.get(5)?; let tags_json: String = linkrow.get(6)?; writer.write_fmt(format_args!( "L\t{}\t{}\t{}\t{}\t{}", from_segment, if from_reverse == 0 { '+' } else { '-' }, to_segment, if to_reverse == 0 { '+' } else { '-' }, cigar ))?; write_tags("gfa1_link", link_id, &tags_json, writer)?; writer.write(b"\n")?; } Ok(()) } pub fn write_paths( db: &rusqlite::Connection, where_clause: &str, writer: &mut dyn io::Write, ) -> Result<()> { let paths_query_sql = format!( "SELECT path_id, coalesce(name, cast(path_id AS TEXT)), coalesce(tags_json, '{{}}') FROM gfa1_path {} ORDER BY path_id", where_clause ); let mut paths_query = db.prepare(&paths_query_sql)?; let mut elements_query = db.prepare( "SELECT coalesce(name, cast(segment_id AS TEXT)) AS segment_name, reverse, cigar_vs_previous FROM gfa1_path_element LEFT JOIN gfa1_segment_meta USING(segment_id) WHERE path_id=? ORDER BY path_id, ordinal", )?; let mut paths_cursor = paths_query.query([])?; while let Some(pathrow) = paths_cursor.next()? { let path_id: i64 = pathrow.get(0)?; let name: String = pathrow.get(1)?; let tags_json: String = pathrow.get(2)?; let mut elts_csv = Vec::new(); let mut cigars_csv = Vec::new(); let mut elts_cursor = elements_query.query(params![path_id])?; while let Some(eltrow) = elts_cursor.next()? 
{ let segment_name: String = eltrow.get(0)?; let reverse: i64 = eltrow.get(1)?; let maybe_cigar: Option<String> = eltrow.get(2)?; elts_csv.push(segment_name + if reverse == 0 { "+" } else { "-" }); if let Some(cigar) = maybe_cigar { cigars_csv.push(cigar); } } writer.write_fmt(format_args!( "P\t{}\t{}\t{}", &name, &elts_csv.join(","), if cigars_csv.len() > 0 { cigars_csv.join(",") } else { String::from("*") } ))?; write_tags("gfa1_path", path_id, &tags_json, writer)?; writer.write(b"\n")?; } Ok(()) } pub fn write_walks( db: &rusqlite::Connection, where_clause: &str, writer: &mut dyn io::Write, ) -> Result<()> { let fwd = ">".as_bytes(); let rev = "<".as_bytes(); let mut iter_walk_query = prepare_iter_walk(db)?; let walks_query_sql = format!( "SELECT walk_id, sample, hap_idx, refseq_name, refseq_begin, refseq_end, coalesce(tags_json, '{{}}') FROM gfa1_walk {} ORDER BY sample, refseq_name, hap_idx, refseq_begin", where_clause); let mut walks_query = db.prepare(&walks_query_sql)?; let mut walks_cursor = walks_query.query([])?; while let Some(row) = walks_cursor.next()? { let walk_id: i64 = row.get(0)?; let sample: String = row.get(1)?; let hap_idx: i64 = row.get(2)?; let refseq_name: String = row.get(3)?; let refseq_begin: i64 = row.get(4)?; let refseq_end: i64 = row.get(5)?; let tags_json: String = row.get(6)?; writer.write_fmt(format_args!( "W\t{}\t{}\t{}\t{}\t{}\t", sample, hap_idx, refseq_name, refseq_begin, refseq_end ))?; iter_walk(&mut iter_walk_query, walk_id, |segment_id, reverse| { writer.write(if reverse { rev } else { fwd })?; writer.write(segment_id.to_string().as_bytes())?; Ok(true) })?; write_tags("gfa1_walk", walk_id, &tags_json, writer)?; writer.write(b"\n")?; } Ok(()) } fn write_tags_with_editor( table: &str, rowid: i64, tags_json: &str, mut editor: impl FnMut(i64, &mut json::JsonValue) -> Result<()>, writer: &mut dyn io::Write, ) -> Result<()> { let invalid = || util::Error::InvalidGfab { message: String::from("invalid tags_json"), table: String::from(table), rowid: rowid, }; let mut tags = json::parse(&tags_json).map_err(|_| invalid())?; editor(rowid, &mut tags)?; for (k, v) in tags.entries() { let kfields: Vec<&str> = k.split(':').collect(); if kfields.len()!= 2 { return Err(invalid()); } let vstr = match kfields[1] { "A" | "Z" | "H" => JsonValue::as_str(v).ok_or_else(invalid)?.to_string(), "i" => JsonValue::as_i64(v).ok_or_else(invalid)?.to_string(), "f" => JsonValue::as_f64(v).ok_or_else(invalid)?.to_string(), // TODO: B & J _ => return Err(invalid()), }; writer.write_fmt(format_args!("\t{}:{}", k, vstr))?; } Ok(()) } fn write_tags(table: &str, rowid: i64, tags_json: &str, writer: &mut dyn io::Write) -> Result<()> { write_tags_with_editor(table, rowid, tags_json, |_, _| Ok(()), writer) } // Helpers roughly guessing a genomic range for a segment based on its PAF mappings. Selects the // chromosome with the most coverage in the mappings, then the min and max mapped position on that // chromosome. 
pub struct SegmentRangeGuesser<'a> { getter: rusqlite::Statement<'a>, csv_query: rusqlite::Statement<'a>, } impl<'a> SegmentRangeGuesser<'_> { pub fn new( db: &'a rusqlite::Connection, where_clause: &str, ) -> Result<SegmentRangeGuesser<'a>> { // analyze mappings to generate temp.segment_range_guess db.execute( "CREATE TABLE temp.segment_range_guess( segment_id INTEGER PRIMARY KEY, refseq_name TEXT NOT NULL, refseq_begin INTEGER NOT NULL, refseq_end INTEGER NOT NULL)", [], )?; let sql = format!( "WITH summary AS (SELECT segment_id, refseq_name, min(refseq_begin) AS min_begin, max(refseq_end) AS max_end, max(refseq_end) - min(refseq_begin) AS coverage, sum(refseq_end - refseq_begin) AS coverage2 FROM gfa1_segment_mapping {} GROUP BY segment_id, refseq_name) INSERT INTO temp.segment_range_guess(segment_id, refseq_name, refseq_begin, refseq_end) SELECT segment_id, refseq_name, min_begin, max_end FROM (SELECT segment_id, refseq_name, min_begin, max_end, row_number() OVER (PARTITION BY segment_id ORDER BY coverage DESC, coverage2 DESC) AS coverage_rank FROM summary) WHERE coverage_rank = 1", where_clause ); let n = db.execute(&sql, [])?; info!("guessed ranges for {} segments", n); // prepare queries on temp.segment_range_guess Ok(SegmentRangeGuesser { getter: db.prepare( "SELECT refseq_name, refseq_begin, refseq_end FROM temp.segment_range_guess WHERE segment_id =?", )?, csv_query: db.prepare( "SELECT coalesce(name, cast(segment_id AS TEXT)), refseq_name, refseq_begin, refseq_end FROM temp.segment_range_guess LEFT JOIN gfa1_segment_meta USING(segment_id)", )?, }) } pub fn get(&mut self, segment_id: i64) -> Result<Option<String>> { let maybe_row: Option<(String, i64, i64)> = self .getter .query_row(params![segment_id], |row| { Ok((row.get(0)?, row.get(1)?, row.get(2)?)) }) .optional()?; if let Some((refseq_name, refseq_begin, refseq_end)) = maybe_row { return Ok(Some(format!( "~{}:{}-{}", refseq_name, (refseq_begin + 1).to_formatted_string(&Locale::en), refseq_end.to_formatted_string(&Locale::en) ))); } Ok(None) } pub fn write_bandage_csv(&mut self, gfa_filename: &str) -> Result<()> { // write a CSV file with the guessed ranges that Bandage can show as labels let csv_filename = String::from(gfa_filename.strip_suffix(".gfa").unwrap_or(gfa_filename)) + ".guessed_ranges.csv"; { let mut writer = io::BufWriter::new(fs::File::create(&csv_filename)?); writer.write_fmt(format_args!("Name,Guessed range\n"))?; let mut cursor = self.csv_query.query([])?; while let Some(row) = cursor.next()? { let name: String = row.get(0)?; let refseq_name: String = row.get(1)?; let refseq_begin: i64 = row.get(2)?; let refseq_end: i64 = row.get(3)?; writer.write_fmt(format_args!( "\"{}\",\"~{}:{}-{}\"\n",
{ return Ok(Box::new(io::BufWriter::new(io::stdout()))); }
conditional_block
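SegmentRangeGuesser in the record above does its selection in SQLite: per (segment, chromosome) it aggregates min(begin)/max(end), ranks chromosomes by span coverage with row_number(), and keeps rank 1. An in-memory sketch of the same selection; the SQL's secondary tie-breaker (coverage2, the summed interval lengths) is omitted, and all names here are illustrative:

use std::collections::HashMap;

/// One PAF-style mapping of a segment onto a reference sequence.
struct Mapping {
    segment_id: i64,
    refseq: &'static str,
    begin: i64,
    end: i64,
}

/// For each segment, pick the reference sequence whose mappings span the
/// widest range (max end - min begin), and report that min..max range.
fn guess_ranges(mappings: &[Mapping]) -> HashMap<i64, (&'static str, i64, i64)> {
    // (segment, refseq) -> (min begin, max end)
    let mut summary: HashMap<(i64, &str), (i64, i64)> = HashMap::new();
    for m in mappings {
        let e = summary.entry((m.segment_id, m.refseq)).or_insert((m.begin, m.end));
        e.0 = e.0.min(m.begin);
        e.1 = e.1.max(m.end);
    }
    // Keep the best-covered refseq per segment (coverage = max_end - min_begin).
    let mut best: HashMap<i64, (&str, i64, i64)> = HashMap::new();
    for ((seg, refseq), (lo, hi)) in summary {
        let replace = match best.get(&seg) {
            Some(&(_, old_lo, old_hi)) => hi - lo > old_hi - old_lo,
            None => true,
        };
        if replace {
            best.insert(seg, (refseq, lo, hi));
        }
    }
    best
}

fn main() {
    let maps = [
        Mapping { segment_id: 1, refseq: "chr1", begin: 100, end: 200 },
        Mapping { segment_id: 1, refseq: "chr1", begin: 150, end: 900 },
        Mapping { segment_id: 1, refseq: "chr2", begin: 0, end: 50 },
    ];
    let ranges = guess_ranges(&maps);
    assert_eq!(ranges[&1], ("chr1", 100, 900));
}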
main.rs
use std::fs::File; use std::io::Write; use std::str; use std::collections::HashMap; use std::mem; use std::fmt; use std::thread; use std::sync::mpsc; use std::sync::{Mutex, Arc}; use std::time::Duration; use std::io::Read; use std::collections::HashSet; extern crate bio; extern crate clap; extern crate flate2; extern crate log; extern crate simple_logger; use clap::{Arg, App}; use bio::alignment::pairwise::Aligner; use bio::alignment::Alignment; use bio::io::fastq; use bio::alphabets::dna::revcomp; use log::{info}; use flate2::read::GzDecoder; const SEQ_NT4_TABLE: [u64; 256] = [ 0, 1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 ]; const IDX_TABLE: [u8; 4] = [ b'A', b'C', b'G', b'T' ]; fn
(seq: &[u8]) -> Result<u64, &str> { let mut res: u64 = 0; let mut res_rc: u64 = 0; let end = seq.len() - 1; for i in 0..seq.len() { if i >= 32 { return Err("Seq can't be longer than 32.") } let m = SEQ_NT4_TABLE[seq[i] as usize]; res |= m << i*2; res_rc |= (3 - m) << (end - i)*2; } if res > res_rc { mem::swap(&mut res, &mut res_rc) }; Ok(res) } fn recover_seq(code: u64, k: u8) -> String { let mut chars: Vec<u8> = Vec::with_capacity(k as usize); for i in 0..k { let mask: u64 = 3 << (i*2); let idx = (code & mask) >> (i*2); let b = IDX_TABLE[idx as usize]; chars.push(b); } String::from_utf8(chars).unwrap() } enum ExtractRes { Ok(String, String), ScoreTooLow, LeftTooShort, RightTooShort, } fn extract_pet(seq: &[u8], pattern: &[u8], flanking: u8, score_ratio_thresh: f32) -> (ExtractRes, Alignment) { // align linker to read let score = |a: u8, b: u8| if a == b {1i32} else {-1i32}; let mut aligner = Aligner::with_capacity(seq.len(), pattern.len(), -1, -1, score); let alignment = aligner.semiglobal(pattern, seq); // filter out non matched reads if (alignment.score as f32) < pattern.len() as f32 * score_ratio_thresh { return (ExtractRes::ScoreTooLow, alignment) } // filter out incomplete flanking if (alignment.ystart as u8) < flanking { return (ExtractRes::LeftTooShort, alignment) } let s = alignment.ystart - flanking as usize; let left = String::from_utf8(seq[s..alignment.ystart].to_vec()).unwrap(); let e = alignment.yend + flanking as usize; if e > alignment.ylen { return (ExtractRes::RightTooShort, alignment) } let right = String::from_utf8(seq[alignment.yend..e].to_vec()).unwrap(); (ExtractRes::Ok(left, right), alignment) } struct ResCounter { linker_reads: u64, score_too_low: u64, left_too_short: u64, right_too_short: u64, } impl ResCounter { fn new() -> Self { Self { linker_reads: 0, score_too_low: 0, left_too_short: 0, right_too_short: 0, } } fn count(&mut self, res: &ExtractRes) { match res { ExtractRes::Ok(_, _) => { self.linker_reads += 1 }, ExtractRes::ScoreTooLow => { self.score_too_low += 1 }, ExtractRes::LeftTooShort => { self.left_too_short += 1 }, ExtractRes::RightTooShort => { self.right_too_short += 1 }, } } } impl fmt::Display for ResCounter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let total = self.linker_reads + self.score_too_low + self.left_too_short + self.right_too_short; let ratio = |c| { if total == 0 { return format!("0%"); } format!("{:.2}%", ((c*100) as f64) / (total as f64)) }; write!(f, "Count result: linker reads\t{}\t{} score too low\t{}\t{} left too short\t{}\t{} right too short\t{}\t{} total reads: {}\n", self.linker_reads, ratio(self.linker_reads), self.score_too_low, ratio(self.score_too_low), self.left_too_short, ratio(self.left_too_short), self.right_too_short, ratio(self.right_too_short), total, ) } } fn main() { simple_logger::init().unwrap(); let matches = App::new("Extract and count seq pairs.") .arg(Arg::with_name("fq") .required(true) .help("Fastq file of reads 1.")) .arg(Arg::with_name("linker") .short("l") .long("linker") .required(true) .takes_value(true) .help("The linker sequence (not including enzyme).")) .arg(Arg::with_name("enzyme") .short("e") .long("enzyme") .required(true) .takes_value(true) .help("Enzyme recognition site.") ) .arg(Arg::with_name("output_prefix") .short("o") .long("output_prefix") .required(true) .takes_value(true) .help("Prefix of output files.")) .arg(Arg::with_name("flanking") .short("f") .long("flanking") .takes_value(true) .help("Flanking length.")) .arg(Arg::with_name("score_ratio_thresh") .short("s")
.long("score_ratio_thresh") .takes_value(true) .help("Threshold of (align score / pattern length)")) .arg(Arg::with_name("align_detail") .short("d") .long("detail") .takes_value(true) .help("Output the align detail.")) .arg(Arg::with_name("threads") .short("t") .long("threads") .takes_value(true) .help("Number of threads used for processing reads.")) .arg(Arg::with_name("wait_timeout") .long("wait_timeout") .takes_value(true) .help("Wait time for end channel timeout.")) .get_matches(); let fq_path = matches.value_of("fq").unwrap(); let out_prefix = matches.value_of("output_prefix").unwrap(); let linker = matches.value_of("linker").unwrap(); let enzyme = matches.value_of("enzyme").unwrap_or("GTTGGA"); let flanking = matches.value_of("flanking").unwrap_or("13"); let flanking: u8 = flanking.parse().unwrap(); let score_ratio_thresh = matches.value_of("score_ratio_thresh").unwrap_or("0.6"); let score_ratio_thresh: f32 = score_ratio_thresh.parse().unwrap(); let threads = matches.value_of("threads").unwrap_or("1"); let threads: u8 = threads.parse().unwrap(); let wait_t = matches.value_of("wait_timeout").unwrap_or("500"); let wait_t: u64 = wait_t.parse().unwrap(); let mut detail_file = match matches.value_of("align_detail") { Some(p) => Some(File::create(p).unwrap()), None => None, }; let fq_file: Box<dyn Read + Send + Sync> = if fq_path.ends_with(".gz") { Box::new(GzDecoder::new(File::open(fq_path).unwrap())) } else { Box::new(File::open(fq_path).unwrap()) }; let fq = fastq::Reader::new(fq_file); let records = fq.records(); let mut freq: HashMap<(u64, u64), u64> = HashMap::new(); let l_vec = linker.as_bytes().to_vec(); let e_vec = enzyme.as_bytes().to_vec(); let e_rc = revcomp(&e_vec); let l_rc = revcomp(&l_vec); let patterns = [ [&e_vec[..], &l_vec[..], &e_rc].concat(), [&e_vec[..], &l_rc[..], &e_rc].concat(), ]; info!("patterns:\n {}\n {}", str::from_utf8(&patterns[0]).unwrap(), str::from_utf8(&patterns[1]).unwrap(), ); let mut counter = ResCounter::new(); let records = Arc::new(Mutex::new(records)); let patterns = Arc::new(patterns); let mut handles = vec![]; let (tx, rx) = mpsc::channel(); info!("Run with {} threads.", threads); for _ in 0..threads { let records = Arc::clone(&records); let patterns = Arc::clone(&patterns); let tx1 = mpsc::Sender::clone(&tx); let handle = thread::spawn(move || { loop { // read seq from fq file let rec = { let mut records = records.lock().unwrap(); match records.next() { Some(r) => match r { Ok(r_) => r_, Err(e) => panic!("{:?}", e), }, None => break } }; let seq = String::from_utf8(rec.seq().to_vec()).unwrap(); let mut align_res: Vec<(ExtractRes, Alignment)> = Vec::with_capacity(2); for pattern in patterns.iter() { align_res.push(extract_pet(seq.as_bytes(), &pattern, flanking, score_ratio_thresh)); let res = &align_res[align_res.len()-1].0; match res { ExtractRes::Ok(_, _) => { break }, _ => { continue }, } } let rec_id = String::from(rec.id()); tx1.send((align_res, rec_id)).unwrap(); } }); handles.push(handle); } loop { match rx.recv_timeout(Duration::from_millis(wait_t)) { Ok((align_res, rec_id)) => { let res = &align_res[align_res.len()-1]; let alignment = &res.1; if let Some(mut f) = detail_file { // write align detail let _ = writeln!(f, "{}\t{}\t{}\t{}\t{}", rec_id, align_res.len(), alignment.score, alignment.ystart, alignment.yend, ); detail_file = Some(f); } // count left-right pair if let ExtractRes::Ok(left, right) = &res.0 { let mut key: (u64, u64) = (compress_seq(left.as_bytes()).unwrap(), compress_seq(right.as_bytes()).unwrap()); if key.0 > key.1 
{ mem::swap(&mut key.0, &mut key.1) }; *freq.entry(key).or_insert(0) += 1; } counter.count(&res.0); }, _ => { info!("End processing."); break; } } } for handle in handles { // wait for all threads to finish handle.join().unwrap(); } let cnt_path = format!("{}.cnt", out_prefix); let fq_out_path = format!("{}.cnt.fq", out_prefix); let mut cnt_file = File::create(cnt_path.clone()).unwrap(); let fq_out_file = File::create(fq_out_path.clone()).unwrap(); let mut fq_out = fastq::Writer::new(fq_out_file); let mut key_set = HashSet::new(); let mut kv_vec = vec![]; for (k, v) in &freq { key_set.insert(k.0); key_set.insert(k.1); kv_vec.push((k.0, k.1, v)); } info!("In total, {} kinds of pairs and {} kinds of sequences were found.", freq.len(), key_set.len()); kv_vec.sort_by(|a, b| b.2.cmp(a.2)); info!("Write pair counts to tsv file: {}", cnt_path); for (k0, k1, v) in kv_vec { let _ = writeln!(cnt_file, "{}\t{}\t{}", k0, k1, v); } info!("Write sequences to fastq file: {}", fq_out_path); let mut key_vec = key_set.into_iter().collect::<Vec<u64>>(); key_vec.sort(); for k in key_vec { let seq = recover_seq(k, flanking); let id = format!("{}", k); let qual = vec![b'~'; seq.len()]; let _ = fq_out.write( &id, Option::None, seq.as_bytes(), &qual, ); } info!("{}", counter); }
compress_seq
identifier_name
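compress_seq in the record above packs up to 32 bases into a u64 at two bits per base, building the reverse-complement encoding in the same pass and keeping the smaller of the two values, so a sequence and its reverse complement share one canonical code; recover_seq inverts the packing. A condensed round trip of that scheme, with the lookup table replaced by a match (encode_canonical and decode are my names, not the program's):

/// 2-bit encode 1..=32 bases, canonicalized against the reverse complement,
/// following the scheme of compress_seq/recover_seq above.
fn encode_canonical(seq: &[u8]) -> Result<u64, &'static str> {
    if seq.is_empty() || seq.len() > 32 {
        return Err("sequence must be 1..=32 bases");
    }
    let (mut fwd, mut rc) = (0u64, 0u64);
    let end = seq.len() - 1;
    for (i, &b) in seq.iter().enumerate() {
        let code: u64 = match b {
            b'A' | b'a' => 0,
            b'C' | b'c' => 1,
            b'G' | b'g' => 2,
            b'T' | b't' => 3,
            _ => return Err("non-ACGT base"),
        };
        fwd |= code << (2 * i);
        rc |= (3 - code) << (2 * (end - i)); // complement, mirrored position
    }
    Ok(fwd.min(rc)) // canonical form: the smaller of the two encodings
}

fn decode(code: u64, k: u8) -> String {
    (0..k)
        .map(|i| [b'A', b'C', b'G', b'T'][((code >> (2 * i)) & 3) as usize] as char)
        .collect()
}

fn main() {
    let a = encode_canonical(b"ACGTT").unwrap();
    let b = encode_canonical(b"AACGT").unwrap(); // reverse complement of ACGTT
    assert_eq!(a, b); // both map to the same canonical code
    let k = decode(a, 5);
    assert!(k == "ACGTT" || k == "AACGT");
}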
main.rs
use std::fs::File; use std::io::Write; use std::str; use std::collections::HashMap; use std::mem; use std::fmt; use std::thread; use std::sync::mpsc; use std::sync::{Mutex, Arc}; use std::time::Duration; use std::io::Read; use std::collections::HashSet; extern crate bio; extern crate clap; extern crate flate2; extern crate log; extern crate simple_logger; use clap::{Arg, App}; use bio::alignment::pairwise::Aligner; use bio::alignment::Alignment; use bio::io::fastq; use bio::alphabets::dna::revcomp; use log::{info}; use flate2::read::GzDecoder; const SEQ_NT4_TABLE: [u64; 256] = [ 0, 1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 ]; const IDX_TABLE: [u8; 4] = [ b'A', b'C', b'G', b'T' ]; fn compress_seq(seq: &[u8]) -> Result<u64, &str> { let mut res: u64 = 0; let mut res_rc: u64 = 0; let end = seq.len() - 1; for i in 0..seq.len() { if i >= 32 { return Err("Seq can't longer than 32.") } let m = SEQ_NT4_TABLE[seq[i] as usize]; res |= m << i*2; res_rc |= (3 - m) << (end - i)*2; } if res > res_rc { mem::swap(&mut res, &mut res_rc) }; Ok(res) } fn recover_seq(code: u64, k: u8) -> String { let mut chars: Vec<u8> = Vec::with_capacity(k as usize); for i in 0..k { let mask: u64 = 3 << (i*2); let idx = (code & mask) >> (i*2); let b = IDX_TABLE[idx as usize]; chars.push(b); } String::from_utf8(chars).unwrap() } enum ExtractRes { Ok(String, String), ScoreTooLow, LeftTooShort, RightTooShort, } fn extract_pet(seq: &[u8], pattern: &[u8], flanking: u8, score_ratio_thresh: f32) -> (ExtractRes, Alignment) { // align linker to read let score = |a: u8, b: u8| if a == b {1i32} else {-1i32}; let mut aligner = Aligner::with_capacity(seq.len(), pattern.len(), -1, -1, score); let alignment = aligner.semiglobal(pattern, seq); // filter out non matched reads if (alignment.score as f32) < pattern.len() as f32 * score_ratio_thresh { return (ExtractRes::ScoreTooLow, alignment) } // filter out incomplete flanking if (alignment.ystart as u8) < flanking { return (ExtractRes::LeftTooShort, alignment) } let s = alignment.ystart - flanking as usize; let left = String::from_utf8(seq[s..alignment.ystart].to_vec()).unwrap(); let e = alignment.yend + flanking as usize; if e > alignment.ylen { return (ExtractRes::RightTooShort, alignment) } let right = String::from_utf8(seq[alignment.yend..e].to_vec()).unwrap(); (ExtractRes::Ok(left, right), alignment) } struct ResCounter { linker_reads: u64, score_too_low: u64, left_too_short: u64, right_too_short: u64, } impl ResCounter { fn new() -> Self { Self { linker_reads: 0, score_too_low: 0, left_too_short: 0, right_too_short: 0, } } fn count(&mut self, res: &ExtractRes) { match res{ ExtractRes::Ok(_, _) =>{ self.linker_reads += 1 }, ExtractRes::ScoreTooLow =>{ self.score_too_low += 1 }, ExtractRes::LeftTooShort =>{ self.left_too_short += 1 }, ExtractRes::RightTooShort =>{ 
self.right_too_short += 1 }, } } } impl fmt::Display for ResCounter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let total = self.linker_reads + self.score_too_low + self.left_too_short + self.right_too_short; let ratio = |c| { if total == 0 { return format!("0%"); } format!("{:.2}%", ((c*100) as f64) / (total as f64)) }; write!(f, "Count result: linker reads\t{}\t{} score too low\t{}\t{} left too short\t{}\t{} right too short\t{}\t{} total reads: {}\n", self.linker_reads, ratio(self.linker_reads), self.score_too_low, ratio(self.score_too_low), self.left_too_short, ratio(self.left_too_short), self.right_too_short, ratio(self.right_too_short), total, ) } } fn main() { simple_logger::init().unwrap(); let matches = App::new("Extract and count seq pairs.") .arg(Arg::with_name("fq") .required(true) .help("Fastq file of reads 1.")) .arg(Arg::with_name("linker") .short("l") .long("linker") .required(true) .takes_value(true) .help("The linker sequence (not including enzyme).")) .arg(Arg::with_name("enzyme") .short("e") .long("enzyme") .required(true) .takes_value(true) .help("Enzyme recognition site.") ) .arg(Arg::with_name("output_prefix") .short("o") .long("output_prefix") .required(true) .takes_value(true) .help("Prefix of output files.")) .arg(Arg::with_name("flanking") .short("f") .long("flanking") .takes_value(true) .help("Flanking length.")) .arg(Arg::with_name("score_ratio_thresh") .short("s") .long("score_ratio_thresh") .takes_value(true) .help("Threshold of (align score / pattern length)")) .arg(Arg::with_name("align_detail") .short("d") .long("detail") .takes_value(true) .help("Output the align detail.")) .arg(Arg::with_name("threads") .short("t") .long("threads") .takes_value(true) .help("Number of threads used for processing reads.")) .arg(Arg::with_name("wait_timeout") .long("wait_timeout") .takes_value(true) .help("Wait time for end channel timeout.")) .get_matches(); let fq_path = matches.value_of("fq").unwrap(); let out_prefix = matches.value_of("output_prefix").unwrap(); let linker = matches.value_of("linker").unwrap(); let enzyme = matches.value_of("enzyme").unwrap_or("GTTGGA"); let flanking = matches.value_of("flanking").unwrap_or("13"); let flanking: u8 = flanking.parse().unwrap(); let score_ratio_thresh = matches.value_of("score_ratio_thresh").unwrap_or("0.6"); let score_ratio_thresh: f32 = score_ratio_thresh.parse().unwrap(); let threads = matches.value_of("threads").unwrap_or("1"); let threads: u8 = threads.parse().unwrap(); let wait_t = matches.value_of("wait_timeout").unwrap_or("500"); let wait_t: u64 = wait_t.parse().unwrap(); let mut detail_file = match matches.value_of("align_detail") { Some(p) => Some(File::create(p).unwrap()), None => None, }; let fq_file: Box<dyn Read + Send + Sync> = if fq_path.ends_with(".gz") { Box::new(GzDecoder::new(File::open(fq_path).unwrap())) } else { Box::new(File::open(fq_path).unwrap()) }; let fq = fastq::Reader::new(fq_file); let records = fq.records(); let mut freq: HashMap<(u64, u64), u64> = HashMap::new(); let l_vec = linker.as_bytes().to_vec(); let e_vec = enzyme.as_bytes().to_vec(); let e_rc = revcomp(&e_vec); let l_rc = revcomp(&l_vec); let patterns = [ [&e_vec[..], &l_vec[..], &e_rc].concat(), [&e_vec[..], &l_rc[..], &e_rc].concat(), ]; info!("patterns:\n {}\n {}", str::from_utf8(&patterns[0]).unwrap(), str::from_utf8(&patterns[1]).unwrap(), ); let mut counter = ResCounter::new(); let records = Arc::new(Mutex::new(records)); let patterns = Arc::new(patterns); let mut handles = vec![]; let (tx, rx) = mpsc::channel();
info!("Run with {} threads.", threads); for _ in 0..threads { let records = Arc::clone(&records); let patterns = Arc::clone(&patterns); let tx1 = mpsc::Sender::clone(&tx); let handle = thread::spawn(move || { loop { // read seq from fq file let rec = { let mut records = records.lock().unwrap(); match records.next() { Some(r) => match r { Ok(r_) => r_, Err(e) => panic!("{:?}", e), }, None => break } }; let seq = String::from_utf8(rec.seq().to_vec()).unwrap(); let mut align_res: Vec<(ExtractRes, Alignment)> = Vec::with_capacity(2); for pattern in patterns.iter() { align_res.push(extract_pet(seq.as_bytes(), &pattern, flanking, score_ratio_thresh)); let res = &align_res[align_res.len()-1].0; match res { ExtractRes::Ok(_, _) => { break }, _ => { continue }, } } let rec_id = String::from(rec.id()); tx1.send((align_res, rec_id)).unwrap(); } }); handles.push(handle); } loop { match rx.recv_timeout(Duration::from_millis(wait_t)) { Ok((align_res, rec_id)) => { let res = &align_res[align_res.len()-1]; let alignment = &res.1; if let Some(mut f) = detail_file { // write align detail let _ = writeln!(f, "{}\t{}\t{}\t{}\t{}", rec_id, align_res.len(), alignment.score, alignment.ystart, alignment.yend, ); detail_file = Some(f); } // count left-right pair if let ExtractRes::Ok(left, right) = &res.0 { let mut key: (u64, u64) = (compress_seq(left.as_bytes()).unwrap(), compress_seq(right.as_bytes()).unwrap()); if key.0 > key.1 { mem::swap(&mut key.0, &mut key.1) }; *freq.entry(key).or_insert(0) += 1; } counter.count(&res.0); }, _ => { info!("End processing."); break; } } } for handle in handles { // wait all threads fishish handle.join().unwrap(); } let cnt_path = format!("{}.cnt", out_prefix); let fq_out_path = format!("{}.cnt.fq", out_prefix); let mut cnt_file = File::create(cnt_path.clone()).unwrap(); let fq_out_file = File::create(fq_out_path.clone()).unwrap(); let mut fq_out = fastq::Writer::new(fq_out_file); let mut key_set = HashSet::new(); let mut kv_vec = vec![]; for (k, v) in &freq { key_set.insert(k.0); key_set.insert(k.1); kv_vec.push((k.0, k.1, v)); } info!("Totally {} kinds of pairs and {} kinds of sequences were founded.", freq.len(), key_set.len()); kv_vec.sort_by(|a, b| b.2.cmp(a.2)); info!("Write pair counts to tsv file: {}", cnt_path);
for (k0, k1, v) in kv_vec { let _ = writeln!(cnt_file, "{}\t{}\t{}", k0, k1, v); } info!("Write sequences to fastq file: {}", fq_out_path); let mut key_vec = key_set.into_iter().collect::<Vec<u64>>(); key_vec.sort(); for k in key_vec { let seq = recover_seq(k, flanking); let id = format!("{}", k); let qual = vec![b'~'; seq.len()]; let _ = fq_out.write( &id, Option::None, seq.as_bytes(), &qual, ); } info!("{}", counter); }
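The counting loop above keys every extracted left/right pair by `compress_seq` and the output loop restores sequences with `recover_seq`; neither helper is shown in this excerpt. A minimal sketch of what such a pair of helpers could look like, assuming a plain 2-bit ACGT packing into a `u64` (at most 32 bases, so the default flanking of 13 fits comfortably) — the names mirror the call sites above, but the bodies are an assumption:

```rust
/// Hypothetical 2-bit packing of a DNA sequence into a u64.
/// Returns None for non-ACGT bytes (mirroring the `.unwrap()` call sites above).
fn compress_seq(seq: &[u8]) -> Option<u64> {
    if seq.len() > 32 {
        return None; // more than 32 bases cannot fit into 64 bits
    }
    let mut packed: u64 = 0;
    for &base in seq {
        let code = match base {
            b'A' | b'a' => 0,
            b'C' | b'c' => 1,
            b'G' | b'g' => 2,
            b'T' | b't' => 3,
            _ => return None,
        };
        packed = (packed << 2) | code;
    }
    Some(packed)
}

/// The matching decoder: `len` is the original length (the tool passes
/// `flanking`), so leading 'A' bases (code 0) are not silently dropped.
fn recover_seq(mut packed: u64, len: u8) -> String {
    let mut bases = vec![b'A'; len as usize];
    for slot in bases.iter_mut().rev() {
        *slot = match packed & 3 {
            0 => b'A',
            1 => b'C',
            2 => b'G',
            _ => b'T',
        };
        packed >>= 2;
    }
    String::from_utf8(bases).unwrap()
}
```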
random_line_split
network.rs
Vec<Option<Crossroad>>> { type Output = Option<Crossroad>; #[inline] fn index(&self, index: CrossroadId) -> &Option<Crossroad> { &self[index.y][index.x] } } /// Allows mutable indexing of the grid by the crossroad coordinates. impl IndexMut<CrossroadId> for Vec<Vec<Option<Crossroad>>> { #[inline] fn index_mut(&mut self, index: CrossroadId) -> &mut Option<Crossroad> { &mut self[index.y][index.x] } } use std::fmt; impl fmt::Display for CrossroadId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "({}, {})", self.x, self.y) } } use std::ops::Add; /// Allows to add some move to crossroad coordinates. impl Add<(i32, i32)> for CrossroadId { type Output = CrossroadId; fn add(self, (x, y): (i32, i32)) -> CrossroadId { CrossroadId { x: (self.x as i32 + x) as usize, y: (self.y as i32 + y) as usize, } } } impl CrossroadId { /// Creates new crossroad identifier. pub fn new(x: usize, y: usize) -> CrossroadId { CrossroadId { x, y } } /// Computes the unit move (dx, dy) and the length to join the destination crossroad. pub fn join(&self, dest: CrossroadId) -> (i32, i32, i32) { if self.x == dest.x { let dy = (dest.y as i32) - (self.y as i32); let len = i32::abs(dy); (0, dy / len, len) } else if self.y == dest.y { let dx = (dest.x as i32) - (self.x as i32); let len = i32::abs(dx); (dx / len, 0, len) } else { panic!("Crossroads {} and {} are not linkable.", self, dest); } } } /// A Crossroad. #[derive(Clone)] pub struct Crossroad { id: CrossroadId, // Coordinates pub nodes: Vec<NodeId>, // Vector of its 4 quarter nodes. // They are indexed by direction. roads: Vec<Vec<Option<RoadId>>>, // Roads leaving this crossroad. // They are indexed by direction and side. roads_arriving: Vec<Vec<Option<RoadId>>>, // Roads arriving at this crossroad. // They are indexed by direction and side. } impl Crossroad { /// Creates a new crossroad with four nodes without any roads. pub fn new(id: CrossroadId, g: &mut Graph) -> Crossroad { let mut c = Crossroad { id, nodes: vec!(), roads: none_array(4, 2), roads_arriving: none_array(4, 2), }; for _ in 0..4 { c.nodes.push(g.add_node(c.id)); } c } /// Enables some roads. Only the cars from enabled roads are able to cross a crossroad. fn enable_path(&self, roads: &mut Vec<Road>) { // First policy: we enable the most loaded road with some guy waiting. // let mut max = -1; // let mut r_max = 0; // for r in self.existing_roads_arriving() { // if roads[r].is_waiting() && roads[r].get_car_count() > max { // r_max = r; // max = roads[r].get_car_count(); // } // } // roads[r_max].enable(); // Second policy: we enable the most loaded roads with guys waiting, but in pairs. // We compute the pair of compatible roads with the maximum cumulated load. let mut max_pair = ((NORTH, LEFT), (NORTH, LEFT)); let mut max_load = 0; for d in 0..4 { for s in 0..2 { for x in 0..2 { let (d2, s2) = { if x == 0 { (d, 1 - s) } else { ((d + 2) % 4, s) } }; let load = self.compute_load(d, s, roads) + self.compute_load(d2, s2, roads); if load > max_load { max_load = load; max_pair = ((d, s), (d2, s2)); } } } } let ((d1, s1), (d2, s2)) = max_pair; if self.roads_arriving[d1][s1].is_some() { roads[self.roads_arriving[d1][s1].unwrap()].enable(); } if self.roads_arriving[d2][s2].is_some() { roads[self.roads_arriving[d2][s2].unwrap()].enable(); } } /// Computes the load of a road, i.e. the numbers of cars on this road. /// If there is no car ready to cross, returns 0. 
fn compute_load(&self, direction: usize, side: usize, roads: &mut Vec<Road>) -> i32 { let r = self.roads_arriving[direction][side]; if r.is_none() || !roads[r.unwrap()].is_waiting() { return 0; } return roads[r.unwrap()].get_car_count(); } } impl Network { /// Creates a new empty Network, with specified width and height. pub fn new(width: usize, height: usize) -> Network { Network { width, height, car_count: 0, cars_per_unit: 10, cars_per_crossroad: 4, grid: none_array(height, width), roads: vec!(), graph: Graph::new(), car_graph: None, crossroads: vec!(), } } /// Adds a crossroad at the specified location. pub fn add_crossroad(&mut self, x: usize, y: usize) { let c = CrossroadId::new(x, y); // We check the crossroad does not exist. self.assert_crossroad_not_exists(c); // We add it to the graph and update the network. self.grid[c] = Some(Crossroad::new(c, &mut self.graph)); self.crossroads.push(c); } /// Adds a new specific road. pub fn new_road(&mut self, src: CrossroadId, dest: CrossroadId, side: Side) { // We get the parameters of the road. let (dx, dy, length) = src.join(dest); let length = length * self.cars_per_unit - self.cars_per_crossroad; let (d1, d2) = compute_directions(dx, dy, side); let id = self.roads.len(); // First, it builds the road in the network. let road_info = RoadInfo { id, start: src, end: dest, side, destination: self.crossroad(dest).nodes[d2], length: length as usize, }; // Then, we add it to the crossroads and the roads. let road = Road::new(road_info); self.roads.push(road); self.crossroad_mut(src).roads[d1][side] = Some(id); self.crossroad_mut(dest).roads_arriving[d1][side] = Some(id); // Then, it builds the two corresponding edges in the graph. let (n1, n2) = { let c = self.crossroad(src); (c.nodes[d1], c.nodes[previous_direction(d1)]) }; let n3 = self.crossroad(dest).nodes[d2]; self.graph.add_edge(n1, n3, id); self.graph.add_edge(n2, n3, id); } /// Adds the two roads linking the first crossroad to the second one. pub fn add_road(&mut self, (src_x, src_y): (usize, usize), (dest_x, dest_y): (usize, usize)) { let (src, dest) = (CrossroadId::new(src_x, src_y), CrossroadId::new(dest_x, dest_y)); // Checks the source and destination crossroads exist. self.assert_crossroad_exists(src); self.assert_crossroad_exists(dest); // Checks that they are aligned. let (dx, dy, length) = src.join(dest); // Checks that the road can be built between the two crossroads, i.e. that it does not // generate any collision. for k in 1..length { self.assert_crossroad_not_exists(src + (k*dx, k*dy)); } // Creates both roads. self.new_road(src, dest, LEFT); self.new_road(src, dest, RIGHT); } /// Adds all roads between the crossroads `c1` and `c2`. pub fn add_all_roads(&mut self, c1: (usize, usize), c2: (usize, usize)) { self.add_road(c1, c2); self.add_road(c2, c1); } /// Panics if the crossroad does not exist. pub fn assert_crossroad_exists(&self, c: CrossroadId) { if self.grid[c].is_none() { panic!("This crossroad {} does not exist.", c); } } /// Panics if the crossroad already exists. pub fn assert_crossroad_not_exists(&self, c: CrossroadId) { if self.grid[c].is_some() { panic!("This crossroad {} already exists.", c); } } /// Retrieves the specified crossroad. Panics if it does not exist. pub fn crossroad(&self, c: CrossroadId) -> &Crossroad { self.grid[c].as_ref().unwrap() } /// Retrieves a mutable reference to the specified crossroad. Panics if it does not exist. pub fn crossroad_mut(&mut self, c: CrossroadId) -> &mut Crossroad { self.grid[c].as_mut().unwrap() } /// Creates a new car.
It transfers the current graph to the car, with a fresh identifier. pub fn create_car(&mut self) -> Car { if self.car_graph.is_none() { // If needed, we generate this shared reference. self.car_graph = Some(Arc::new(self.clone_graph())); } let id = self.car_count; self.car_count += 1; Car::new(id, 0, CrossroadId::new(0, 0), self.car_graph.clone().unwrap()) } /// Spawns a car on a random road, and finds a random destination. pub fn generate_request(&mut self, id: CarId) -> (RoadInfo, usize, CrossroadId) { // First, it finds a road to spawn the car. let mut rng = rand::thread_rng(); let mut road_id = rng.gen_range(0, self.roads.len()); let mut pos = self.roads[road_id].spawn_car(id); while pos == -1 { road_id = rng.gen_range(0, self.roads.len()); pos = self.roads[road_id].spawn_car(id); } // Then, it gets the crossroad at the end of this road. let road_info = self.roads[road_id].info(); let source_c = road_info.end; // It randomly chooses a crossroad different from the previous crossroad. let mut destination = self.random_crossroad(); while destination == source_c { destination = self.random_crossroad(); } // Returns the final spawn position and destination. (road_info, pos as usize, destination) } /// Spawns all the cars that requested it. Updates the moves vector with the resulting spawns. pub fn spawn_cars(&mut self, actions: Vec<Action>, moves: &mut Vec<Move>) { for (i, a) in actions.iter().enumerate() { if let Action::SPAWN = *a { let (road_info, pos, destination) = self.generate_request(i); moves[i] = Move::SPAWN(road_info, pos, destination); } } } /// Makes the crossroads enable some roads. pub fn enable_paths(&mut self) { for &c in &self.crossroads { self.grid[c].as_ref().unwrap().enable_path(&mut self.roads); } } /// Performs an update step on all roads, based on the Actions and Speeds vector. /// Updates the resulting Moves vector, and returns the EdgesWeight estimation. pub fn roads_step(&mut self, actions: &mut Vec<Action>, moves: &mut Vec<Move>, speeds: &Vec<Speed>) -> EdgesWeight { let roads = &mut self.roads; // All the possible enabled paths are tried. for i in 0..roads.len() { // Each road tries to make its first car cross, if enabled. Road::deliver(i, actions, moves, roads); } // We make a step for all remaining cars, and get the weights estimations. let mut weights = vec!(); for i in 0..roads.len() { weights.push(roads[i].step_forward(moves, speeds)); } let edges_weight = EdgesWeight::new(weights); return edges_weight } /// Returns the central reactive process of the network. pub fn process(mut self, central_signal: SPMCSignalSender<Arc<GlobalInfo>>, pos_signal: MPSCSignalReceiver<(CarId, (Action, Speed)), (Vec<Action>, Vec<Speed>)>) -> impl Process<Value=()> { let mut weights = vec!(); for r in &self.roads { weights.push(r.weight()); } let mut step = 0; let mut mean_moves = self.car_count as f32; let beta = 0.99; let cont = move | (mut actions, speeds): (Vec<Action>, Vec<Speed>) | { // We count the steps. step += 1; // We enable some paths. self.enable_paths(); // We compute the road step and get back some weights. let mut moves = (0..actions.len()).map(|_| { Move::NONE }).collect(); let weights = self.roads_step(&mut actions, &mut moves, &speeds); // We spawn the cars that requested it. self.spawn_cars(actions, &mut moves); // We count the number of cars that did something. let nb_moves: i32 = moves.iter().map(| m | { match m { &Move::NONE => 0, _ => 1, }}).sum(); // We keep some moving mean of this number. If it is too low, nothing is happening, so // it panics.
mean_moves = beta * mean_moves + (1. - beta) * (nb_moves as f32); if mean_moves < 1e-3 { panic!("It looks like a stationary state: not enough moves."); } // Returns the updated information about the step. Arc::new(GlobalInfo { weights, moves }) }; let p = pos_signal.await_in() // Awaits the car actions .map(cont) // Computes the resulting moves and weights. .emit_consume(central_signal) // Emits this information. .loop_inf(); // Loops. return p; } /// Returns a String representing the network. pub fn to_string(&self) -> String { // We first build the corresponding char two-dimensional vector. let (width, height) = (2 * self.width - 1, 2 * self.height - 1); let mut char_map: Vec<Vec<char>> = (0..height).map(|_| { (0..width).map(|_| { ' ' }).collect()}).collect(); // Then we add the crossroads. for c in &self.crossroads { char_map[2 * c.y][2 * c.x] = 'C'; } // Then we add the roads. for r in &self.roads { let start = r.info().start; let (dx, dy, length) = start.join(r.info().end); // Chooses the right symbol. let c = if dx == 0
else { '-' }; let (x, y) = (2*start.x, 2*start.y); for k in 1..(2*length) { char_map[(y as i32 + k * dy) as usize][(x as i32 + k * dx) as usize] = c; } } // We collect the characters into a string. char_map.into_iter().map(|line| { line.into_iter().collect::<String>().add("\n") }).collect() } /// Loads a network from a file located in trafficsim/maps/. pub fn load_file(&mut self, filename: &str) { let mut f = File::open(format!("./src/trafficsim/maps/{}", filename)).expect("File not found"); let mut contents = String::new(); f.read_to_string(&mut contents) .expect("Something went wrong reading the file"); self.load_string(&contents); } /// Loads a network from a string. pub fn load_string(&mut self, s: &str) { // We remove ending blank lines. let s = s.trim_right(); // We split lines and remove ending spaces and `\n`. let mut char_map: Vec<Vec<char>> = s.split("\n") .map(| line | { line.trim_right().chars().collect() }) .collect(); // We compute the resulting width and height of the character array. let width = char_map.iter().map(| line | { line.len() }).max().unwrap(); let height = char_map.len(); // We add missing spaces. for line in char_map.iter_mut() { for _ in 0..(width - line.len()) { line.push(' '); } } // We change the network size. *self = Network::new((width + 1) / 2, (height + 1) / 2); // Then, we add all the crossroads. for (j, line) in char_map.iter().enumerate() { for (i, c) in line.iter().enumerate() { if *c == 'C' { self.add_crossroad(i / 2, j / 2); } } } // Then we add the horizontal roads. for (j, line) in char_map.iter().enumerate() { let mut last_crossroad = None; let mut road_length = 0; for (i, c) in line.iter().enumerate() { if *c == 'C' { if last_crossroad.is_some() && road_length > 0 { self.add_all_roads(last_crossroad.unwrap(), (i / 2, j / 2)); } last_crossroad = Some((i / 2, j / 2)); road_length = 0; } else
{ '|' }
conditional_block
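The second policy in `enable_path` above boils down to an argmax over compatible pairs of arriving roads: a road `(d, s)` may be paired either with the opposite side of the same direction `(d, 1 - s)` or with the same side of the opposite direction `((d + 2) % 4, s)`. Isolated from the road bookkeeping, the selection reduces to the following sketch over plain per-road loads (not the actual `Road` type):

```rust
/// Picks the compatible pair of arriving roads with the highest combined load.
/// `loads[d][s]` is the load of the road arriving from direction `d` on side `s`
/// (0 when no car is waiting), matching `compute_load` above.
fn best_compatible_pair(loads: &[[i32; 2]; 4]) -> ((usize, usize), (usize, usize)) {
    let mut best_pair = ((0, 0), (0, 0));
    let mut best_load = 0;
    for d in 0..4 {
        for s in 0..2 {
            // The two crossings compatible with (d, s).
            for &(d2, s2) in &[(d, 1 - s), ((d + 2) % 4, s)] {
                let load = loads[d][s] + loads[d2][s2];
                if load > best_load {
                    best_load = load;
                    best_pair = ((d, s), (d2, s2));
                }
            }
        }
    }
    best_pair
}
```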
network.rs
// with specified destination crossroad. STEP(i32), // The car performs a step of specified length. VANISH, // The car vanished. CROSS(RoadInfo), // The car crossed and is now on specified road. } /// Network structure containing all the information relative to crossroads and roads. #[derive(Clone)] pub struct Network { pub width: usize, // Width of the network. pub height: usize, // Height of the network. pub car_count: usize, // Number of cars. pub cars_per_unit: i32, // Number of cars between two centers of crossroads. pub cars_per_crossroad: i32, // Number of cars fitting in a crossroad. grid: Vec<Vec<Option<Crossroad>>>, // Grid containing the crossroads. pub roads: Vec<Road>, // Vector containing the roads. graph: Graph, // Corresponding abstract graph. car_graph: Option<Arc<Graph>>, // Shared reference to the same graph. pub crossroads: Vec<CrossroadId>, // Vector containing all the coordinates of existing // crossroads. } /// Crossroad Coordinates. #[derive(Copy, Clone, Eq, PartialEq)] pub struct CrossroadId { pub x: usize, // Abscissa pub y: usize, // Ordinate } use std::ops::{ Index, IndexMut }; /// Allows indexing the grid by the crossroad coordinates. impl Index<CrossroadId> for Vec<Vec<Option<Crossroad>>> { type Output = Option<Crossroad>; #[inline] fn index(&self, index: CrossroadId) -> &Option<Crossroad> { &self[index.y][index.x] } } /// Allows mutable indexing of the grid by the crossroad coordinates. impl IndexMut<CrossroadId> for Vec<Vec<Option<Crossroad>>> { #[inline] fn index_mut(&mut self, index: CrossroadId) -> &mut Option<Crossroad> { &mut self[index.y][index.x] } } use std::fmt; impl fmt::Display for CrossroadId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "({}, {})", self.x, self.y) } } use std::ops::Add; /// Allows to add some move to crossroad coordinates. impl Add<(i32, i32)> for CrossroadId { type Output = CrossroadId; fn add(self, (x, y): (i32, i32)) -> CrossroadId { CrossroadId { x: (self.x as i32 + x) as usize, y: (self.y as i32 + y) as usize, } } } impl CrossroadId { /// Creates new crossroad identifier. pub fn new(x: usize, y: usize) -> CrossroadId { CrossroadId { x, y } } /// Computes the unit move (dx, dy) and the length to join the destination crossroad. pub fn join(&self, dest: CrossroadId) -> (i32, i32, i32) { if self.x == dest.x { let dy = (dest.y as i32) - (self.y as i32); let len = i32::abs(dy); (0, dy / len, len) } else if self.y == dest.y { let dx = (dest.x as i32) - (self.x as i32); let len = i32::abs(dx); (dx / len, 0, len) } else { panic!("Crossroads {} and {} are not linkable.", self, dest); } } } /// A Crossroad. #[derive(Clone)] pub struct Crossroad { id: CrossroadId, // Coordinates pub nodes: Vec<NodeId>, // Vector of its 4 quarter nodes. // They are indexed by direction. roads: Vec<Vec<Option<RoadId>>>, // Roads leaving this crossroad. // They are indexed by direction and side. roads_arriving: Vec<Vec<Option<RoadId>>>, // Roads arriving at this crossroad. // They are indexed by direction and side. } impl Crossroad { /// Creates a new crossroad with four nodes without any roads. pub fn new(id: CrossroadId, g: &mut Graph) -> Crossroad { let mut c = Crossroad { id, nodes: vec!(), roads: none_array(4, 2), roads_arriving: none_array(4, 2), }; for _ in 0..4 { c.nodes.push(g.add_node(c.id)); } c } /// Enables some roads. Only the cars from enabled roads are able to cross a crossroad. fn enable_path(&self, roads: &mut Vec<Road>) { // First policy: we enable the most loaded road with some guy waiting. 
// let mut max = -1; // let mut r_max = 0; // for r in self.existing_roads_arriving() { // if roads[r].is_waiting() && roads[r].get_car_count() > max { // r_max = r; // max = roads[r].get_car_count(); // } // } // roads[r_max].enable(); // Second policy: we enable the most loaded roads with guys waiting, but in pairs. // We compute the pair of compatible roads with the maximum cumulated load. let mut max_pair = ((NORTH, LEFT), (NORTH, LEFT)); let mut max_load = 0; for d in 0..4 { for s in 0..2 { for x in 0..2 { let (d2, s2) = { if x == 0 { (d, 1 - s) } else { ((d + 2) % 4, s) } }; let load = self.compute_load(d, s, roads) + self.compute_load(d2, s2, roads); if load > max_load { max_load = load; max_pair = ((d, s), (d2, s2)); } } } } let ((d1, s1), (d2, s2)) = max_pair; if self.roads_arriving[d1][s1].is_some() { roads[self.roads_arriving[d1][s1].unwrap()].enable(); } if self.roads_arriving[d2][s2].is_some() { roads[self.roads_arriving[d2][s2].unwrap()].enable(); } } /// Computes the load of a road, i.e. the numbers of cars on this road. /// If there is no car ready to cross, returns 0. fn compute_load(&self, direction: usize, side: usize, roads: &mut Vec<Road>) -> i32 { let r = self.roads_arriving[direction][side]; if r.is_none() || !roads[r.unwrap()].is_waiting() { return 0; } return roads[r.unwrap()].get_car_count(); } } impl Network { /// Creates a new empty Network, with specified width and height. pub fn new(width: usize, height: usize) -> Network { Network { width, height, car_count: 0, cars_per_unit: 10, cars_per_crossroad: 4, grid: none_array(height, width), roads: vec!(), graph: Graph::new(), car_graph: None, crossroads: vec!(), } } /// Adds a crossroad at the specified location. pub fn add_crossroad(&mut self, x: usize, y: usize) { let c = CrossroadId::new(x, y); // We check the crossroad does not exist. self.assert_crossroad_not_exists(c); // We add it to the graph and update the network. self.grid[c] = Some(Crossroad::new(c, &mut self.graph)); self.crossroads.push(c); } /// Adds a new specific road. pub fn new_road(&mut self, src: CrossroadId, dest: CrossroadId, side: Side) { // We get the parameters of the road. let (dx, dy, length) = src.join(dest); let length = length * self.cars_per_unit - self.cars_per_crossroad; let (d1, d2) = compute_directions(dx, dy, side); let id = self.roads.len(); // First, it builds the road in the network. let road_info = RoadInfo { id, start: src, end: dest, side, destination: self.crossroad(dest).nodes[d2], length: length as usize, }; // Then, we add it to the crossroads and the roads. let road = Road::new(road_info); self.roads.push(road); self.crossroad_mut(src).roads[d1][side] = Some(id); self.crossroad_mut(dest).roads_arriving[d1][side] = Some(id); // Then, it builds the two corresponding edges in the graph. let (n1, n2) = { let c = self.crossroad(src); (c.nodes[d1], c.nodes[previous_direction(d1)]) }; let n3 = self.crossroad(dest).nodes[d2]; self.graph.add_edge(n1, n3, id); self.graph.add_edge(n2, n3, id); } /// Adds the two roads linking the first crossroad to the second one. pub fn add_road(&mut self, (src_x, src_y): (usize, usize), (dest_x, dest_y): (usize, usize)) { let (src, dest) = (CrossroadId::new(src_x, src_y), CrossroadId::new(dest_x, dest_y)); // Checks the source and destination crossroads exist. self.assert_crossroad_exists(src); self.assert_crossroad_exists(dest); // Checks that they are aligned. let (dx, dy, length) = src.join(dest); // Checks that the road can be built between the two crossroads, i.e.
that it does not // generate any collision. for k in 1..length { self.assert_crossroad_not_exists(src + (k*dx, k*dy)); } // Creates both roads. self.new_road(src, dest, LEFT); self.new_road(src, dest, RIGHT); } /// Adds all roads between the crossroads `c1` and `c2`. pub fn add_all_roads(&mut self, c1: (usize, usize), c2: (usize, usize)) { self.add_road(c1, c2); self.add_road(c2, c1); } /// Panics if the crossroad does not exist. pub fn assert_crossroad_exists(&self, c: CrossroadId) { if self.grid[c].is_none() { panic!("This crossroad {} does not exist.", c); } } /// Panics if the crossroad already exists. pub fn assert_crossroad_not_exists(&self, c: CrossroadId) { if self.grid[c].is_some() { panic!("This crossroad {} already exists.", c); } } /// Retrieves the specified crossroad. Panics if it does not exist. pub fn crossroad(&self, c: CrossroadId) -> &Crossroad { self.grid[c].as_ref().unwrap() } /// Retrieves a mutable reference to the specified crossroad. Panics if it does not exist. pub fn crossroad_mut(&mut self, c: CrossroadId) -> &mut Crossroad { self.grid[c].as_mut().unwrap() } /// Creates a new car. It transfers the current graph to the car, with a fresh identifier. pub fn create_car(&mut self) -> Car { if self.car_graph.is_none() { // If needed, we generate this shared reference. self.car_graph = Some(Arc::new(self.clone_graph())); } let id = self.car_count; self.car_count += 1; Car::new(id, 0, CrossroadId::new(0, 0), self.car_graph.clone().unwrap()) } /// Spawns a car on a random road, and finds a random destination. pub fn generate_request(&mut self, id: CarId) -> (RoadInfo, usize, CrossroadId) { // First, it finds a road to spawn the car. let mut rng = rand::thread_rng(); let mut road_id = rng.gen_range(0, self.roads.len()); let mut pos = self.roads[road_id].spawn_car(id); while pos == -1 { road_id = rng.gen_range(0, self.roads.len()); pos = self.roads[road_id].spawn_car(id); } // Then, it gets the crossroad at the end of this road. let road_info = self.roads[road_id].info(); let source_c = road_info.end; // It randomly chooses a crossroad different from the previous crossroad. let mut destination = self.random_crossroad(); while destination == source_c { destination = self.random_crossroad(); } // Returns the final spawn position and destination. (road_info, pos as usize, destination) } /// Spawns all the cars that requested it. Updates the moves vector with the resulting spawns. pub fn spawn_cars(&mut self, actions: Vec<Action>, moves: &mut Vec<Move>) { for (i, a) in actions.iter().enumerate() { if let Action::SPAWN = *a { let (road_info, pos, destination) = self.generate_request(i); moves[i] = Move::SPAWN(road_info, pos, destination); } } } /// Makes the crossroads enable some roads. pub fn enable_paths(&mut self) { for &c in &self.crossroads { self.grid[c].as_ref().unwrap().enable_path(&mut self.roads); } } /// Performs an update step on all roads, based on the Actions and Speeds vector. /// Updates the resulting Moves vector, and returns the EdgesWeight estimation. pub fn roads_step(&mut self, actions: &mut Vec<Action>, moves: &mut Vec<Move>, speeds: &Vec<Speed>) -> EdgesWeight { let roads = &mut self.roads; // All the possible enabled paths are tried. for i in 0..roads.len() { // Each road tries to make its first car cross, if enabled. Road::deliver(i, actions, moves, roads); } // We make a step for all remaining cars, and get the weights estimations.
let mut weights = vec!(); for i in 0..roads.len() { weights.push(roads[i].step_forward(moves, speeds)); } let edges_weight = EdgesWeight::new(weights); return edges_weight } /// Returns the central reactive process of the network. pub fn process(mut self, central_signal: SPMCSignalSender<Arc<GlobalInfo>>, pos_signal: MPSCSignalReceiver<(CarId, (Action, Speed)), (Vec<Action>, Vec<Speed>)>) -> impl Process<Value=()> { let mut weights = vec!(); for r in &self.roads { weights.push(r.weight()); } let mut step = 0; let mut mean_moves = self.car_count as f32; let beta = 0.99; let cont = move | (mut actions, speeds): (Vec<Action>, Vec<Speed>) | { // We count the steps. step += 1; // We enable some paths. self.enable_paths(); // We compute the road step and get back some weights. let mut moves = (0..actions.len()).map(|_| { Move::NONE }).collect(); let weights = self.roads_step(&mut actions, &mut moves, &speeds); // We spawn the cars that requested it. self.spawn_cars(actions, &mut moves); // We count the number of cars that did something. let nb_moves: i32 = moves.iter().map(| m | { match m { &Move::NONE => 0, _ => 1, }}).sum(); // We keep some moving mean of this number. If it is too low, nothing is happening, so // it panics. mean_moves = beta * mean_moves + (1. - beta) * (nb_moves as f32); if mean_moves < 1e-3 { panic!("It looks like a stationary state: not enough moves."); } // Returns the updated information about the step. Arc::new(GlobalInfo { weights, moves }) }; let p = pos_signal.await_in() // Awaits the car actions .map(cont) // Computes the resulting moves and weights. .emit_consume(central_signal) // Emits this information. .loop_inf(); // Loops. return p; } /// Returns a String representing the network. pub fn to_string(&self) -> String { // We first build the corresponding char two-dimensional vector. let (width, height) = (2 * self.width - 1, 2 * self.height - 1); let mut char_map: Vec<Vec<char>> = (0..height).map(|_| { (0..width).map(|_| { ' ' }).collect()}).collect(); // Then we add the crossroads. for c in &self.crossroads { char_map[2 * c.y][2 * c.x] = 'C'; } // Then we add the roads. for r in &self.roads { let start = r.info().start; let (dx, dy, length) = start.join(r.info().end); // Chooses the right symbol. let c = if dx == 0 { '|' } else { '-' }; let (x, y) = (2*start.x, 2*start.y); for k in 1..(2*length) { char_map[(y as i32 + k * dy) as usize][(x as i32 + k * dx) as usize] = c; } } // We collect the characters into a string.
pub enum Move { NONE, // Nothing happened. SPAWN(RoadInfo, usize, CrossroadId), // The car has spawned at specified road, position,
random_line_split
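The `load_string` parser above reads maps drawn as ASCII art: crossroads are `C` characters on even rows and columns, the `-` and `|` runs between them draw the roads, and character coordinates are halved to obtain grid coordinates (which `to_string` inverts by doubling them). A small usage sketch, assuming the `Network` API shown above:

```rust
// A hypothetical 2x2 ring: four crossroads, each pair of neighbours
// connected by two-way roads.
let map = "C-C\n| |\nC-C";

// The initial dimensions are irrelevant: load_string resizes the network
// to ((width + 1) / 2, (height + 1) / 2) of the character grid.
let mut network = Network::new(1, 1);
network.load_string(map);
assert_eq!(network.crossroads.len(), 4);

// to_string() redraws the same picture from the parsed grid.
print!("{}", network.to_string());
```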
lib.rs
//! A high-level API for programmatically interacting with web pages //! through WebDriver. //! //! [WebDriver protocol]: https://www.w3.org/TR/webdriver/ //! [CSS selectors]: https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors //! [powerful]: https://developer.mozilla.org/en-US/docs/Web/CSS/Pseudo-classes //! [operators]: https://developer.mozilla.org/en-US/docs/Web/CSS/Attribute_selectors //! [WebDriver compatible]: https://github.com/Fyrd/caniuse/issues/2757#issuecomment-304529217 //! [`geckodriver`]: https://github.com/mozilla/geckodriver #[macro_use] extern crate error_chain; pub mod error; mod protocol; use crate::error::*; pub use hyper::Method; use protocol::Client; use serde_json::Value; use std::time::Duration; use tokio::time::sleep; use webdriver::{ command::{SwitchToFrameParameters, SwitchToWindowParameters, WebDriverCommand}, common::{FrameId, WebElement, ELEMENT_KEY}, error::{ErrorStatus, WebDriverError}, }; #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)] pub enum Locator { Css(String), LinkText(String), XPath(String), } impl Into<webdriver::command::LocatorParameters> for Locator { fn into(self) -> webdriver::command::LocatorParameters { match self { Locator::Css(s) => webdriver::command::LocatorParameters { using: webdriver::common::LocatorStrategy::CSSSelector, value: s, }, Locator::XPath(s) => webdriver::command::LocatorParameters { using: webdriver::common::LocatorStrategy::XPath, value: s, }, Locator::LinkText(s) => webdriver::command::LocatorParameters { using: webdriver::common::LocatorStrategy::LinkText, value: s, }, } } } pub struct Driver(Client); macro_rules! generate_wait_for_find { ($name:ident, $search_fn:ident, $return_typ:ty) => { /// Wait for the specified element(s) to appear on the page pub async fn $name( &self, search: Locator, root: Option<WebElement> ) -> Result<$return_typ> { loop { match self.$search_fn(search.clone(), root.clone()).await { Ok(e) => break Ok(e), Err(Error(ErrorKind::WebDriver( WebDriverError {error: ErrorStatus::NoSuchElement,..} ), _)) => sleep(Duration::from_millis(100)).await, Err(e) => break Err(e) } } } } } impl Driver { /// Create a new webdriver session on the specified server pub async fn new(webdriver_url: &str, user_agent: Option<String>) -> Result<Self> { Ok(Driver(Client::new(webdriver_url, user_agent).await?)) } /// Navigate directly to the given URL. pub async fn goto<'a>(&'a self, url: &'a str) -> Result<()> { let cmd = WebDriverCommand::Get(webdriver::command::GetParameters { url: self.current_url().await?.join(url)?.into(), }); self.0.issue_cmd(&cmd).await?; Ok(()) } /// Retrieve the currently active URL for this session. pub async fn current_url(&self) -> Result<url::Url> { match self.0.issue_cmd(&WebDriverCommand::GetCurrentUrl).await?.as_str() { Some(url) => Ok(url.parse()?), None => bail!(ErrorKind::NotW3C(Value::Null)), } } /// Get the HTML source for the current page. pub async fn source(&self) -> Result<String> { match self.0.issue_cmd(&WebDriverCommand::GetPageSource).await?.as_str() { Some(src) => Ok(src.to_string()), None => bail!(ErrorKind::NotW3C(Value::Null)), } } /// Go back to the previous page. pub async fn back(&self) -> Result<()> { self.0.issue_cmd(&WebDriverCommand::GoBack).await?; Ok(()) } /// Refresh the current previous page. 
    pub async fn refresh(&self) -> Result<()> {
        self.0.issue_cmd(&WebDriverCommand::Refresh).await?;
        Ok(())
    }

    /// Switch the focus to the frame contained in Element
    pub async fn switch_to_frame(&self, frame: WebElement) -> Result<()> {
        let p = SwitchToFrameParameters {
            id: Some(FrameId::Element(frame)),
        };
        let cmd = WebDriverCommand::SwitchToFrame(p);
        self.0.issue_cmd(&cmd).await?;
        Ok(())
    }

    /// Switch the focus to this frame's parent frame
    pub async fn switch_to_parent_frame(&self) -> Result<()> {
        self.0.issue_cmd(&WebDriverCommand::SwitchToParentFrame).await?;
        Ok(())
    }

    /// Switch the focus to the window identified by handle
    pub async fn switch_to_window(&self, window: String) -> Result<()> {
        let p = SwitchToWindowParameters { handle: window };
        let cmd = WebDriverCommand::SwitchToWindow(p);
        self.0.issue_cmd(&cmd).await?;
        Ok(())
    }

    /// Execute the given JavaScript `script` in the current browser session.
    ///
    /// `args` is available to the script inside the `arguments`
    /// array. Since `Element` implements `ToJson`, you can also
    /// provide serialized `Element`s as arguments, and they will
    /// correctly serialize to DOM elements on the other side.
    pub async fn execute(&self, script: String, mut args: Vec<Value>) -> Result<Value> {
        self.fixup_elements(&mut args);
        let cmd = webdriver::command::JavascriptCommandParameters {
            script: script,
            args: Some(args),
        };
        let cmd = WebDriverCommand::ExecuteScript(cmd);
        self.0.issue_cmd(&cmd).await
    }

    /// Wait for the page to navigate to a new URL before proceeding.
    ///
    /// If the `current` URL is not provided, `self.current_url()`
    /// will be used. Note however that this introduces a race
    /// condition: the browser could finish navigating *before* we
    /// call `current_url()`, which would lead to an eternal wait.
    pub async fn wait_for_navigation(&self, current: Option<url::Url>) -> Result<()> {
        let current = match current {
            Some(current) => current,
            None => self.current_url().await?,
        };
        loop {
            if self.current_url().await? != current {
                break Ok(());
            }
            sleep(Duration::from_millis(100)).await
        }
    }

    /// Starting from the document root, find the first element on the page that
    /// matches the specified selector.
    pub async fn
(
        &self,
        locator: Locator,
        root: Option<WebElement>,
    ) -> Result<WebElement> {
        let cmd = match root {
            Option::None => WebDriverCommand::FindElement(locator.into()),
            Option::Some(elt) => {
                WebDriverCommand::FindElementElement(elt, locator.into())
            }
        };
        let res = self.0.issue_cmd(&cmd).await?;
        Ok(self.parse_lookup(res)?)
    }

    pub async fn find_all(
        &self,
        locator: Locator,
        root: Option<WebElement>,
    ) -> Result<Vec<WebElement>> {
        let cmd = match root {
            Option::None => WebDriverCommand::FindElements(locator.into()),
            Option::Some(elt) => {
                WebDriverCommand::FindElementElements(elt, locator.into())
            }
        };
        match self.0.issue_cmd(&cmd).await? {
            Value::Array(a) => Ok(a
                .into_iter()
                .map(|e| self.parse_lookup(e))
                .collect::<Result<Vec<WebElement>>>()?),
            r => bail!(ErrorKind::NotW3C(r)),
        }
    }

    generate_wait_for_find!(wait_for_find, find, WebElement);
    generate_wait_for_find!(wait_for_find_all, find_all, Vec<WebElement>);

    /// Extract the `WebElement` from a `FindElement` or `FindElementElement` command.
    fn parse_lookup(&self, mut res: Value) -> Result<WebElement> {
        let key = if self.0.legacy { "ELEMENT" } else { ELEMENT_KEY };
        let o = {
            if let Some(o) = res.as_object_mut() {
                o
            } else {
                bail!(ErrorKind::NotW3C(res))
            }
        };
        match o.remove(key) {
            None => bail!(ErrorKind::NotW3C(res)),
            Some(Value::String(wei)) => Ok(webdriver::common::WebElement(wei)),
            Some(v) => {
                o.insert(key.to_string(), v);
                bail!(ErrorKind::NotW3C(res))
            }
        }
    }

    fn fixup_elements(&self, args: &mut [Value]) {
        if self.0.legacy {
            for arg in args {
                // the serialization of WebElement uses the W3C index,
                // but legacy implementations need us to use the "ELEMENT" index
                if let Value::Object(ref mut o) = *arg {
                    if let Some(wei) = o.remove(ELEMENT_KEY) {
                        o.insert("ELEMENT".to_string(), wei);
                    }
                }
            }
        }
    }

    /// Look up an attribute value for this element by name.
    pub async fn attr(
        &self,
        eid: WebElement,
        attribute: String,
    ) -> Result<Option<String>> {
        let cmd = WebDriverCommand::GetElementAttribute(eid, attribute);
        match self.0.issue_cmd(&cmd).await? {
            Value::String(v) => Ok(Some(v)),
            Value::Null => Ok(None),
            v => bail!(ErrorKind::NotW3C(v)),
        }
    }

    /// Look up a DOM property for this element by name.
    pub async fn prop(&self, eid: WebElement, prop: String) -> Result<Option<String>> {
        let cmd = WebDriverCommand::GetElementProperty(eid, prop);
        match self.0.issue_cmd(&cmd).await? {
            Value::String(v) => Ok(Some(v)),
            Value::Null => Ok(None),
            v => bail!(ErrorKind::NotW3C(v)),
        }
    }

    /// Retrieve the text contents of this element.
    pub async fn text(&self, eid: WebElement) -> Result<String> {
        let cmd = WebDriverCommand::GetElementText(eid);
        match self.0.issue_cmd(&cmd).await? {
            Value::String(v) => Ok(v),
            v => bail!(ErrorKind::NotW3C(v)),
        }
    }

    /// Retrieve the HTML contents of this element. If `inner` is true,
    /// the wrapping node's own tags are excluded. Note: this is the same as
    /// calling `prop("innerHTML")` or `prop("outerHTML")`.
    pub async fn html(&self, eid: WebElement, inner: bool) -> Result<String> {
        let prop = if inner { "innerHTML" } else { "outerHTML" };
        self.prop(eid, prop.to_owned()).await?
            .ok_or_else(|| Error::from(ErrorKind::NotW3C(Value::Null)))
    }

    /// Click on this element
    pub async fn click(&self, eid: WebElement) -> Result<()> {
        let cmd = WebDriverCommand::ElementClick(eid);
        let r = self.0.issue_cmd(&cmd).await?;
        if r.is_null() || r.as_object().map(|o| o.is_empty()).unwrap_or(false) {
            // geckodriver returns {} :(
            Ok(())
        } else {
            bail!(ErrorKind::NotW3C(r))
        }
    }

    /// Scroll this element into view
    pub async fn scroll_into_view(&self, eid: WebElement) -> Result<()> {
        let args = vec![serde_json::to_value(eid)?];
        let js = "arguments[0].scrollIntoView(true)".to_string();
        self.clone().execute(js, args).await?;
        Ok(())
    }

    /// Follow the `href` target of the element matching the given CSS
    /// selector *without* causing a click interaction.
    pub async fn follow(&self, eid: WebElement) -> Result<()> {
        match self.clone().attr(eid.clone(), String::from("href")).await? {
            None => bail!("no href attribute"),
            Some(href) => {
                let current = self.current_url().await?.join(&href)?;
                self.goto(current.as_str()).await
            }
        }
    }

    /// Set the `value` of the input element named `name` which is a child of `eid`
    pub async fn set_by_name(
        &self,
        eid: WebElement,
        name: String,
        value: String,
    ) -> Result<()> {
        let locator = Locator::Css(format!("input[name='{}']", name));
        let elt = self.clone().find(locator.into(), Some(eid)).await?;
        let args = {
            let mut a = vec![serde_json::to_value(elt)?, Value::String(value)];
            self.fixup_elements(&mut a);
            a
        };
        let js = "arguments[0].value = arguments[1]".to_string();
        let res = self.clone().execute(js, args).await?;
        if res.is_null() {
            Ok(())
        } else {
            bail!(ErrorKind::NotW3C(res))
        }
    }

    /// Submit the form specified by `eid` with the first submit button
    pub async fn submit(&self, eid: WebElement) -> Result<()> {
        let l = Locator::Css("input[type=submit],button[type=submit]".into());
        self.submit_with(eid, l).await
    }

    /// Submit the form `eid` using the button matched by the given selector.
    pub async fn submit_with(&self, eid: WebElement, button: Locator) -> Result<()> {
        let elt = self.clone().find(button.into(), Some(eid)).await?;
        Ok(self.clone().click(elt).await?)
    }

    /// Submit this form using the form submit button with the given
    /// label (case-insensitive).
    pub async fn submit_using(&self, eid: WebElement, button_label: String) -> Result<()> {
        let escaped = button_label.replace('\\', "\\\\").replace('"', "\\\"");
        let btn = format!(
            "input[type=submit][value=\"{}\" i],\
             button[type=submit][value=\"{}\" i]",
            escaped, escaped
        );
        Ok(self.submit_with(eid, Locator::Css(btn)).await?)
    }

    /// Submit this form directly, without clicking any buttons.
    ///
    /// This can be useful to bypass forms that perform various magic
    /// when the submit button is clicked, or that hijack click events
    /// altogether.
    ///
    /// Note that since no button is actually clicked, the
    /// `name=value` pair for the submit button will not be
    /// submitted. This can be circumvented by using `submit_sneaky`
    /// instead.
    pub async fn submit_direct(&self, eid: WebElement) -> Result<()> {
        // some sites are silly, and name their submit button
        // "submit". this ends up overwriting the "submit" function of
        // the form with a reference to the submit button itself, so
        // we can't call .submit(). we get around this by creating a
        // *new* form, and using *its* submit() handler but with this
        // pointed to the real form.
solution from here: // https://stackoverflow.com/q/833032/472927#comment23038712_834197 let js = "document.createElement('form').submit.call(arguments[0])".to_string(); let args = { let mut a = vec![serde_json::to_value(eid)?]; self.fixup_elements(&mut a); a }; self.clone().execute(js, args).await?; Ok(()) } /// Submit this form directly, without clicking any buttons, and /// with an extra field. /// /// Like `submit_direct`, this method will submit this form /// without clicking a submit button. However, it will *also* /// inject a hidden input element on the page that carries the /// given `field=value` mapping. This allows you to emulate the /// form data as it would have been *if* the submit button was /// indeed clicked. pub async fn submit_sneaky( &self, eid: WebElement, field: String, value: String, ) -> Result<()> { let js = r#" var h = document.createElement('input'); h.setAttribute('type', 'hidden'); h.setAttribute('name', arguments[1]); h.value = arguments[2]; arguments[0].appendChild(h); "# .to_string(); let args = { let mut a = vec![ serde_json::to_value(eid)?, Value::String(field), Value::String(value), ]; self.fixup_elements(&mut a); a }; self.execute(js, args).await?; Ok(()) } }
find
identifier_name
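// A minimal usage sketch for the `find` identifier filled in above, using
// only signatures visible in this record (`Driver::new`, `goto`, `find`,
// `text`). The server URL and CSS selector are placeholder assumptions, and
// a tokio runtime plus a running WebDriver server are presumed; this sketch
// is not part of the original crate.
async fn find_demo() -> Result<()> {
    let driver = Driver::new("http://localhost:4444", None).await?;
    driver.goto("https://example.com/").await?;
    // `find` returns the first match; the macro-generated `wait_for_find`
    // variant instead polls every 100ms until the element appears.
    let heading = driver.find(Locator::Css("h1".into()), None).await?;
    println!("{}", driver.text(heading).await?);
    Ok(())
}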
lib.rs
//! A high-level API for programmatically interacting with web pages //! through WebDriver. //! //! [WebDriver protocol]: https://www.w3.org/TR/webdriver/ //! [CSS selectors]: https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors //! [powerful]: https://developer.mozilla.org/en-US/docs/Web/CSS/Pseudo-classes //! [operators]: https://developer.mozilla.org/en-US/docs/Web/CSS/Attribute_selectors //! [WebDriver compatible]: https://github.com/Fyrd/caniuse/issues/2757#issuecomment-304529217 //! [`geckodriver`]: https://github.com/mozilla/geckodriver #[macro_use] extern crate error_chain; pub mod error; mod protocol; use crate::error::*; pub use hyper::Method; use protocol::Client; use serde_json::Value; use std::time::Duration; use tokio::time::sleep; use webdriver::{ command::{SwitchToFrameParameters, SwitchToWindowParameters, WebDriverCommand}, common::{FrameId, WebElement, ELEMENT_KEY}, error::{ErrorStatus, WebDriverError}, }; #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)] pub enum Locator { Css(String), LinkText(String), XPath(String), } impl Into<webdriver::command::LocatorParameters> for Locator { fn into(self) -> webdriver::command::LocatorParameters { match self { Locator::Css(s) => webdriver::command::LocatorParameters { using: webdriver::common::LocatorStrategy::CSSSelector, value: s, }, Locator::XPath(s) => webdriver::command::LocatorParameters { using: webdriver::common::LocatorStrategy::XPath, value: s, }, Locator::LinkText(s) => webdriver::command::LocatorParameters { using: webdriver::common::LocatorStrategy::LinkText, value: s, }, } } } pub struct Driver(Client); macro_rules! generate_wait_for_find {
/// Wait for the specified element(s) to appear on the page pub async fn $name( &self, search: Locator, root: Option<WebElement> ) -> Result<$return_typ> { loop { match self.$search_fn(search.clone(), root.clone()).await { Ok(e) => break Ok(e), Err(Error(ErrorKind::WebDriver( WebDriverError {error: ErrorStatus::NoSuchElement,..} ), _)) => sleep(Duration::from_millis(100)).await, Err(e) => break Err(e) } } } } } impl Driver { /// Create a new webdriver session on the specified server pub async fn new(webdriver_url: &str, user_agent: Option<String>) -> Result<Self> { Ok(Driver(Client::new(webdriver_url, user_agent).await?)) } /// Navigate directly to the given URL. pub async fn goto<'a>(&'a self, url: &'a str) -> Result<()> { let cmd = WebDriverCommand::Get(webdriver::command::GetParameters { url: self.current_url().await?.join(url)?.into(), }); self.0.issue_cmd(&cmd).await?; Ok(()) } /// Retrieve the currently active URL for this session. pub async fn current_url(&self) -> Result<url::Url> { match self.0.issue_cmd(&WebDriverCommand::GetCurrentUrl).await?.as_str() { Some(url) => Ok(url.parse()?), None => bail!(ErrorKind::NotW3C(Value::Null)), } } /// Get the HTML source for the current page. pub async fn source(&self) -> Result<String> { match self.0.issue_cmd(&WebDriverCommand::GetPageSource).await?.as_str() { Some(src) => Ok(src.to_string()), None => bail!(ErrorKind::NotW3C(Value::Null)), } } /// Go back to the previous page. pub async fn back(&self) -> Result<()> { self.0.issue_cmd(&WebDriverCommand::GoBack).await?; Ok(()) } /// Refresh the current previous page. pub async fn refresh(&self) -> Result<()> { self.0.issue_cmd(&WebDriverCommand::Refresh).await?; Ok(()) } /// Switch the focus to the frame contained in Element pub async fn switch_to_frame(&self, frame: WebElement) -> Result<()> { let p = SwitchToFrameParameters { id: Some(FrameId::Element(frame)), }; let cmd = WebDriverCommand::SwitchToFrame(p); self.0.issue_cmd(&cmd).await?; Ok(()) } /// Switch the focus to this frame's parent frame pub async fn switch_to_parent_frame(&self) -> Result<()> { self.0.issue_cmd(&WebDriverCommand::SwitchToParentFrame).await?; Ok(()) } /// Switch the focus to the window identified by handle pub async fn switch_to_window(&self, window: String) -> Result<()> { let p = SwitchToWindowParameters { handle: window }; let cmd = WebDriverCommand::SwitchToWindow(p); self.0.issue_cmd(&cmd).await?; Ok(()) } /// Execute the given JavaScript `script` in the current browser session. /// /// `args` is available to the script inside the `arguments` /// array. Since `Element` implements `ToJson`, you can also /// provide serialized `Element`s as arguments, and they will /// correctly serialize to DOM elements on the other side. pub async fn execute(&self, script: String, mut args: Vec<Value>) -> Result<Value> { self.fixup_elements(&mut args); let cmd = webdriver::command::JavascriptCommandParameters { script: script, args: Some(args), }; let cmd = WebDriverCommand::ExecuteScript(cmd); self.0.issue_cmd(&cmd).await } /// Wait for the page to navigate to a new URL before proceeding. /// /// If the `current` URL is not provided, `self.current_url()` /// will be used. Note however that this introduces a race /// condition: the browser could finish navigating *before* we /// call `current_url()`, which would lead to an eternal wait. 
pub async fn wait_for_navigation(&self, current: Option<url::Url>) -> Result<()> { let current = match current { Some(current) => current, None => self.current_url().await?, }; loop { if self.current_url().await?!= current { break Ok(()); } sleep(Duration::from_millis(100)).await } } /// Starting from the document root, find the first element on the page that /// matches the specified selector. pub async fn find( &self, locator: Locator, root: Option<WebElement>, ) -> Result<WebElement> { let cmd = match root { Option::None => WebDriverCommand::FindElement(locator.into()), Option::Some(elt) => { WebDriverCommand::FindElementElement(elt, locator.into()) } }; let res = self.0.issue_cmd(&cmd).await?; Ok(self.parse_lookup(res)?) } pub async fn find_all( &self, locator: Locator, root: Option<WebElement>, ) -> Result<Vec<WebElement>> { let cmd = match root { Option::None => WebDriverCommand::FindElements(locator.into()), Option::Some(elt) => { WebDriverCommand::FindElementElements(elt, locator.into()) } }; match self.0.issue_cmd(&cmd).await? { Value::Array(a) => Ok(a .into_iter() .map(|e| self.parse_lookup(e)) .collect::<Result<Vec<WebElement>>>()?), r => bail!(ErrorKind::NotW3C(r)), } } generate_wait_for_find!(wait_for_find, find, WebElement); generate_wait_for_find!(wait_for_find_all, find_all, Vec<WebElement>); /// Extract the `WebElement` from a `FindElement` or `FindElementElement` command. fn parse_lookup(&self, mut res: Value) -> Result<WebElement> { let key = if self.0.legacy { "ELEMENT" } else { ELEMENT_KEY }; let o = { if let Some(o) = res.as_object_mut() { o } else { bail!(ErrorKind::NotW3C(res)) } }; match o.remove(key) { None => bail!(ErrorKind::NotW3C(res)), Some(Value::String(wei)) => Ok(webdriver::common::WebElement(wei)), Some(v) => { o.insert(key.to_string(), v); bail!(ErrorKind::NotW3C(res)) } } } fn fixup_elements(&self, args: &mut [Value]) { if self.0.legacy { for arg in args { // the serialization of WebElement uses the W3C index, // but legacy implementations need us to use the "ELEMENT" index if let Value::Object(ref mut o) = *arg { if let Some(wei) = o.remove(ELEMENT_KEY) { o.insert("ELEMENT".to_string(), wei); } } } } } /// Look up an attribute value for this element by name. pub async fn attr( &self, eid: WebElement, attribute: String, ) -> Result<Option<String>> { let cmd = WebDriverCommand::GetElementAttribute(eid, attribute); match self.0.issue_cmd(&cmd).await? { Value::String(v) => Ok(Some(v)), Value::Null => Ok(None), v => bail!(ErrorKind::NotW3C(v)), } } /// Look up a DOM property for this element by name. pub async fn prop(&self, eid: WebElement, prop: String) -> Result<Option<String>> { let cmd = WebDriverCommand::GetElementProperty(eid, prop); match self.0.issue_cmd(&cmd).await? { Value::String(v) => Ok(Some(v)), Value::Null => Ok(None), v => bail!(ErrorKind::NotW3C(v)), } } /// Retrieve the text contents of this elment. pub async fn text(&self, eid: WebElement) -> Result<String> { let cmd = WebDriverCommand::GetElementText(eid); match self.0.issue_cmd(&cmd).await? { Value::String(v) => Ok(v), v => bail!(ErrorKind::NotW3C(v)), } } /// Retrieve the HTML contents of this element. if inner is true, /// also return the wrapping nodes html. Note: this is the same as /// calling `prop("innerHTML")` or `prop("outerHTML")`. pub async fn html(&self, eid: WebElement, inner: bool) -> Result<String> { let prop = if inner { "innerHTML" } else { "outerHTML" }; self.prop(eid, prop.to_owned()).await? 
.ok_or_else(|| Error::from(ErrorKind::NotW3C(Value::Null))) } /// Click on this element pub async fn click(&self, eid: WebElement) -> Result<()> { let cmd = WebDriverCommand::ElementClick(eid); let r = self.0.issue_cmd(&cmd).await?; if r.is_null() || r.as_object().map(|o| o.is_empty()).unwrap_or(false) { // geckodriver returns {} :( Ok(()) } else { bail!(ErrorKind::NotW3C(r)) } } /// Scroll this element into view pub async fn scroll_into_view(&self, eid: WebElement) -> Result<()> { let args = vec![serde_json::to_value(eid)?]; let js = "arguments[0].scrollIntoView(true)".to_string(); self.clone().execute(js, args).await?; Ok(()) } /// Follow the `href` target of the element matching the given CSS /// selector *without* causing a click interaction. pub async fn follow(&self, eid: WebElement) -> Result<()> { match self.clone().attr(eid.clone(), String::from("href")).await? { None => bail!("no href attribute"), Some(href) => { let current = self.current_url().await?.join(&href)?; self.goto(current.as_str()).await } } } /// Set the `value` of the input element named `name` which is a child of `eid` pub async fn set_by_name( &self, eid: WebElement, name: String, value: String, ) -> Result<()> { let locator = Locator::Css(format!("input[name='{}']", name)); let elt = self.clone().find(locator.into(), Some(eid)).await?; let args = { let mut a = vec![serde_json::to_value(elt)?, Value::String(value)]; self.fixup_elements(&mut a); a }; let js = "arguments[0].value = arguments[1]".to_string(); let res = self.clone().execute(js, args).await?; if res.is_null() { Ok(()) } else { bail!(ErrorKind::NotW3C(res)) } } /// Submit the form specified by `eid` with the first submit button pub async fn submit(&self, eid: WebElement) -> Result<()> { let l = Locator::Css("input[type=submit],button[type=submit]".into()); self.submit_with(eid, l).await } /// Submit the form `eid` using the button matched by the given selector. pub async fn submit_with(&self, eid: WebElement, button: Locator) -> Result<()> { let elt = self.clone().find(button.into(), Some(eid)).await?; Ok(self.clone().click(elt).await?) } /// Submit this form using the form submit button with the given /// label (case-insensitive). pub async fn submit_using(&self, eid: WebElement, button_label: String) -> Result<()> { let escaped = button_label.replace('\\', "\\\\").replace('"', "\\\""); let btn = format!( "input[type=submit][value=\"{}\" i],\ button[type=submit][value=\"{}\" i]", escaped, escaped ); Ok(self.submit_with(eid, Locator::Css(btn)).await?) } /// Submit this form directly, without clicking any buttons. /// /// This can be useful to bypass forms that perform various magic /// when the submit button is clicked, or that hijack click events /// altogether. /// /// Note that since no button is actually clicked, the /// `name=value` pair for the submit button will not be /// submitted. This can be circumvented by using `submit_sneaky` /// instead. pub async fn submit_direct(&self, eid: WebElement) -> Result<()> { // some sites are silly, and name their submit button // "submit". this ends up overwriting the "submit" function of // the form with a reference to the submit button itself, so // we can't call.submit(). we get around this by creating a // *new* form, and using *its* submit() handler but with this // pointed to the real form. 
solution from here: // https://stackoverflow.com/q/833032/472927#comment23038712_834197 let js = "document.createElement('form').submit.call(arguments[0])".to_string(); let args = { let mut a = vec![serde_json::to_value(eid)?]; self.fixup_elements(&mut a); a }; self.clone().execute(js, args).await?; Ok(()) } /// Submit this form directly, without clicking any buttons, and /// with an extra field. /// /// Like `submit_direct`, this method will submit this form /// without clicking a submit button. However, it will *also* /// inject a hidden input element on the page that carries the /// given `field=value` mapping. This allows you to emulate the /// form data as it would have been *if* the submit button was /// indeed clicked. pub async fn submit_sneaky( &self, eid: WebElement, field: String, value: String, ) -> Result<()> { let js = r#" var h = document.createElement('input'); h.setAttribute('type', 'hidden'); h.setAttribute('name', arguments[1]); h.value = arguments[2]; arguments[0].appendChild(h); "# .to_string(); let args = { let mut a = vec![ serde_json::to_value(eid)?, Value::String(field), Value::String(value), ]; self.fixup_elements(&mut a); a }; self.execute(js, args).await?; Ok(()) } }
($name:ident, $search_fn:ident, $return_typ:ty) => {
random_line_split
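// For reference, the `random_line_split` middle above is the macro arm
// header. A sketch of what `generate_wait_for_find!(wait_for_find, find,
// WebElement)` therefore expands to inside `impl Driver` (slightly
// reformatted, same logic as the macro body shown in this record):
//
//     pub async fn wait_for_find(
//         &self,
//         search: Locator,
//         root: Option<WebElement>,
//     ) -> Result<WebElement> {
//         loop {
//             match self.find(search.clone(), root.clone()).await {
//                 Ok(e) => break Ok(e),
//                 Err(Error(ErrorKind::WebDriver(
//                     WebDriverError { error: ErrorStatus::NoSuchElement, .. }
//                 ), _)) => sleep(Duration::from_millis(100)).await,
//                 Err(e) => break Err(e),
//             }
//         }
//     }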
lib.rs
//! A high-level API for programmatically interacting with web pages //! through WebDriver. //! //! [WebDriver protocol]: https://www.w3.org/TR/webdriver/ //! [CSS selectors]: https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors //! [powerful]: https://developer.mozilla.org/en-US/docs/Web/CSS/Pseudo-classes //! [operators]: https://developer.mozilla.org/en-US/docs/Web/CSS/Attribute_selectors //! [WebDriver compatible]: https://github.com/Fyrd/caniuse/issues/2757#issuecomment-304529217 //! [`geckodriver`]: https://github.com/mozilla/geckodriver #[macro_use] extern crate error_chain; pub mod error; mod protocol; use crate::error::*; pub use hyper::Method; use protocol::Client; use serde_json::Value; use std::time::Duration; use tokio::time::sleep; use webdriver::{ command::{SwitchToFrameParameters, SwitchToWindowParameters, WebDriverCommand}, common::{FrameId, WebElement, ELEMENT_KEY}, error::{ErrorStatus, WebDriverError}, }; #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)] pub enum Locator { Css(String), LinkText(String), XPath(String), } impl Into<webdriver::command::LocatorParameters> for Locator { fn into(self) -> webdriver::command::LocatorParameters { match self { Locator::Css(s) => webdriver::command::LocatorParameters { using: webdriver::common::LocatorStrategy::CSSSelector, value: s, }, Locator::XPath(s) => webdriver::command::LocatorParameters { using: webdriver::common::LocatorStrategy::XPath, value: s, }, Locator::LinkText(s) => webdriver::command::LocatorParameters { using: webdriver::common::LocatorStrategy::LinkText, value: s, }, } } } pub struct Driver(Client); macro_rules! generate_wait_for_find { ($name:ident, $search_fn:ident, $return_typ:ty) => { /// Wait for the specified element(s) to appear on the page pub async fn $name( &self, search: Locator, root: Option<WebElement> ) -> Result<$return_typ> { loop { match self.$search_fn(search.clone(), root.clone()).await { Ok(e) => break Ok(e), Err(Error(ErrorKind::WebDriver( WebDriverError {error: ErrorStatus::NoSuchElement,..} ), _)) => sleep(Duration::from_millis(100)).await, Err(e) => break Err(e) } } } } } impl Driver { /// Create a new webdriver session on the specified server pub async fn new(webdriver_url: &str, user_agent: Option<String>) -> Result<Self> { Ok(Driver(Client::new(webdriver_url, user_agent).await?)) } /// Navigate directly to the given URL. pub async fn goto<'a>(&'a self, url: &'a str) -> Result<()> { let cmd = WebDriverCommand::Get(webdriver::command::GetParameters { url: self.current_url().await?.join(url)?.into(), }); self.0.issue_cmd(&cmd).await?; Ok(()) } /// Retrieve the currently active URL for this session. pub async fn current_url(&self) -> Result<url::Url> { match self.0.issue_cmd(&WebDriverCommand::GetCurrentUrl).await?.as_str() { Some(url) => Ok(url.parse()?), None => bail!(ErrorKind::NotW3C(Value::Null)), } } /// Get the HTML source for the current page. pub async fn source(&self) -> Result<String> { match self.0.issue_cmd(&WebDriverCommand::GetPageSource).await?.as_str() { Some(src) => Ok(src.to_string()), None => bail!(ErrorKind::NotW3C(Value::Null)), } } /// Go back to the previous page. pub async fn back(&self) -> Result<()> { self.0.issue_cmd(&WebDriverCommand::GoBack).await?; Ok(()) } /// Refresh the current previous page. 
pub async fn refresh(&self) -> Result<()> { self.0.issue_cmd(&WebDriverCommand::Refresh).await?; Ok(()) } /// Switch the focus to the frame contained in Element pub async fn switch_to_frame(&self, frame: WebElement) -> Result<()> { let p = SwitchToFrameParameters { id: Some(FrameId::Element(frame)), }; let cmd = WebDriverCommand::SwitchToFrame(p); self.0.issue_cmd(&cmd).await?; Ok(()) } /// Switch the focus to this frame's parent frame pub async fn switch_to_parent_frame(&self) -> Result<()> { self.0.issue_cmd(&WebDriverCommand::SwitchToParentFrame).await?; Ok(()) } /// Switch the focus to the window identified by handle pub async fn switch_to_window(&self, window: String) -> Result<()> { let p = SwitchToWindowParameters { handle: window }; let cmd = WebDriverCommand::SwitchToWindow(p); self.0.issue_cmd(&cmd).await?; Ok(()) } /// Execute the given JavaScript `script` in the current browser session. /// /// `args` is available to the script inside the `arguments` /// array. Since `Element` implements `ToJson`, you can also /// provide serialized `Element`s as arguments, and they will /// correctly serialize to DOM elements on the other side. pub async fn execute(&self, script: String, mut args: Vec<Value>) -> Result<Value> { self.fixup_elements(&mut args); let cmd = webdriver::command::JavascriptCommandParameters { script: script, args: Some(args), }; let cmd = WebDriverCommand::ExecuteScript(cmd); self.0.issue_cmd(&cmd).await } /// Wait for the page to navigate to a new URL before proceeding. /// /// If the `current` URL is not provided, `self.current_url()` /// will be used. Note however that this introduces a race /// condition: the browser could finish navigating *before* we /// call `current_url()`, which would lead to an eternal wait. pub async fn wait_for_navigation(&self, current: Option<url::Url>) -> Result<()> { let current = match current { Some(current) => current, None => self.current_url().await?, }; loop { if self.current_url().await?!= current { break Ok(()); } sleep(Duration::from_millis(100)).await } } /// Starting from the document root, find the first element on the page that /// matches the specified selector. pub async fn find( &self, locator: Locator, root: Option<WebElement>, ) -> Result<WebElement> { let cmd = match root { Option::None => WebDriverCommand::FindElement(locator.into()), Option::Some(elt) => { WebDriverCommand::FindElementElement(elt, locator.into()) } }; let res = self.0.issue_cmd(&cmd).await?; Ok(self.parse_lookup(res)?) } pub async fn find_all( &self, locator: Locator, root: Option<WebElement>, ) -> Result<Vec<WebElement>> { let cmd = match root { Option::None => WebDriverCommand::FindElements(locator.into()), Option::Some(elt) => { WebDriverCommand::FindElementElements(elt, locator.into()) } }; match self.0.issue_cmd(&cmd).await? { Value::Array(a) => Ok(a .into_iter() .map(|e| self.parse_lookup(e)) .collect::<Result<Vec<WebElement>>>()?), r => bail!(ErrorKind::NotW3C(r)), } } generate_wait_for_find!(wait_for_find, find, WebElement); generate_wait_for_find!(wait_for_find_all, find_all, Vec<WebElement>); /// Extract the `WebElement` from a `FindElement` or `FindElementElement` command. 
fn parse_lookup(&self, mut res: Value) -> Result<WebElement> { let key = if self.0.legacy { "ELEMENT" } else { ELEMENT_KEY }; let o = { if let Some(o) = res.as_object_mut() { o } else { bail!(ErrorKind::NotW3C(res)) } }; match o.remove(key) { None => bail!(ErrorKind::NotW3C(res)), Some(Value::String(wei)) => Ok(webdriver::common::WebElement(wei)), Some(v) => { o.insert(key.to_string(), v); bail!(ErrorKind::NotW3C(res)) } } } fn fixup_elements(&self, args: &mut [Value]) { if self.0.legacy { for arg in args { // the serialization of WebElement uses the W3C index, // but legacy implementations need us to use the "ELEMENT" index if let Value::Object(ref mut o) = *arg { if let Some(wei) = o.remove(ELEMENT_KEY) { o.insert("ELEMENT".to_string(), wei); } } } } } /// Look up an attribute value for this element by name. pub async fn attr( &self, eid: WebElement, attribute: String, ) -> Result<Option<String>> { let cmd = WebDriverCommand::GetElementAttribute(eid, attribute); match self.0.issue_cmd(&cmd).await? { Value::String(v) => Ok(Some(v)), Value::Null => Ok(None), v => bail!(ErrorKind::NotW3C(v)), } } /// Look up a DOM property for this element by name. pub async fn prop(&self, eid: WebElement, prop: String) -> Result<Option<String>> { let cmd = WebDriverCommand::GetElementProperty(eid, prop); match self.0.issue_cmd(&cmd).await? { Value::String(v) => Ok(Some(v)), Value::Null => Ok(None), v => bail!(ErrorKind::NotW3C(v)), } } /// Retrieve the text contents of this elment. pub async fn text(&self, eid: WebElement) -> Result<String> { let cmd = WebDriverCommand::GetElementText(eid); match self.0.issue_cmd(&cmd).await? { Value::String(v) => Ok(v), v => bail!(ErrorKind::NotW3C(v)), } } /// Retrieve the HTML contents of this element. if inner is true, /// also return the wrapping nodes html. Note: this is the same as /// calling `prop("innerHTML")` or `prop("outerHTML")`. pub async fn html(&self, eid: WebElement, inner: bool) -> Result<String> { let prop = if inner { "innerHTML" } else { "outerHTML" }; self.prop(eid, prop.to_owned()).await? .ok_or_else(|| Error::from(ErrorKind::NotW3C(Value::Null))) } /// Click on this element pub async fn click(&self, eid: WebElement) -> Result<()> { let cmd = WebDriverCommand::ElementClick(eid); let r = self.0.issue_cmd(&cmd).await?; if r.is_null() || r.as_object().map(|o| o.is_empty()).unwrap_or(false) { // geckodriver returns {} :( Ok(()) } else { bail!(ErrorKind::NotW3C(r)) } } /// Scroll this element into view pub async fn scroll_into_view(&self, eid: WebElement) -> Result<()> { let args = vec![serde_json::to_value(eid)?]; let js = "arguments[0].scrollIntoView(true)".to_string(); self.clone().execute(js, args).await?; Ok(()) } /// Follow the `href` target of the element matching the given CSS /// selector *without* causing a click interaction. pub async fn follow(&self, eid: WebElement) -> Result<()>
/// Set the `value` of the input element named `name` which is a child of `eid` pub async fn set_by_name( &self, eid: WebElement, name: String, value: String, ) -> Result<()> { let locator = Locator::Css(format!("input[name='{}']", name)); let elt = self.clone().find(locator.into(), Some(eid)).await?; let args = { let mut a = vec![serde_json::to_value(elt)?, Value::String(value)]; self.fixup_elements(&mut a); a }; let js = "arguments[0].value = arguments[1]".to_string(); let res = self.clone().execute(js, args).await?; if res.is_null() { Ok(()) } else { bail!(ErrorKind::NotW3C(res)) } } /// Submit the form specified by `eid` with the first submit button pub async fn submit(&self, eid: WebElement) -> Result<()> { let l = Locator::Css("input[type=submit],button[type=submit]".into()); self.submit_with(eid, l).await } /// Submit the form `eid` using the button matched by the given selector. pub async fn submit_with(&self, eid: WebElement, button: Locator) -> Result<()> { let elt = self.clone().find(button.into(), Some(eid)).await?; Ok(self.clone().click(elt).await?) } /// Submit this form using the form submit button with the given /// label (case-insensitive). pub async fn submit_using(&self, eid: WebElement, button_label: String) -> Result<()> { let escaped = button_label.replace('\\', "\\\\").replace('"', "\\\""); let btn = format!( "input[type=submit][value=\"{}\" i],\ button[type=submit][value=\"{}\" i]", escaped, escaped ); Ok(self.submit_with(eid, Locator::Css(btn)).await?) } /// Submit this form directly, without clicking any buttons. /// /// This can be useful to bypass forms that perform various magic /// when the submit button is clicked, or that hijack click events /// altogether. /// /// Note that since no button is actually clicked, the /// `name=value` pair for the submit button will not be /// submitted. This can be circumvented by using `submit_sneaky` /// instead. pub async fn submit_direct(&self, eid: WebElement) -> Result<()> { // some sites are silly, and name their submit button // "submit". this ends up overwriting the "submit" function of // the form with a reference to the submit button itself, so // we can't call.submit(). we get around this by creating a // *new* form, and using *its* submit() handler but with this // pointed to the real form. solution from here: // https://stackoverflow.com/q/833032/472927#comment23038712_834197 let js = "document.createElement('form').submit.call(arguments[0])".to_string(); let args = { let mut a = vec![serde_json::to_value(eid)?]; self.fixup_elements(&mut a); a }; self.clone().execute(js, args).await?; Ok(()) } /// Submit this form directly, without clicking any buttons, and /// with an extra field. /// /// Like `submit_direct`, this method will submit this form /// without clicking a submit button. However, it will *also* /// inject a hidden input element on the page that carries the /// given `field=value` mapping. This allows you to emulate the /// form data as it would have been *if* the submit button was /// indeed clicked. pub async fn submit_sneaky( &self, eid: WebElement, field: String, value: String, ) -> Result<()> { let js = r#" var h = document.createElement('input'); h.setAttribute('type', 'hidden'); h.setAttribute('name', arguments[1]); h.value = arguments[2]; arguments[0].appendChild(h); "# .to_string(); let args = { let mut a = vec![ serde_json::to_value(eid)?, Value::String(field), Value::String(value), ]; self.fixup_elements(&mut a); a }; self.execute(js, args).await?; Ok(()) } }
{ match self.clone().attr(eid.clone(), String::from("href")).await? { None => bail!("no href attribute"), Some(href) => { let current = self.current_url().await?.join(&href)?; self.goto(current.as_str()).await } } }
identifier_body
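// A hedged sketch connecting the `identifier_body` middle above to its
// signature: `follow` reads the element's `href` attribute, resolves it
// against the current URL, and navigates there without synthesizing a click.
// The CSS selector below is a placeholder assumption; this is not part of
// the original crate.
async fn follow_demo(driver: &Driver) -> Result<()> {
    let link = driver.find(Locator::Css("a.next".into()), None).await?;
    // Bails with "no href attribute" when the element has no `href`.
    driver.follow(link).await
}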
lib.rs
//! Brainfuck interpreter types
//!
//! This crate contains all the data types necessary for the Brainfuck
//! interpreter project.
#![deny(missing_docs)]

use std::fmt;
use std::io;
use std::path::{Path, PathBuf};
use thiserror::Error;

/// Represents a Brainfuck Types Error.
#[derive(Error, fmt::Debug)]
pub enum BrainfuckTypesError {
    /// When an unmatched left or right bracket is found
    #[error("unmatched bracket, {0:?}")]
    UnmatchedBracket(BrainfuckInstr),
}

/// Represents the eight raw Brainfuck instructions.
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum BrainfuckInstrRaw {
    /// Increment (increase by one) the byte at the data pointer
    Increment,
    /// Decrement (decrease by one) the byte at the data pointer
    Decrement,
    /// Decrement the data pointer (to point to the next cell to the left)
    MoveHeadLeft,
    /// Increment the data pointer (to point to the next cell to the right)
    MoveHeadRight,
    /// If the byte at the data pointer is zero, then instead of moving the
    /// instruction pointer forward to the next command, jump it forward to the
    /// command after the matching ] command.
    WhileStart,
    /// If the byte at the data pointer is nonzero, then instead of moving the
    /// instruction pointer forward to the next command, jump it back to the
    /// command after the matching [ command.
    WhileEnd,
    /// Accept one byte of input, storing its value in the byte at the data pointer
    CellRead,
    /// Output the byte at the data pointer
    CellWrite,
}

impl BrainfuckInstrRaw {
    /// Returns a BrainfuckInstrRaw from the given character.
    fn from_byte(c: u8) -> Option<BrainfuckInstrRaw> {
        match c {
            b'+' => Some(BrainfuckInstrRaw::Increment),
            b'-' => Some(BrainfuckInstrRaw::Decrement),
            b'<' => Some(BrainfuckInstrRaw::MoveHeadLeft),
            b'>' => Some(BrainfuckInstrRaw::MoveHeadRight),
            b'[' => Some(BrainfuckInstrRaw::WhileStart),
            b']' => Some(BrainfuckInstrRaw::WhileEnd),
            b',' => Some(BrainfuckInstrRaw::CellRead),
            b'.' => Some(BrainfuckInstrRaw::CellWrite),
            _ => None,
        }
    }
}

/// Represents the raw Brainfuck instruction and where it is in the file.
#[derive(Debug, Copy, Clone)]
pub struct BrainfuckInstr {
    /// The raw brainfuck instruction
    instr: BrainfuckInstrRaw,
    /// The line number, starting from 1 for humans
    line: usize,
    /// The column number, starting from 1 for humans
    column: usize,
}

impl BrainfuckInstr {
    /// Returns a vector of BrainfuckInstr's, parsed from the given string slice.
    ///
    /// # Example
    /// ```
    /// # use bft_types::{BrainfuckInstr, BrainfuckInstrRaw};
    /// let bf = BrainfuckInstr::instrs_from_str("<>");
    ///
    /// assert_eq!(bf[0].line(), 1);
    /// assert_eq!(bf[0].column(), 1);
    ///
    /// assert_eq!(bf[1].line(), 1);
    /// assert_eq!(bf[1].column(), 2);
    /// ```
    pub fn
(s: &str) -> Vec<Self> { let mut instrs: Vec<BrainfuckInstr> = Vec::new(); for (l, pline) in s.lines().enumerate() { for (c, pbyte) in pline.bytes().enumerate() { if let Some(iraw) = BrainfuckInstrRaw::from_byte(pbyte) { instrs.push(BrainfuckInstr { instr: iraw, line: l + 1, column: c + 1, }); } } } instrs } /// Returns the Brainfuck instruction line number pub fn line(&self) -> usize { self.line } /// Returns the Brainfuck instruction column pub fn column(&self) -> usize { self.column } /// Returns a borrow of the raw Brainfuck instruction. pub fn instr(&self) -> &BrainfuckInstrRaw { &self.instr } } impl fmt::Display for BrainfuckInstr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let text = match self.instr { BrainfuckInstrRaw::Increment => "Increment byte at data pointer", BrainfuckInstrRaw::Decrement => "Decrement byte at data pointer", BrainfuckInstrRaw::MoveHeadLeft => "Decrement data pointer", BrainfuckInstrRaw::MoveHeadRight => "Increment data pointer", BrainfuckInstrRaw::WhileStart => "Start looping", BrainfuckInstrRaw::WhileEnd => "End looping", BrainfuckInstrRaw::CellRead => "Input byte at the data pointer", BrainfuckInstrRaw::CellWrite => "Output byte at data pointer", }; write!(f, "{}", text) } } /// Represents an entire Brainfuck program, which is a Path and a series of /// instructions. #[derive(Debug)] pub struct BrainfuckProg { /// The path to the Brainfuck program. path: PathBuf, /// A series of BrainfuckInstr. instrs: Vec<BrainfuckInstr>, } impl BrainfuckProg { /// Instantiate a new BrainfuckProg with the given content and associate it /// with the given path. /// /// It is implemented like this so that we don't have to re-open a file if /// it is already open. See also from_file. /// /// # Example /// ``` /// # use bft_types::BrainfuckProg; /// # use std::path::Path; /// let bf = BrainfuckProg::new(Path::new("path/to/prog.bf"), "<>[]"); /// ``` pub fn new<P: AsRef<Path>>(path: P, content: &str) -> Self { Self { path: path.as_ref().to_path_buf(), instrs: BrainfuckInstr::instrs_from_str(content), } } /// Returns a new instance of BrainfuckProg, parsed from the file located at /// the given Path-like reference. /// /// # Example /// ```no_run /// # use bft_types::BrainfuckProg; /// # use std::path::Path; /// let bf = BrainfuckProg::from_file(Path::new("path/to/prog.bf")); /// ``` pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> { let content = std::fs::read_to_string(&path)?; Ok(Self::new(path, content.as_str())) } /// Returns a reference to the BrainfuckProg's path. pub fn path(&self) -> &Path { self.path.as_path() } /// Returns a reference to the BrainfuckProg's instructions. pub fn instrs(&self) -> &[BrainfuckInstr] { &self.instrs[..] } /// Checks the program and returns the Result. pub fn check(&self) -> Result<(), BrainfuckTypesError> { self.check_brackets() } /// Checks the left and right brackets and returns the Result. fn check_brackets(&self) -> Result<(), BrainfuckTypesError> { let mut left_brackets: Vec<&BrainfuckInstr> = Vec::new(); // Collect left brackets and pop when we find matching right brackets. 
        for bf_instr in &self.instrs {
            if bf_instr.instr == BrainfuckInstrRaw::WhileStart {
                left_brackets.push(bf_instr);
            } else if bf_instr.instr == BrainfuckInstrRaw::WhileEnd {
                match left_brackets.pop() {
                    Some(_) => (),
                    None => return Err(BrainfuckTypesError::UnmatchedBracket(*bf_instr)),
                };
            }
        }

        // Error if there are remaining unmatched left_brackets
        match left_brackets.iter().last() {
            Some(&b) => Err(BrainfuckTypesError::UnmatchedBracket(*b)),
            None => Ok(()),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::{BrainfuckInstrRaw, BrainfuckProg};
    use std::path::Path;

    // Store the line and column
    struct Position {
        line: usize,
        column: usize,
    }

    // Some default sequence, which we can test against.
    const CORRECT_INSTRS: [BrainfuckInstrRaw; 8] = [
        BrainfuckInstrRaw::MoveHeadLeft,
        BrainfuckInstrRaw::MoveHeadRight,
        BrainfuckInstrRaw::WhileStart,
        BrainfuckInstrRaw::WhileEnd,
        BrainfuckInstrRaw::Decrement,
        BrainfuckInstrRaw::Increment,
        BrainfuckInstrRaw::CellRead,
        BrainfuckInstrRaw::CellWrite,
    ];

    #[test]
    fn test_program() {
        let fake_path = "path/to/file.bf";
        let another_path = "path/to/somewhere/else.bf";

        // Construct
        let b = BrainfuckProg::new(fake_path, "<>[]-+,.");

        // Check the path is stored correctly
        assert_eq!(Path::new(fake_path), b.path.as_path());
        assert_ne!(Path::new(another_path), b.path.as_path());

        // Check the program
        let p = b.instrs();
        for (i, cinstr) in CORRECT_INSTRS.iter().enumerate() {
            assert_eq!(p[i].instr, *cinstr);
            assert_eq!(p[i].line(), 1);
            assert_eq!(p[i].column(), i + 1);
        }

        // Check the program backwards to verify BrainfuckInstrRaw PartialEq
        // actually fails when comparing two BrainfuckInstrRaw which are
        // different.
        // Note: this is largely redundant because we derive PartialEq; if the
        // standard implementation is broken then something is very wrong...
        for (i, cinstr) in CORRECT_INSTRS.iter().rev().enumerate() {
            assert_ne!(p[i].instr, *cinstr);
        }
    }

    #[test]
    fn test_program_with_comments() {
        let prog_str = "this < is > a [ valid ]\n\
                        brainfuck - program +\n\
                        these, are. comments";
        let correct_pos = [
            Position { line: 1, column: 6 },
            Position {
                line: 1,
                column: 11,
            },
            Position {
                line: 1,
                column: 15,
            },
            Position {
                line: 1,
                column: 23,
            },
            Position {
                line: 2,
                column: 11,
            },
            Position {
                line: 2,
                column: 21,
            },
            Position { line: 3, column: 7 },
            Position {
                line: 3,
                column: 13,
            },
        ];
        let b = BrainfuckProg::new("path/to/file.bf", prog_str);

        // Check the program
        let p = b.instrs();
        for (i, cinstr) in CORRECT_INSTRS.iter().enumerate() {
            assert_eq!(p[i].instr, *cinstr);
            assert_eq!(p[i].line(), correct_pos[i].line);
            assert_eq!(p[i].column(), correct_pos[i].column);
        }
    }

    #[test]
    fn test_program_with_matched_brackets() {
        let fake_path = "path/to/file.bf";
        let b = BrainfuckProg::new(fake_path, "<>[[[]-]+],.");
        assert!(b.check().is_ok());
    }

    #[test]
    fn test_program_with_unmatched_brackets() {
        let fake_path = "path/to/file.bf";
        let b1 = BrainfuckProg::new(fake_path, "<>[[]-+,.");
        assert!(b1.check().is_err());
        let b2 = BrainfuckProg::new(fake_path, "<>[[]]]-+,.");
        assert!(b2.check().is_err());
    }

    #[test]
    fn test_bad_path() {
        assert!(BrainfuckProg::from_file("/path/to/file.bf").is_err());
    }
}
instrs_from_str
identifier_name
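// A doc-style companion example for the `instrs_from_str` identifier filled
// in above: bytes that `BrainfuckInstrRaw::from_byte` does not recognize are
// skipped, so ordinary prose behaves as a comment while line/column numbers
// stay 1-based. The input string is an illustrative assumption:
//
//     let instrs = BrainfuckInstr::instrs_from_str("inc + then dec -");
//     assert_eq!(instrs.len(), 2);
//     assert_eq!(instrs[0].column(), 5);  // '+' is the 5th byte on line 1
//     assert_eq!(instrs[1].column(), 16); // '-' is the 16th byte on line 1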
lib.rs
//! Brainfuck interpreter types //! //! This crate contains all the data types necessary for the Brainfuck //! interpreter project. #![deny(missing_docs)]
use thiserror::Error;

/// Represents a Brainfuck Types Error.
#[derive(Error, fmt::Debug)]
pub enum BrainfuckTypesError {
    /// When an unmatched left or right bracket is found
    #[error("unmatched bracket, {0:?}")]
    UnmatchedBracket(BrainfuckInstr),
}

/// Represents the eight raw Brainfuck instructions.
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum BrainfuckInstrRaw {
    /// Increment (increase by one) the byte at the data pointer
    Increment,
    /// Decrement (decrease by one) the byte at the data pointer
    Decrement,
    /// Decrement the data pointer (to point to the next cell to the left)
    MoveHeadLeft,
    /// Increment the data pointer (to point to the next cell to the right)
    MoveHeadRight,
    /// If the byte at the data pointer is zero, then instead of moving the
    /// instruction pointer forward to the next command, jump it forward to the
    /// command after the matching ] command.
    WhileStart,
    /// If the byte at the data pointer is nonzero, then instead of moving the
    /// instruction pointer forward to the next command, jump it back to the
    /// command after the matching [ command.
    WhileEnd,
    /// Accept one byte of input, storing its value in the byte at the data pointer
    CellRead,
    /// Output the byte at the data pointer
    CellWrite,
}

impl BrainfuckInstrRaw {
    /// Returns a BrainfuckInstrRaw from the given character.
    fn from_byte(c: u8) -> Option<BrainfuckInstrRaw> {
        match c {
            b'+' => Some(BrainfuckInstrRaw::Increment),
            b'-' => Some(BrainfuckInstrRaw::Decrement),
            b'<' => Some(BrainfuckInstrRaw::MoveHeadLeft),
            b'>' => Some(BrainfuckInstrRaw::MoveHeadRight),
            b'[' => Some(BrainfuckInstrRaw::WhileStart),
            b']' => Some(BrainfuckInstrRaw::WhileEnd),
            b',' => Some(BrainfuckInstrRaw::CellRead),
            b'.' => Some(BrainfuckInstrRaw::CellWrite),
            _ => None,
        }
    }
}

/// Represents the raw Brainfuck instruction and where it is in the file.
#[derive(Debug, Copy, Clone)]
pub struct BrainfuckInstr {
    /// The raw brainfuck instruction
    instr: BrainfuckInstrRaw,
    /// The line number, starting from 1 for humans
    line: usize,
    /// The column number, starting from 1 for humans
    column: usize,
}

impl BrainfuckInstr {
    /// Returns a vector of BrainfuckInstr's, parsed from the given string slice.
    ///
    /// # Example
    /// ```
    /// # use bft_types::{BrainfuckInstr, BrainfuckInstrRaw};
    /// let bf = BrainfuckInstr::instrs_from_str("<>");
    ///
    /// assert_eq!(bf[0].line(), 1);
    /// assert_eq!(bf[0].column(), 1);
    ///
    /// assert_eq!(bf[1].line(), 1);
    /// assert_eq!(bf[1].column(), 2);
    /// ```
    pub fn instrs_from_str(s: &str) -> Vec<Self> {
        let mut instrs: Vec<BrainfuckInstr> = Vec::new();
        for (l, pline) in s.lines().enumerate() {
            for (c, pbyte) in pline.bytes().enumerate() {
                if let Some(iraw) = BrainfuckInstrRaw::from_byte(pbyte) {
                    instrs.push(BrainfuckInstr {
                        instr: iraw,
                        line: l + 1,
                        column: c + 1,
                    });
                }
            }
        }
        instrs
    }

    /// Returns the Brainfuck instruction line number
    pub fn line(&self) -> usize {
        self.line
    }

    /// Returns the Brainfuck instruction column
    pub fn column(&self) -> usize {
        self.column
    }

    /// Returns a borrow of the raw Brainfuck instruction.
pub fn instr(&self) -> &BrainfuckInstrRaw { &self.instr } } impl fmt::Display for BrainfuckInstr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let text = match self.instr { BrainfuckInstrRaw::Increment => "Increment byte at data pointer", BrainfuckInstrRaw::Decrement => "Decrement byte at data pointer", BrainfuckInstrRaw::MoveHeadLeft => "Decrement data pointer", BrainfuckInstrRaw::MoveHeadRight => "Increment data pointer", BrainfuckInstrRaw::WhileStart => "Start looping", BrainfuckInstrRaw::WhileEnd => "End looping", BrainfuckInstrRaw::CellRead => "Input byte at the data pointer", BrainfuckInstrRaw::CellWrite => "Output byte at data pointer", }; write!(f, "{}", text) } } /// Represents an entire Brainfuck program, which is a Path and a series of /// instructions. #[derive(Debug)] pub struct BrainfuckProg { /// The path to the Brainfuck program. path: PathBuf, /// A series of BrainfuckInstr. instrs: Vec<BrainfuckInstr>, } impl BrainfuckProg { /// Instantiate a new BrainfuckProg with the given content and associate it /// with the given path. /// /// It is implemented like this so that we don't have to re-open a file if /// it is already open. See also from_file. /// /// # Example /// ``` /// # use bft_types::BrainfuckProg; /// # use std::path::Path; /// let bf = BrainfuckProg::new(Path::new("path/to/prog.bf"), "<>[]"); /// ``` pub fn new<P: AsRef<Path>>(path: P, content: &str) -> Self { Self { path: path.as_ref().to_path_buf(), instrs: BrainfuckInstr::instrs_from_str(content), } } /// Returns a new instance of BrainfuckProg, parsed from the file located at /// the given Path-like reference. /// /// # Example /// ```no_run /// # use bft_types::BrainfuckProg; /// # use std::path::Path; /// let bf = BrainfuckProg::from_file(Path::new("path/to/prog.bf")); /// ``` pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> { let content = std::fs::read_to_string(&path)?; Ok(Self::new(path, content.as_str())) } /// Returns a reference to the BrainfuckProg's path. pub fn path(&self) -> &Path { self.path.as_path() } /// Returns a reference to the BrainfuckProg's instructions. pub fn instrs(&self) -> &[BrainfuckInstr] { &self.instrs[..] } /// Checks the program and returns the Result. pub fn check(&self) -> Result<(), BrainfuckTypesError> { self.check_brackets() } /// Checks the left and right brackets and returns the Result. fn check_brackets(&self) -> Result<(), BrainfuckTypesError> { let mut left_brackets: Vec<&BrainfuckInstr> = Vec::new(); // Collect left brackets and pop when we find matching right brackets. for bf_instr in &self.instrs { if bf_instr.instr == BrainfuckInstrRaw::WhileStart { left_brackets.push(&bf_instr); } else if bf_instr.instr == BrainfuckInstrRaw::WhileEnd { match left_brackets.pop() { Some(_) => (), None => return Err(BrainfuckTypesError::UnmatchedBracket(*bf_instr)), }; } } // Error if there are remaining unmatched left_brackets match left_brackets.iter().last() { Some(&b) => Err(BrainfuckTypesError::UnmatchedBracket(*b)), None => Ok(()), } } } #[cfg(test)] mod tests { use super::{BrainfuckInstrRaw, BrainfuckProg}; use std::path::Path; // Store the line and column struct Position { line: usize, column: usize, } // Some default sequence, which we can test against. 
const CORRECT_INSTRS: [BrainfuckInstrRaw; 8] = [ BrainfuckInstrRaw::MoveHeadLeft, BrainfuckInstrRaw::MoveHeadRight, BrainfuckInstrRaw::WhileStart, BrainfuckInstrRaw::WhileEnd, BrainfuckInstrRaw::Decrement, BrainfuckInstrRaw::Increment, BrainfuckInstrRaw::CellRead, BrainfuckInstrRaw::CellWrite, ]; #[test] fn test_program() { let fake_path = "path/to/file.bf"; let another_path = "path/to/somewhere/else.bf"; // Construct let b = BrainfuckProg::new(fake_path, "<>[]-+,."); // Check the path is stored correctly assert_eq!(Path::new(fake_path), b.path.as_path()); assert_ne!(Path::new(another_path), b.path.as_path()); // Check the program let p = b.instrs(); for (i, cinstr) in CORRECT_INSTRS.iter().enumerate() { assert_eq!(p[i].instr, *cinstr); assert_eq!(p[i].line(), 1); assert_eq!(p[i].column(), i + 1); } // Check the program backwards to verify BrainfuckInstrRaw PartialEq // actually fails when comparing two BrainfuckInstrRaw which are // different. // Note: This is pointless because we derrive PartialEq, if the standard // implementation is broken then something is very wrong... for (i, cinstr) in CORRECT_INSTRS.iter().rev().enumerate() { assert_ne!(p[i].instr, *cinstr); } } #[test] fn test_program_with_comments() { let prog_str = "this < is > a [ valid ]\n\ brainfuck - program +\n\ these, are. comments"; let correct_pos = [ Position { line: 1, column: 6 }, Position { line: 1, column: 11, }, Position { line: 1, column: 15, }, Position { line: 1, column: 23, }, Position { line: 2, column: 11, }, Position { line: 2, column: 21, }, Position { line: 3, column: 7 }, Position { line: 3, column: 13, }, ]; let b = BrainfuckProg::new("path/to/file.bf", prog_str); // Check the program let p = b.instrs(); for (i, cinstr) in CORRECT_INSTRS.iter().enumerate() { assert_eq!(p[i].instr, *cinstr); assert_eq!(p[i].line(), correct_pos[i].line); assert_eq!(p[i].column(), correct_pos[i].column); } } #[test] fn test_program_with_matched_brackets() { let fake_path = "path/to/file.bf"; let b = BrainfuckProg::new(fake_path, "<>[[[]-]+],."); assert!(b.check().is_ok()); } #[test] fn test_program_with_unmatched_brackets() { let fake_path = "path/to/file.bf"; let b1 = BrainfuckProg::new(fake_path, "<>[[]-+,."); assert!(b1.check().is_err()); let b2 = BrainfuckProg::new(fake_path, "<>[[]]]-+,."); assert!(b2.check().is_err()); } #[test] fn test_bad_path() { assert!(BrainfuckProg::from_file("/path/to/file.bf").is_err()); } }
use std::fmt; use std::io; use std::path::{Path, PathBuf};
random_line_split
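// A short sketch of the bracket checker from the record above: `[` pushes
// onto a stack, `]` pops, and whichever side is left unmatched is reported
// as `BrainfuckTypesError::UnmatchedBracket`, carrying the offending
// instruction's 1-based position. The file names are placeholders:
//
//     let bad = BrainfuckProg::new("demo.bf", "+[->+<"); // '[' never closed
//     assert!(bad.check().is_err());
//     let ok = BrainfuckProg::new("demo.bf", "[[]]");
//     assert!(ok.check().is_ok());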
sse_server.rs
//! Server-sent-event server for the note viewer feature.
//! This module also contains the web browser Javascript client code.
use crate::config::CFG;
use crate::config::VIEWER_SERVED_MIME_TYPES_MAP;
use crate::viewer::error::ViewerError;
use crate::viewer::http_response::HttpResponse;
use crate::viewer::init::LOCALHOST;
use parking_lot::RwLock;
use percent_encoding::percent_decode_str;
use std::collections::HashSet;
use std::io::{ErrorKind, Read, Write};
use std::net::Ipv4Addr;
use std::net::SocketAddr;
use std::net::SocketAddrV4;
use std::net::{TcpListener, TcpStream};
use std::path::PathBuf;
use std::str;
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::SystemTime;
use tpnote_lib::config::TMPL_HTML_VAR_NOTE_JS;
use tpnote_lib::context::Context;

/// The TCP stream is read in chunks. This is the read buffer size.
const TCP_READ_BUFFER_SIZE: usize = 0x400;

/// Javascript client code, part 1.
/// Refresh the page on file events.
pub const SSE_CLIENT_CODE1: &str = r#"
var evtSource = new EventSource("http://"#;
/// Javascript client code, part 2.
/// Save the last scroll position into local storage and
/// jump back to it after the page reloads.
pub const SSE_CLIENT_CODE2: &str = r#"/events");
evtSource.addEventListener("update", function(e) {
    localStorage.setItem('scrollPosition', window.scrollY);
    window.location.reload(true);
});
window.addEventListener('load', function() {
    if(localStorage.getItem('scrollPosition') !== null)
        window.scrollTo(0, localStorage.getItem('scrollPosition'));
});
"#;
/// URL path for Server-Sent-Events.
const SSE_EVENT_PATH: &str = "/events";

/// Server-Sent-Event tokens our HTTP client has registered to receive.
#[derive(Debug, Clone, Copy)]
pub enum SseToken {
    /// Server-Sent-Event token to request nothing but check if the client is still
    /// there.
    Ping,
    /// Server-Sent-Event token to request a page update.
    Update,
}

pub fn manage_connections(
    event_tx_list: Arc<Mutex<Vec<SyncSender<SseToken>>>>,
    listener: TcpListener,
    doc_path: PathBuf,
) {
    // A list of referenced local links to images or other documents as
    // they appeared in the displayed documents.
    // Every thread gets an (ARC) reference to it.
    let allowed_urls = Arc::new(RwLock::new(HashSet::new()));
    // Subset of the above list containing only displayed Tp-Note documents.
    let delivered_tpnote_docs = Arc::new(RwLock::new(HashSet::new()));
    // We use an ARC to count the number of running threads.
    let conn_counter = Arc::new(());
    // Store `doc_path` in the `context.path` and
    // in the Tera variable `TMPL_VAR_PATH`.
    let context = Context::from(&doc_path);
    log::info!(
        "Viewer notice:\n\
         only files under the directory: {}\n\
         with the following extensions:\n\
         {}\n\
         are served!",
        context.root_path.display(),
        &VIEWER_SERVED_MIME_TYPES_MAP
            .keys()
            .map(|s| {
                let mut s = s.to_string();
                s.push_str(", ");
                s
            })
            .collect::<String>()
    );

    for stream in listener.incoming() {
        match stream {
            Ok(stream) => {
                let (event_tx, event_rx) = sync_channel(0);
                event_tx_list.lock().unwrap().push(event_tx);
                let allowed_urls = allowed_urls.clone();
                let delivered_tpnote_docs = delivered_tpnote_docs.clone();
                let conn_counter = conn_counter.clone();
                let context = context.clone();
                thread::spawn(move || {
                    let mut st = ServerThread::new(
                        event_rx,
                        stream,
                        allowed_urls,
                        delivered_tpnote_docs,
                        conn_counter,
                        context,
                    );
                    st.serve_connection()
                });
            }
            Err(e) => log::warn!("TCP connection failed: {}", e),
        }
    }
}

/// Server thread state.
pub(crate) struct
{ /// Receiver side of the channel where `update` events are sent. rx: Receiver<SseToken>, /// Byte stream coming from a TCP connection. pub(crate) stream: TcpStream, /// A list of referenced relative URLs to images or other /// documents as they appear in the delivered Tp-Note documents. /// This list contains local links that may or may not have been displayed. /// The local links in this list are relative to `self.context.root_path` pub(crate) allowed_urls: Arc<RwLock<HashSet<PathBuf>>>, /// Subset of `allowed_urls` containing only URLs that /// have been actually delivered. The list only contains URLs to Tp-Note /// documents. /// The local links in this list are absolute. pub(crate) delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>, /// We do not store anything here, instead we use the ARC pointing to /// `conn_counter` to count the number of instances of `ServerThread`. pub(crate) conn_counter: Arc<()>, /// The constructor stores the path of the note document in `context.path` /// and in the Tera variable `TMPL_VAR_PATH`. /// Both are needed for rendering to HTML. pub(crate) context: Context, } impl ServerThread { /// Constructor. fn new( rx: Receiver<SseToken>, stream: TcpStream, allowed_urls: Arc<RwLock<HashSet<PathBuf>>>, delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>, conn_counter: Arc<()>, mut context: Context, ) -> Self { let local_addr = stream.local_addr(); // Compose JavaScript code. let note_js = match local_addr { Ok(addr) => format!( "{}{}:{}{}", SSE_CLIENT_CODE1, LOCALHOST, addr.port(), SSE_CLIENT_CODE2 ), Err(_) => { panic!("No TCP connection: socket address of local half is missing.") } }; // Save JavaScript code. context.insert(TMPL_HTML_VAR_NOTE_JS, &note_js); Self { rx, stream, allowed_urls, delivered_tpnote_docs, conn_counter, context, } } /// Wrapper for `serve_connection2()` that logs /// errors as log message warnings. fn serve_connection(&mut self) { match Self::serve_connection2(self) { Ok(_) => (), Err(e) => { log::debug!( "TCP port local {} to peer {}: Closed connection because of error: {}", self.stream .local_addr() .unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new( Ipv4Addr::new(0, 0, 0, 0), 0 ))) .port(), self.stream .peer_addr() .unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new( Ipv4Addr::new(0, 0, 0, 0), 0 ))) .port(), e ); } } } /// HTTP server: serves content and events via the specified subscriber stream. #[inline] #[allow(clippy::needless_return)] fn serve_connection2(&mut self) -> Result<(), ViewerError> { // One reference is hold by the `manage_connections` thread and does not count. // This is why we subtract 1. let open_connections = Arc::<()>::strong_count(&self.conn_counter) - 1; log::trace!( "TCP port local {} to peer {}: New incoming TCP connection ({} open).", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), open_connections ); // Check if we exceed our connection limit. if open_connections > CFG.viewer.tcp_connections_max { self.respond_service_unavailable()?; // This ends this thread and closes the connection. return Err(ViewerError::TcpConnectionsExceeded { max_conn: CFG.viewer.tcp_connections_max, }); } 'tcp_connection: loop { // This is inspired by the Spook crate. // Read the request. let mut read_buffer = [0u8; TCP_READ_BUFFER_SIZE]; let mut buffer = Vec::new(); let (method, path) = 'assemble_tcp_chunks: loop { // Read the request, or part thereof. 
match self.stream.read(&mut read_buffer) { Ok(0) => { log::trace!( "TCP port local {} to peer {}: Connection closed by peer.", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port() ); // Connection by peer. break 'tcp_connection; } Err(e) => { // Connection closed or error. return Err(ViewerError::StreamRead { error: e }); } Ok(n) => { // Successful read. buffer.extend_from_slice(&read_buffer[..n]); log::trace!( "TCP port local {} to peer {}: chunk: {:?}...", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), std::str::from_utf8(&read_buffer) .unwrap_or_default() .chars() .take(60) .collect::<String>() ); } } // Try to parse the request. let mut headers = [httparse::EMPTY_HEADER; 16]; let mut req = httparse::Request::new(&mut headers); let res = req.parse(&buffer)?; if res.is_partial() { continue 'assemble_tcp_chunks; } // Check if the HTTP header is complete and valid. if res.is_complete() { if let (Some(method), Some(path)) = (req.method, req.path) { // This is the only regular exit. break 'assemble_tcp_chunks (method, path); } }; // We quit with error. There is nothing more we can do here. return Err(ViewerError::StreamParse { source_str: std::str::from_utf8(&buffer) .unwrap_or_default() .chars() .take(60) .collect::<String>(), }); }; // End of input chunk loop. // The only supported request method for SSE is GET. if method!= "GET" { self.respond_method_not_allowed(method)?; continue 'tcp_connection; } // Decode the percent encoding in the URL path. let path = percent_decode_str(path).decode_utf8()?; // Check the path. // Serve note rendition. match &*path { // This is a connection for Server-Sent-Events. SSE_EVENT_PATH => { // Serve event response, but keep the connection. self.respond_event_ok()?; // Make the stream non-blocking to be able to detect whether the // connection was closed by the client. self.stream.set_nonblocking(true)?; // Serve events until the connection is closed. // Keep in mind that the client will often close // the request after the first event if the event // is used to trigger a page refresh, so try to eagerly // detect closed connections. '_event: loop { // Wait for the next update. let msg = self.rx.recv()?; // Detect whether the connection was closed. match self.stream.read(&mut read_buffer) { // Connection closed. Ok(0) => { log::trace!( "TCP port local {} to peer {}: Event connection closed by peer.", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port() ); // Our peer closed this connection, we finish also then. break 'tcp_connection; } // Connection alive. Ok(_) => {} // `WouldBlock` is OK, all others not. Err(e) => { if e.kind()!= ErrorKind::WouldBlock { // Something bad happened. return Err(ViewerError::StreamRead { error: e }); } } } // Send event. let event = match msg { SseToken::Update => "event: update\r\ndata:\r\n\r\n".to_string(), SseToken::Ping => ": ping\r\n\r\n".to_string(), }; self.stream.write_all(event.as_bytes())?; log::debug!( "TCP port local {} to peer {} ({} open TCP conn.): pushed '{:?}' in event connection to web browser.", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), Arc::<()>::strong_count(&self.conn_counter) - 1, msg, ); } } // Serve all other documents. _ => self.respond(&path)?, }; // end of match path } // Go to 'tcp_connection loop start log::trace!( "TCP port local {} to peer {}: ({} open). 
Closing this TCP connection.", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), // We subtract 1 for the `manage_connections()` thread, and // 1 for the thread we will close in a moment. Arc::<()>::strong_count(&self.conn_counter) - 2, ); // We came here because the client closed this connection. Ok(()) } /// Write HTTP event response. fn respond_event_ok(&mut self) -> Result<(), ViewerError> { // Declare SSE capability and allow cross-origin access. let response = format!( "\ HTTP/1.1 200 OK\r\n\ Date: {}\r\n\ Access-Control-Allow-Origin: *\r\n\ Cache-Control: no-cache\r\n\ Content-Type: text/event-stream\r\n\ \r\n", httpdate::fmt_http_date(SystemTime::now()), ); self.stream.write_all(response.as_bytes())?; log::debug!( "TCP port local {} to peer {}: 200 OK, served event header, \ keeping event connection open...", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), ); Ok(()) } }
ServerThread
identifier_name
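For the `identifier_name` record above, the held-out middle is just the type name `ServerThread`, spliced between a prefix ending `pub(crate) struct` and a suffix that opens with the brace and field list. A runnable toy reconstruction (the string literals abbreviate the real record):

fn main() {
    let prefix = "pub(crate) struct ";       // how the record's prefix ends
    let middle = "ServerThread";             // the held-out identifier
    let suffix = " { /* fields elided */ }"; // how the record's suffix begins
    let restored = format!("{}{}{}", prefix, middle, suffix);
    assert!(restored.starts_with("pub(crate) struct ServerThread"));
}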
sse_server.rs
//! Server-sent-event server for the note viewer feature. //! This module contains also the web browser Javascript client code. use crate::config::CFG; use crate::config::VIEWER_SERVED_MIME_TYPES_MAP; use crate::viewer::error::ViewerError; use crate::viewer::http_response::HttpResponse; use crate::viewer::init::LOCALHOST; use parking_lot::RwLock; use percent_encoding::percent_decode_str; use std::collections::HashSet; use std::io::{ErrorKind, Read, Write}; use std::net::Ipv4Addr; use std::net::SocketAddr; use std::net::SocketAddrV4; use std::net::{TcpListener, TcpStream}; use std::path::PathBuf; use std::str; use std::sync::mpsc::{sync_channel, Receiver, SyncSender}; use std::sync::{Arc, Mutex}; use std::thread; use std::time::SystemTime; use tpnote_lib::config::TMPL_HTML_VAR_NOTE_JS; use tpnote_lib::context::Context; /// The TCP stream is read in chunks. This is the read buffer size. const TCP_READ_BUFFER_SIZE: usize = 0x400; /// Javascript client code, part 1 /// Refresh on WTFiles events. pub const SSE_CLIENT_CODE1: &str = r#" var evtSource = new EventSource("http://"#; /// Javascript client code, part 2 /// Save last scroll position into local storage. /// Jump to the last saved scroll position. pub const SSE_CLIENT_CODE2: &str = r#"/events"); evtSource.addEventListener("update", function(e) { localStorage.setItem('scrollPosition', window.scrollY); window.location.reload(true); }); window.addEventListener('load', function() { if(localStorage.getItem('scrollPosition')!== null) window.scrollTo(0, localStorage.getItem('scrollPosition')); }); "#; /// URL path for Server-Sent-Events. const SSE_EVENT_PATH: &str = "/events"; /// Server-Sent-Event tokens our HTTP client has registered to receive. #[derive(Debug, Clone, Copy)] pub enum SseToken { /// Server-Sent-Event token to request nothing but check if the client is still /// there. Ping, /// Server-Sent-Event token to request a page update. Update, } pub fn manage_connections( event_tx_list: Arc<Mutex<Vec<SyncSender<SseToken>>>>, listener: TcpListener, doc_path: PathBuf, ) { // A list of referenced local links to images or other documents as // they appeared in the displayed documents. // Every thread gets an (ARC) reference to it. let allowed_urls = Arc::new(RwLock::new(HashSet::new())); // Subset of the above list containing only displayed Tp-Note documents. let delivered_tpnote_docs = Arc::new(RwLock::new(HashSet::new())); // We use an ARC to count the number of running threads. let conn_counter = Arc::new(()); // Store `doc_path` in the `context.path` and // in the Tera variable `TMPL_VAR_PATH`. let context = Context::from(&doc_path); log::info!( "Viewer notice:\n\ only files under the directory: {}\n\ with the following extensions:\n\ {}\n\ are served!", context.root_path.display(), &VIEWER_SERVED_MIME_TYPES_MAP .keys() .map(|s| { let mut s = s.to_string(); s.push_str(", "); s }) .collect::<String>() ); for stream in listener.incoming() { match stream { Ok(stream) => { let (event_tx, event_rx) = sync_channel(0); event_tx_list.lock().unwrap().push(event_tx); let allowed_urls = allowed_urls.clone(); let delivered_tpnote_docs = delivered_tpnote_docs.clone(); let conn_counter = conn_counter.clone(); let context = context.clone(); thread::spawn(move || { let mut st = ServerThread::new( event_rx, stream, allowed_urls, delivered_tpnote_docs, conn_counter, context, ); st.serve_connection() }); } Err(e) => log::warn!("TCP connection failed: {}", e), } } } /// Server thread state. 
pub(crate) struct ServerThread { /// Receiver side of the channel where `update` events are sent. rx: Receiver<SseToken>, /// Byte stream coming from a TCP connection. pub(crate) stream: TcpStream, /// A list of referenced relative URLs to images or other /// documents as they appear in the delivered Tp-Note documents. /// This list contains local links that may or may not have been displayed. /// The local links in this list are relative to `self.context.root_path` pub(crate) allowed_urls: Arc<RwLock<HashSet<PathBuf>>>, /// Subset of `allowed_urls` containing only URLs that /// have been actually delivered. The list only contains URLs to Tp-Note /// documents. /// The local links in this list are absolute. pub(crate) delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>, /// We do not store anything here, instead we use the ARC pointing to /// `conn_counter` to count the number of instances of `ServerThread`. pub(crate) conn_counter: Arc<()>, /// The constructor stores the path of the note document in `context.path` /// and in the Tera variable `TMPL_VAR_PATH`. /// Both are needed for rendering to HTML. pub(crate) context: Context, } impl ServerThread { /// Constructor. fn new( rx: Receiver<SseToken>, stream: TcpStream, allowed_urls: Arc<RwLock<HashSet<PathBuf>>>, delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>, conn_counter: Arc<()>, mut context: Context, ) -> Self { let local_addr = stream.local_addr(); // Compose JavaScript code. let note_js = match local_addr { Ok(addr) => format!( "{}{}:{}{}", SSE_CLIENT_CODE1, LOCALHOST, addr.port(), SSE_CLIENT_CODE2 ), Err(_) => { panic!("No TCP connection: socket address of local half is missing.") } }; // Save JavaScript code. context.insert(TMPL_HTML_VAR_NOTE_JS, &note_js); Self { rx, stream, allowed_urls, delivered_tpnote_docs, conn_counter, context, } } /// Wrapper for `serve_connection2()` that logs /// errors as log message warnings. fn serve_connection(&mut self) { match Self::serve_connection2(self) { Ok(_) => (), Err(e) => { log::debug!( "TCP port local {} to peer {}: Closed connection because of error: {}", self.stream .local_addr() .unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new( Ipv4Addr::new(0, 0, 0, 0), 0 ))) .port(), self.stream .peer_addr() .unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new( Ipv4Addr::new(0, 0, 0, 0), 0 ))) .port(), e ); } } } /// HTTP server: serves content and events via the specified subscriber stream. #[inline] #[allow(clippy::needless_return)] fn serve_connection2(&mut self) -> Result<(), ViewerError>
'tcp_connection: loop { // This is inspired by the Spook crate. // Read the request. let mut read_buffer = [0u8; TCP_READ_BUFFER_SIZE]; let mut buffer = Vec::new(); let (method, path) = 'assemble_tcp_chunks: loop { // Read the request, or part thereof. match self.stream.read(&mut read_buffer) { Ok(0) => { log::trace!( "TCP port local {} to peer {}: Connection closed by peer.", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port() ); // Connection by peer. break 'tcp_connection; } Err(e) => { // Connection closed or error. return Err(ViewerError::StreamRead { error: e }); } Ok(n) => { // Successful read. buffer.extend_from_slice(&read_buffer[..n]); log::trace!( "TCP port local {} to peer {}: chunk: {:?}...", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), std::str::from_utf8(&read_buffer) .unwrap_or_default() .chars() .take(60) .collect::<String>() ); } } // Try to parse the request. let mut headers = [httparse::EMPTY_HEADER; 16]; let mut req = httparse::Request::new(&mut headers); let res = req.parse(&buffer)?; if res.is_partial() { continue 'assemble_tcp_chunks; } // Check if the HTTP header is complete and valid. if res.is_complete() { if let (Some(method), Some(path)) = (req.method, req.path) { // This is the only regular exit. break 'assemble_tcp_chunks (method, path); } }; // We quit with error. There is nothing more we can do here. return Err(ViewerError::StreamParse { source_str: std::str::from_utf8(&buffer) .unwrap_or_default() .chars() .take(60) .collect::<String>(), }); }; // End of input chunk loop. // The only supported request method for SSE is GET. if method!= "GET" { self.respond_method_not_allowed(method)?; continue 'tcp_connection; } // Decode the percent encoding in the URL path. let path = percent_decode_str(path).decode_utf8()?; // Check the path. // Serve note rendition. match &*path { // This is a connection for Server-Sent-Events. SSE_EVENT_PATH => { // Serve event response, but keep the connection. self.respond_event_ok()?; // Make the stream non-blocking to be able to detect whether the // connection was closed by the client. self.stream.set_nonblocking(true)?; // Serve events until the connection is closed. // Keep in mind that the client will often close // the request after the first event if the event // is used to trigger a page refresh, so try to eagerly // detect closed connections. '_event: loop { // Wait for the next update. let msg = self.rx.recv()?; // Detect whether the connection was closed. match self.stream.read(&mut read_buffer) { // Connection closed. Ok(0) => { log::trace!( "TCP port local {} to peer {}: Event connection closed by peer.", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port() ); // Our peer closed this connection, we finish also then. break 'tcp_connection; } // Connection alive. Ok(_) => {} // `WouldBlock` is OK, all others not. Err(e) => { if e.kind()!= ErrorKind::WouldBlock { // Something bad happened. return Err(ViewerError::StreamRead { error: e }); } } } // Send event. let event = match msg { SseToken::Update => "event: update\r\ndata:\r\n\r\n".to_string(), SseToken::Ping => ": ping\r\n\r\n".to_string(), }; self.stream.write_all(event.as_bytes())?; log::debug!( "TCP port local {} to peer {} ({} open TCP conn.): pushed '{:?}' in event connection to web browser.", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), Arc::<()>::strong_count(&self.conn_counter) - 1, msg, ); } } // Serve all other documents. 
_ => self.respond(&path)?, }; // end of match path } // Go to 'tcp_connection loop start log::trace!( "TCP port local {} to peer {}: ({} open). Closing this TCP connection.", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), // We subtract 1 for the `manage_connections()` thread, and // 1 for the thread we will close in a moment. Arc::<()>::strong_count(&self.conn_counter) - 2, ); // We came here because the client closed this connection. Ok(()) } /// Write HTTP event response. fn respond_event_ok(&mut self) -> Result<(), ViewerError> { // Declare SSE capability and allow cross-origin access. let response = format!( "\ HTTP/1.1 200 OK\r\n\ Date: {}\r\n\ Access-Control-Allow-Origin: *\r\n\ Cache-Control: no-cache\r\n\ Content-Type: text/event-stream\r\n\ \r\n", httpdate::fmt_http_date(SystemTime::now()), ); self.stream.write_all(response.as_bytes())?; log::debug!( "TCP port local {} to peer {}: 200 OK, served event header, \ keeping event connection open...", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), ); Ok(()) } }
{ // One reference is held by the `manage_connections` thread and does not count. // This is why we subtract 1. let open_connections = Arc::<()>::strong_count(&self.conn_counter) - 1; log::trace!( "TCP port local {} to peer {}: New incoming TCP connection ({} open).", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), open_connections ); // Check if we exceed our connection limit. if open_connections > CFG.viewer.tcp_connections_max { self.respond_service_unavailable()?; // This ends this thread and closes the connection. return Err(ViewerError::TcpConnectionsExceeded { max_conn: CFG.viewer.tcp_connections_max, }); }
identifier_body
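The `identifier_body` middle above is the whole body of `serve_connection2`; its opening move counts open connections via the strong count of a shared `Arc<()>`, subtracting the one reference held by the accepting thread. A self-contained sketch of that counting trick (variable names are illustrative, not from the source):

use std::sync::Arc;

fn main() {
    // One Arc lives in the accepting thread and does not represent a client.
    let conn_counter = Arc::new(());
    // Each connection thread would receive its own clone.
    let handles: Vec<Arc<()>> = (0..3).map(|_| conn_counter.clone()).collect();
    // Subtract 1 for the acceptor's own reference, exactly as the source does.
    let open_connections = Arc::strong_count(&conn_counter) - 1;
    assert_eq!(open_connections, handles.len());
}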
sse_server.rs
//! Server-sent-event server for the note viewer feature. //! This module contains also the web browser Javascript client code. use crate::config::CFG; use crate::config::VIEWER_SERVED_MIME_TYPES_MAP; use crate::viewer::error::ViewerError; use crate::viewer::http_response::HttpResponse; use crate::viewer::init::LOCALHOST; use parking_lot::RwLock; use percent_encoding::percent_decode_str; use std::collections::HashSet; use std::io::{ErrorKind, Read, Write}; use std::net::Ipv4Addr; use std::net::SocketAddr; use std::net::SocketAddrV4; use std::net::{TcpListener, TcpStream}; use std::path::PathBuf; use std::str; use std::sync::mpsc::{sync_channel, Receiver, SyncSender}; use std::sync::{Arc, Mutex}; use std::thread; use std::time::SystemTime; use tpnote_lib::config::TMPL_HTML_VAR_NOTE_JS; use tpnote_lib::context::Context; /// The TCP stream is read in chunks. This is the read buffer size. const TCP_READ_BUFFER_SIZE: usize = 0x400; /// Javascript client code, part 1 /// Refresh on WTFiles events. pub const SSE_CLIENT_CODE1: &str = r#" var evtSource = new EventSource("http://"#; /// Javascript client code, part 2 /// Save last scroll position into local storage. /// Jump to the last saved scroll position. pub const SSE_CLIENT_CODE2: &str = r#"/events"); evtSource.addEventListener("update", function(e) { localStorage.setItem('scrollPosition', window.scrollY); window.location.reload(true); }); window.addEventListener('load', function() { if(localStorage.getItem('scrollPosition')!== null) window.scrollTo(0, localStorage.getItem('scrollPosition')); }); "#; /// URL path for Server-Sent-Events. const SSE_EVENT_PATH: &str = "/events";
pub enum SseToken { /// Server-Sent-Event token to request nothing but check if the client is still /// there. Ping, /// Server-Sent-Event token to request a page update. Update, } pub fn manage_connections( event_tx_list: Arc<Mutex<Vec<SyncSender<SseToken>>>>, listener: TcpListener, doc_path: PathBuf, ) { // A list of referenced local links to images or other documents as // they appeared in the displayed documents. // Every thread gets an (ARC) reference to it. let allowed_urls = Arc::new(RwLock::new(HashSet::new())); // Subset of the above list containing only displayed Tp-Note documents. let delivered_tpnote_docs = Arc::new(RwLock::new(HashSet::new())); // We use an ARC to count the number of running threads. let conn_counter = Arc::new(()); // Store `doc_path` in the `context.path` and // in the Tera variable `TMPL_VAR_PATH`. let context = Context::from(&doc_path); log::info!( "Viewer notice:\n\ only files under the directory: {}\n\ with the following extensions:\n\ {}\n\ are served!", context.root_path.display(), &VIEWER_SERVED_MIME_TYPES_MAP .keys() .map(|s| { let mut s = s.to_string(); s.push_str(", "); s }) .collect::<String>() ); for stream in listener.incoming() { match stream { Ok(stream) => { let (event_tx, event_rx) = sync_channel(0); event_tx_list.lock().unwrap().push(event_tx); let allowed_urls = allowed_urls.clone(); let delivered_tpnote_docs = delivered_tpnote_docs.clone(); let conn_counter = conn_counter.clone(); let context = context.clone(); thread::spawn(move || { let mut st = ServerThread::new( event_rx, stream, allowed_urls, delivered_tpnote_docs, conn_counter, context, ); st.serve_connection() }); } Err(e) => log::warn!("TCP connection failed: {}", e), } } } /// Server thread state. pub(crate) struct ServerThread { /// Receiver side of the channel where `update` events are sent. rx: Receiver<SseToken>, /// Byte stream coming from a TCP connection. pub(crate) stream: TcpStream, /// A list of referenced relative URLs to images or other /// documents as they appear in the delivered Tp-Note documents. /// This list contains local links that may or may not have been displayed. /// The local links in this list are relative to `self.context.root_path` pub(crate) allowed_urls: Arc<RwLock<HashSet<PathBuf>>>, /// Subset of `allowed_urls` containing only URLs that /// have been actually delivered. The list only contains URLs to Tp-Note /// documents. /// The local links in this list are absolute. pub(crate) delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>, /// We do not store anything here, instead we use the ARC pointing to /// `conn_counter` to count the number of instances of `ServerThread`. pub(crate) conn_counter: Arc<()>, /// The constructor stores the path of the note document in `context.path` /// and in the Tera variable `TMPL_VAR_PATH`. /// Both are needed for rendering to HTML. pub(crate) context: Context, } impl ServerThread { /// Constructor. fn new( rx: Receiver<SseToken>, stream: TcpStream, allowed_urls: Arc<RwLock<HashSet<PathBuf>>>, delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>, conn_counter: Arc<()>, mut context: Context, ) -> Self { let local_addr = stream.local_addr(); // Compose JavaScript code. let note_js = match local_addr { Ok(addr) => format!( "{}{}:{}{}", SSE_CLIENT_CODE1, LOCALHOST, addr.port(), SSE_CLIENT_CODE2 ), Err(_) => { panic!("No TCP connection: socket address of local half is missing.") } }; // Save JavaScript code. 
context.insert(TMPL_HTML_VAR_NOTE_JS, &note_js); Self { rx, stream, allowed_urls, delivered_tpnote_docs, conn_counter, context, } } /// Wrapper for `serve_connection2()` that logs /// errors as log message warnings. fn serve_connection(&mut self) { match Self::serve_connection2(self) { Ok(_) => (), Err(e) => { log::debug!( "TCP port local {} to peer {}: Closed connection because of error: {}", self.stream .local_addr() .unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new( Ipv4Addr::new(0, 0, 0, 0), 0 ))) .port(), self.stream .peer_addr() .unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new( Ipv4Addr::new(0, 0, 0, 0), 0 ))) .port(), e ); } } } /// HTTP server: serves content and events via the specified subscriber stream. #[inline] #[allow(clippy::needless_return)] fn serve_connection2(&mut self) -> Result<(), ViewerError> { // One reference is hold by the `manage_connections` thread and does not count. // This is why we subtract 1. let open_connections = Arc::<()>::strong_count(&self.conn_counter) - 1; log::trace!( "TCP port local {} to peer {}: New incoming TCP connection ({} open).", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), open_connections ); // Check if we exceed our connection limit. if open_connections > CFG.viewer.tcp_connections_max { self.respond_service_unavailable()?; // This ends this thread and closes the connection. return Err(ViewerError::TcpConnectionsExceeded { max_conn: CFG.viewer.tcp_connections_max, }); } 'tcp_connection: loop { // This is inspired by the Spook crate. // Read the request. let mut read_buffer = [0u8; TCP_READ_BUFFER_SIZE]; let mut buffer = Vec::new(); let (method, path) = 'assemble_tcp_chunks: loop { // Read the request, or part thereof. match self.stream.read(&mut read_buffer) { Ok(0) => { log::trace!( "TCP port local {} to peer {}: Connection closed by peer.", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port() ); // Connection by peer. break 'tcp_connection; } Err(e) => { // Connection closed or error. return Err(ViewerError::StreamRead { error: e }); } Ok(n) => { // Successful read. buffer.extend_from_slice(&read_buffer[..n]); log::trace!( "TCP port local {} to peer {}: chunk: {:?}...", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), std::str::from_utf8(&read_buffer) .unwrap_or_default() .chars() .take(60) .collect::<String>() ); } } // Try to parse the request. let mut headers = [httparse::EMPTY_HEADER; 16]; let mut req = httparse::Request::new(&mut headers); let res = req.parse(&buffer)?; if res.is_partial() { continue 'assemble_tcp_chunks; } // Check if the HTTP header is complete and valid. if res.is_complete() { if let (Some(method), Some(path)) = (req.method, req.path) { // This is the only regular exit. break 'assemble_tcp_chunks (method, path); } }; // We quit with error. There is nothing more we can do here. return Err(ViewerError::StreamParse { source_str: std::str::from_utf8(&buffer) .unwrap_or_default() .chars() .take(60) .collect::<String>(), }); }; // End of input chunk loop. // The only supported request method for SSE is GET. if method!= "GET" { self.respond_method_not_allowed(method)?; continue 'tcp_connection; } // Decode the percent encoding in the URL path. let path = percent_decode_str(path).decode_utf8()?; // Check the path. // Serve note rendition. match &*path { // This is a connection for Server-Sent-Events. SSE_EVENT_PATH => { // Serve event response, but keep the connection. 
self.respond_event_ok()?; // Make the stream non-blocking to be able to detect whether the // connection was closed by the client. self.stream.set_nonblocking(true)?; // Serve events until the connection is closed. // Keep in mind that the client will often close // the request after the first event if the event // is used to trigger a page refresh, so try to eagerly // detect closed connections. '_event: loop { // Wait for the next update. let msg = self.rx.recv()?; // Detect whether the connection was closed. match self.stream.read(&mut read_buffer) { // Connection closed. Ok(0) => { log::trace!( "TCP port local {} to peer {}: Event connection closed by peer.", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port() ); // Our peer closed this connection, we finish also then. break 'tcp_connection; } // Connection alive. Ok(_) => {} // `WouldBlock` is OK, all others not. Err(e) => { if e.kind()!= ErrorKind::WouldBlock { // Something bad happened. return Err(ViewerError::StreamRead { error: e }); } } } // Send event. let event = match msg { SseToken::Update => "event: update\r\ndata:\r\n\r\n".to_string(), SseToken::Ping => ": ping\r\n\r\n".to_string(), }; self.stream.write_all(event.as_bytes())?; log::debug!( "TCP port local {} to peer {} ({} open TCP conn.): pushed '{:?}' in event connection to web browser.", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), Arc::<()>::strong_count(&self.conn_counter) - 1, msg, ); } } // Serve all other documents. _ => self.respond(&path)?, }; // end of match path } // Go to 'tcp_connection loop start log::trace!( "TCP port local {} to peer {}: ({} open). Closing this TCP connection.", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), // We subtract 1 for the `manage connection()` thread, and // 1 for the thread we will close in a moment. Arc::<()>::strong_count(&self.conn_counter) - 2, ); // We came here because the client closed this connection. Ok(()) } /// Write HTTP event response. fn respond_event_ok(&mut self) -> Result<(), ViewerError> { // Declare SSE capability and allow cross-origin access. let response = format!( "\ HTTP/1.1 200 OK\r\n\ Date: {}\r\n\ Access-Control-Allow-Origin: *\r\n\ Cache-Control: no-cache\r\n\ Content-Type: text/event-stream\r\n\ \r\n", httpdate::fmt_http_date(SystemTime::now()), ); self.stream.write_all(response.as_bytes())?; log::debug!( "TCP port local {} to peer {}: 200 OK, served event header, \ keeping event connection open...", self.stream.local_addr()?.port(), self.stream.peer_addr()?.port(), ); Ok(()) } }
/// Server-Sent-Event tokens our HTTP client has registered to receive. #[derive(Debug, Clone, Copy)]
random_line_split
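The `SseToken` doc comment restored by this record pairs with a fixed wire encoding later in the module: `Update` becomes a named Server-Sent-Events event, while `Ping` is a comment line that browsers ignore. A minimal sketch of that mapping, mirroring the match in the server's event loop:

#[derive(Debug, Clone, Copy)]
enum SseToken { Ping, Update }

fn frame(msg: SseToken) -> String {
    match msg {
        // A named event; the client's EventSource listener for "update" fires.
        SseToken::Update => "event: update\r\ndata:\r\n\r\n".to_string(),
        // Lines starting with ':' are SSE comments, usable as keep-alives.
        SseToken::Ping => ": ping\r\n\r\n".to_string(),
    }
}

fn main() {
    assert!(frame(SseToken::Update).starts_with("event: update"));
    assert!(frame(SseToken::Ping).starts_with(": ping"));
}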
inner_product_proof.rs
#![allow(non_snake_case)] #![doc(include = "../docs/inner-product-protocol.md")] use std::borrow::Borrow; use std::iter; use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::scalar::Scalar; use curve25519_dalek::traits::VartimeMultiscalarMul; use proof_transcript::ProofTranscript; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct InnerProductProof { pub(crate) L_vec: Vec<RistrettoPoint>, pub(crate) R_vec: Vec<RistrettoPoint>, pub(crate) a: Scalar, pub(crate) b: Scalar, } impl InnerProductProof { /// Create an inner-product proof. /// /// The proof is created with respect to the bases \\(G\\), \\(H'\\), /// where \\(H'\_i = H\_i \cdot \texttt{Hprime\\_factors}\_i\\). /// /// The `verifier` is passed in as a parameter so that the /// challenges depend on the *entire* transcript (including parent /// protocols). pub fn create<I>( verifier: &mut ProofTranscript, Q: &RistrettoPoint, Hprime_factors: I, mut G_vec: Vec<RistrettoPoint>, mut H_vec: Vec<RistrettoPoint>, mut a_vec: Vec<Scalar>, mut b_vec: Vec<Scalar>, ) -> InnerProductProof where I: IntoIterator, I::Item: Borrow<Scalar>, { // Create slices G, H, a, b backed by their respective // vectors. This lets us reslice as we compress the lengths // of the vectors in the main loop below. let mut G = &mut G_vec[..]; let mut H = &mut H_vec[..]; let mut a = &mut a_vec[..]; let mut b = &mut b_vec[..]; let mut n = G.len(); // All of the input vectors must have the same length. assert_eq!(G.len(), n); assert_eq!(H.len(), n); assert_eq!(a.len(), n); assert_eq!(b.len(), n); // XXX save these scalar mults by unrolling them into the // first iteration of the loop below for (H_i, h_i) in H.iter_mut().zip(Hprime_factors.into_iter()) { *H_i = (&*H_i) * h_i.borrow(); } let lg_n = n.next_power_of_two().trailing_zeros() as usize; let mut L_vec = Vec::with_capacity(lg_n); let mut R_vec = Vec::with_capacity(lg_n); while n!= 1 { n = n / 2; let (a_L, a_R) = a.split_at_mut(n); let (b_L, b_R) = b.split_at_mut(n); let (G_L, G_R) = G.split_at_mut(n); let (H_L, H_R) = H.split_at_mut(n); let c_L = inner_product(&a_L, &b_R); let c_R = inner_product(&a_R, &b_L); let L = RistrettoPoint::vartime_multiscalar_mul( a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)), G_R.iter().chain(H_L.iter()).chain(iter::once(Q)), ); let R = RistrettoPoint::vartime_multiscalar_mul( a_R.iter().chain(b_L.iter()).chain(iter::once(&c_R)), G_L.iter().chain(H_R.iter()).chain(iter::once(Q)), ); L_vec.push(L); R_vec.push(R); verifier.commit(L.compress().as_bytes()); verifier.commit(R.compress().as_bytes()); let u = verifier.challenge_scalar(); let u_inv = u.invert(); for i in 0..n { a_L[i] = a_L[i] * u + u_inv * a_R[i]; b_L[i] = b_L[i] * u_inv + u * b_R[i]; G_L[i] = RistrettoPoint::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]); H_L[i] = RistrettoPoint::vartime_multiscalar_mul(&[u, u_inv], &[H_L[i], H_R[i]]); } a = a_L; b = b_L; G = G_L; H = H_L; } return InnerProductProof { L_vec: L_vec, R_vec: R_vec, a: a[0], b: b[0], }; } /// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication /// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details. pub(crate) fn verification_scalars( &self, transcript: &mut ProofTranscript, ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) { let lg_n = self.L_vec.len(); let n = 1 << lg_n; // 1. 
Recompute x_k,...,x_1 based on the proof transcript let mut challenges = Vec::with_capacity(lg_n); for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) { // XXX maybe avoid this compression when proof ser/de is sorted out transcript.commit(L.compress().as_bytes()); transcript.commit(R.compress().as_bytes()); challenges.push(transcript.challenge_scalar()); } // 2. Compute 1/(u_k...u_1) and 1/u_k,..., 1/u_1 let mut challenges_inv = challenges.clone(); let allinv = Scalar::batch_invert(&mut challenges_inv); // 3. Compute u_i^2 and (1/u_i)^2 for i in 0..lg_n { // XXX missing square fn upstream challenges[i] = challenges[i] * challenges[i]; challenges_inv[i] = challenges_inv[i] * challenges_inv[i]; } let challenges_sq = challenges; let challenges_inv_sq = challenges_inv; // 4. Compute s values inductively. let mut s = Vec::with_capacity(n); s.push(allinv); for i in 1..n { let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize; let k = 1 << lg_i; // The challenges are stored in "creation order" as [u_k,...,u_1], // so u_{lg(i)+1} = is indexed by (lg_n-1) - lg_i let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i]; s.push(s[i - k] * u_lg_i_sq); } (challenges_sq, challenges_inv_sq, s) } /// This method is for testing that proof generation work, /// but for efficiency the actual protocols would use `verification_scalars` /// method to combine inner product verification with other checks /// in a single multiscalar multiplication. #[allow(dead_code)] pub fn verify<I>( &self, transcript: &mut ProofTranscript, Hprime_factors: I, P: &RistrettoPoint, Q: &RistrettoPoint, G: &[RistrettoPoint], H: &[RistrettoPoint], ) -> Result<(), ()> where I: IntoIterator, I::Item: Borrow<Scalar>, { let (u_sq, u_inv_sq, s) = self.verification_scalars(transcript); let a_times_s = s.iter().map(|s_i| self.a * s_i); // 1/s[i] is s[!i], and!i runs from n-1 to 0 as i runs from 0 to n-1 let inv_s = s.iter().rev(); let h_times_b_div_s = Hprime_factors .into_iter() .zip(inv_s) .map(|(h_i, s_i_inv)| (self.b * s_i_inv) * h_i.borrow()); let neg_u_sq = u_sq.iter().map(|ui| -ui); let neg_u_inv_sq = u_inv_sq.iter().map(|ui| -ui); let expect_P = RistrettoPoint::vartime_multiscalar_mul( iter::once(self.a * self.b) .chain(a_times_s) .chain(h_times_b_div_s) .chain(neg_u_sq) .chain(neg_u_inv_sq), iter::once(Q) .chain(G.iter()) .chain(H.iter()) .chain(self.L_vec.iter()) .chain(self.R_vec.iter()), ); if expect_P == *P { Ok(()) } else
} } /// Computes an inner product of two vectors /// \\[ /// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i. /// \\] /// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal. pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { let mut out = Scalar::zero(); if a.len()!= b.len() { panic!("inner_product(a,b): lengths of vectors do not match"); } for i in 0..a.len() { out += a[i] * b[i]; } out } #[cfg(test)] mod tests { use super::*; use rand::OsRng; use sha2::Sha512; use util; fn test_helper_create(n: usize) { let mut rng = OsRng::new().unwrap(); use generators::{Generators, PedersenGenerators}; let gens = Generators::new(PedersenGenerators::default(), n, 1); let G = gens.share(0).G.to_vec(); let H = gens.share(0).H.to_vec(); // Q would be determined upstream in the protocol, so we pick a random one. let Q = RistrettoPoint::hash_from_bytes::<Sha512>(b"test point"); // a and b are the vectors for which we want to prove c = <a,b> let a: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); let c = inner_product(&a, &b); // y_inv is (the inverse of) a random challenge let y_inv = Scalar::random(&mut rng); // P would be determined upstream, but we need a correct P to check the proof. // // To generate P = <a,G> + <b,H'> + <a,b> Q, compute // P = <a,G> + <b',H> + <a,b> Q, // where b' = b \circ y^(-n) let b_prime = b.iter().zip(util::exp_iter(y_inv)).map(|(bi, yi)| bi * yi); // a.iter() has Item=&Scalar, need Item=Scalar to chain with b_prime let a_prime = a.iter().cloned(); let P = RistrettoPoint::vartime_multiscalar_mul( a_prime.chain(b_prime).chain(iter::once(c)), G.iter().chain(H.iter()).chain(iter::once(&Q)), ); let mut verifier = ProofTranscript::new(b"innerproducttest"); let proof = InnerProductProof::create( &mut verifier, &Q, util::exp_iter(y_inv), G.clone(), H.clone(), a.clone(), b.clone(), ); let mut verifier = ProofTranscript::new(b"innerproducttest"); assert!( proof .verify(&mut verifier, util::exp_iter(y_inv), &P, &Q, &G, &H) .is_ok() ); } #[test] fn make_ipp_1() { test_helper_create(1); } #[test] fn make_ipp_2() { test_helper_create(2); } #[test] fn make_ipp_4() { test_helper_create(4); } #[test] fn make_ipp_32() { test_helper_create(32); } #[test] fn make_ipp_64() { test_helper_create(64); } #[test] fn test_inner_product() { let a = vec![ Scalar::from_u64(1), Scalar::from_u64(2), Scalar::from_u64(3), Scalar::from_u64(4), ]; let b = vec![ Scalar::from_u64(2), Scalar::from_u64(3), Scalar::from_u64(4), Scalar::from_u64(5), ]; assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); } }
{ Err(()) }
conditional_block
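The `conditional_block` middle `{ Err(()) }` is the reject arm of the final equality test in `verify`. Reading the multiscalar multiplication back into math (the \\(h_i\\) are the `Hprime_factors`, \\(s_i\\) the verification scalars, \\(u_j\\) the round challenges), the verifier accepts exactly when

\\[ P = (a b)\,Q + \sum_i (a s_i)\,G_i + \sum_i (b s_i^{-1} h_i)\,H_i - \sum_j u_j^{2}\,L_j - \sum_j u_j^{-2}\,R_j, \\]

returning `Ok(())` in that case and `Err(())` otherwise.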
inner_product_proof.rs
#![allow(non_snake_case)] #![doc(include = "../docs/inner-product-protocol.md")] use std::borrow::Borrow; use std::iter; use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::scalar::Scalar; use curve25519_dalek::traits::VartimeMultiscalarMul; use proof_transcript::ProofTranscript; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct InnerProductProof { pub(crate) L_vec: Vec<RistrettoPoint>, pub(crate) R_vec: Vec<RistrettoPoint>, pub(crate) a: Scalar, pub(crate) b: Scalar, } impl InnerProductProof { /// Create an inner-product proof. /// /// The proof is created with respect to the bases \\(G\\), \\(H'\\), /// where \\(H'\_i = H\_i \cdot \texttt{Hprime\\_factors}\_i\\). /// /// The `verifier` is passed in as a parameter so that the /// challenges depend on the *entire* transcript (including parent /// protocols). pub fn create<I>( verifier: &mut ProofTranscript, Q: &RistrettoPoint, Hprime_factors: I, mut G_vec: Vec<RistrettoPoint>, mut H_vec: Vec<RistrettoPoint>, mut a_vec: Vec<Scalar>, mut b_vec: Vec<Scalar>, ) -> InnerProductProof where I: IntoIterator, I::Item: Borrow<Scalar>, { // Create slices G, H, a, b backed by their respective // vectors. This lets us reslice as we compress the lengths // of the vectors in the main loop below. let mut G = &mut G_vec[..]; let mut H = &mut H_vec[..]; let mut a = &mut a_vec[..]; let mut b = &mut b_vec[..]; let mut n = G.len(); // All of the input vectors must have the same length. assert_eq!(G.len(), n); assert_eq!(H.len(), n); assert_eq!(a.len(), n); assert_eq!(b.len(), n); // XXX save these scalar mults by unrolling them into the // first iteration of the loop below for (H_i, h_i) in H.iter_mut().zip(Hprime_factors.into_iter()) { *H_i = (&*H_i) * h_i.borrow(); } let lg_n = n.next_power_of_two().trailing_zeros() as usize; let mut L_vec = Vec::with_capacity(lg_n); let mut R_vec = Vec::with_capacity(lg_n); while n!= 1 { n = n / 2; let (a_L, a_R) = a.split_at_mut(n); let (b_L, b_R) = b.split_at_mut(n); let (G_L, G_R) = G.split_at_mut(n); let (H_L, H_R) = H.split_at_mut(n); let c_L = inner_product(&a_L, &b_R); let c_R = inner_product(&a_R, &b_L); let L = RistrettoPoint::vartime_multiscalar_mul( a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)), G_R.iter().chain(H_L.iter()).chain(iter::once(Q)), ); let R = RistrettoPoint::vartime_multiscalar_mul( a_R.iter().chain(b_L.iter()).chain(iter::once(&c_R)), G_L.iter().chain(H_R.iter()).chain(iter::once(Q)), ); L_vec.push(L); R_vec.push(R); verifier.commit(L.compress().as_bytes()); verifier.commit(R.compress().as_bytes()); let u = verifier.challenge_scalar(); let u_inv = u.invert(); for i in 0..n { a_L[i] = a_L[i] * u + u_inv * a_R[i]; b_L[i] = b_L[i] * u_inv + u * b_R[i]; G_L[i] = RistrettoPoint::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]); H_L[i] = RistrettoPoint::vartime_multiscalar_mul(&[u, u_inv], &[H_L[i], H_R[i]]); } a = a_L; b = b_L; G = G_L; H = H_L; } return InnerProductProof { L_vec: L_vec, R_vec: R_vec, a: a[0], b: b[0], }; } /// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication /// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details. pub(crate) fn
( &self, transcript: &mut ProofTranscript, ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) { let lg_n = self.L_vec.len(); let n = 1 << lg_n; // 1. Recompute x_k,...,x_1 based on the proof transcript let mut challenges = Vec::with_capacity(lg_n); for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) { // XXX maybe avoid this compression when proof ser/de is sorted out transcript.commit(L.compress().as_bytes()); transcript.commit(R.compress().as_bytes()); challenges.push(transcript.challenge_scalar()); } // 2. Compute 1/(u_k...u_1) and 1/u_k,..., 1/u_1 let mut challenges_inv = challenges.clone(); let allinv = Scalar::batch_invert(&mut challenges_inv); // 3. Compute u_i^2 and (1/u_i)^2 for i in 0..lg_n { // XXX missing square fn upstream challenges[i] = challenges[i] * challenges[i]; challenges_inv[i] = challenges_inv[i] * challenges_inv[i]; } let challenges_sq = challenges; let challenges_inv_sq = challenges_inv; // 4. Compute s values inductively. let mut s = Vec::with_capacity(n); s.push(allinv); for i in 1..n { let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize; let k = 1 << lg_i; // The challenges are stored in "creation order" as [u_k,...,u_1], // so u_{lg(i)+1} = is indexed by (lg_n-1) - lg_i let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i]; s.push(s[i - k] * u_lg_i_sq); } (challenges_sq, challenges_inv_sq, s) } /// This method is for testing that proof generation work, /// but for efficiency the actual protocols would use `verification_scalars` /// method to combine inner product verification with other checks /// in a single multiscalar multiplication. #[allow(dead_code)] pub fn verify<I>( &self, transcript: &mut ProofTranscript, Hprime_factors: I, P: &RistrettoPoint, Q: &RistrettoPoint, G: &[RistrettoPoint], H: &[RistrettoPoint], ) -> Result<(), ()> where I: IntoIterator, I::Item: Borrow<Scalar>, { let (u_sq, u_inv_sq, s) = self.verification_scalars(transcript); let a_times_s = s.iter().map(|s_i| self.a * s_i); // 1/s[i] is s[!i], and!i runs from n-1 to 0 as i runs from 0 to n-1 let inv_s = s.iter().rev(); let h_times_b_div_s = Hprime_factors .into_iter() .zip(inv_s) .map(|(h_i, s_i_inv)| (self.b * s_i_inv) * h_i.borrow()); let neg_u_sq = u_sq.iter().map(|ui| -ui); let neg_u_inv_sq = u_inv_sq.iter().map(|ui| -ui); let expect_P = RistrettoPoint::vartime_multiscalar_mul( iter::once(self.a * self.b) .chain(a_times_s) .chain(h_times_b_div_s) .chain(neg_u_sq) .chain(neg_u_inv_sq), iter::once(Q) .chain(G.iter()) .chain(H.iter()) .chain(self.L_vec.iter()) .chain(self.R_vec.iter()), ); if expect_P == *P { Ok(()) } else { Err(()) } } } /// Computes an inner product of two vectors /// \\[ /// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i. /// \\] /// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal. pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { let mut out = Scalar::zero(); if a.len()!= b.len() { panic!("inner_product(a,b): lengths of vectors do not match"); } for i in 0..a.len() { out += a[i] * b[i]; } out } #[cfg(test)] mod tests { use super::*; use rand::OsRng; use sha2::Sha512; use util; fn test_helper_create(n: usize) { let mut rng = OsRng::new().unwrap(); use generators::{Generators, PedersenGenerators}; let gens = Generators::new(PedersenGenerators::default(), n, 1); let G = gens.share(0).G.to_vec(); let H = gens.share(0).H.to_vec(); // Q would be determined upstream in the protocol, so we pick a random one. 
let Q = RistrettoPoint::hash_from_bytes::<Sha512>(b"test point"); // a and b are the vectors for which we want to prove c = <a,b> let a: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); let c = inner_product(&a, &b); // y_inv is (the inverse of) a random challenge let y_inv = Scalar::random(&mut rng); // P would be determined upstream, but we need a correct P to check the proof. // // To generate P = <a,G> + <b,H'> + <a,b> Q, compute // P = <a,G> + <b',H> + <a,b> Q, // where b' = b \circ y^(-n) let b_prime = b.iter().zip(util::exp_iter(y_inv)).map(|(bi, yi)| bi * yi); // a.iter() has Item=&Scalar, need Item=Scalar to chain with b_prime let a_prime = a.iter().cloned(); let P = RistrettoPoint::vartime_multiscalar_mul( a_prime.chain(b_prime).chain(iter::once(c)), G.iter().chain(H.iter()).chain(iter::once(&Q)), ); let mut verifier = ProofTranscript::new(b"innerproducttest"); let proof = InnerProductProof::create( &mut verifier, &Q, util::exp_iter(y_inv), G.clone(), H.clone(), a.clone(), b.clone(), ); let mut verifier = ProofTranscript::new(b"innerproducttest"); assert!( proof .verify(&mut verifier, util::exp_iter(y_inv), &P, &Q, &G, &H) .is_ok() ); } #[test] fn make_ipp_1() { test_helper_create(1); } #[test] fn make_ipp_2() { test_helper_create(2); } #[test] fn make_ipp_4() { test_helper_create(4); } #[test] fn make_ipp_32() { test_helper_create(32); } #[test] fn make_ipp_64() { test_helper_create(64); } #[test] fn test_inner_product() { let a = vec![ Scalar::from_u64(1), Scalar::from_u64(2), Scalar::from_u64(3), Scalar::from_u64(4), ]; let b = vec![ Scalar::from_u64(2), Scalar::from_u64(3), Scalar::from_u64(4), Scalar::from_u64(5), ]; assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); } }
verification_scalars
identifier_name
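The `verification_scalars` body restored here builds the \\(s\\) vector inductively: \\(s_0\\) is the product of all inverted challenges, and each later entry reuses an earlier one times one squared challenge. In the notation of the code's own comments (challenges stored in creation order \\(u_k, \ldots, u_1\\)):

\\[ s_0 = \prod_{j=1}^{k} u_j^{-1}, \qquad s_i = s_{i - 2^{\lfloor \lg i \rfloor}} \cdot u_{\lfloor \lg i \rfloor + 1}^{2} \quad (1 \le i < 2^{k}). \\]

Equivalently, each \\(s_i\\) is a product of the \\(u_j^{\pm 1}\\) selected by the bits of \\(i\\), the closed form usually quoted for this folding; the code computes it by the recurrence above to stay linear in \\(n\\).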
inner_product_proof.rs
#![allow(non_snake_case)] #![doc(include = "../docs/inner-product-protocol.md")] use std::borrow::Borrow; use std::iter; use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::scalar::Scalar; use curve25519_dalek::traits::VartimeMultiscalarMul; use proof_transcript::ProofTranscript; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct InnerProductProof { pub(crate) L_vec: Vec<RistrettoPoint>, pub(crate) R_vec: Vec<RistrettoPoint>, pub(crate) a: Scalar, pub(crate) b: Scalar, } impl InnerProductProof { /// Create an inner-product proof. /// /// The proof is created with respect to the bases \\(G\\), \\(H'\\), /// where \\(H'\_i = H\_i \cdot \texttt{Hprime\\_factors}\_i\\). /// /// The `verifier` is passed in as a parameter so that the /// challenges depend on the *entire* transcript (including parent /// protocols). pub fn create<I>( verifier: &mut ProofTranscript, Q: &RistrettoPoint, Hprime_factors: I, mut G_vec: Vec<RistrettoPoint>, mut H_vec: Vec<RistrettoPoint>, mut a_vec: Vec<Scalar>, mut b_vec: Vec<Scalar>, ) -> InnerProductProof where I: IntoIterator, I::Item: Borrow<Scalar>, { // Create slices G, H, a, b backed by their respective // vectors. This lets us reslice as we compress the lengths // of the vectors in the main loop below. let mut G = &mut G_vec[..]; let mut H = &mut H_vec[..]; let mut a = &mut a_vec[..]; let mut b = &mut b_vec[..]; let mut n = G.len(); // All of the input vectors must have the same length. assert_eq!(G.len(), n); assert_eq!(H.len(), n); assert_eq!(a.len(), n); assert_eq!(b.len(), n); // XXX save these scalar mults by unrolling them into the // first iteration of the loop below for (H_i, h_i) in H.iter_mut().zip(Hprime_factors.into_iter()) { *H_i = (&*H_i) * h_i.borrow(); } let lg_n = n.next_power_of_two().trailing_zeros() as usize; let mut L_vec = Vec::with_capacity(lg_n); let mut R_vec = Vec::with_capacity(lg_n); while n!= 1 { n = n / 2; let (a_L, a_R) = a.split_at_mut(n); let (b_L, b_R) = b.split_at_mut(n); let (G_L, G_R) = G.split_at_mut(n); let (H_L, H_R) = H.split_at_mut(n); let c_L = inner_product(&a_L, &b_R); let c_R = inner_product(&a_R, &b_L); let L = RistrettoPoint::vartime_multiscalar_mul( a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)), G_R.iter().chain(H_L.iter()).chain(iter::once(Q)), ); let R = RistrettoPoint::vartime_multiscalar_mul( a_R.iter().chain(b_L.iter()).chain(iter::once(&c_R)), G_L.iter().chain(H_R.iter()).chain(iter::once(Q)), ); L_vec.push(L); R_vec.push(R); verifier.commit(L.compress().as_bytes()); verifier.commit(R.compress().as_bytes()); let u = verifier.challenge_scalar(); let u_inv = u.invert(); for i in 0..n { a_L[i] = a_L[i] * u + u_inv * a_R[i]; b_L[i] = b_L[i] * u_inv + u * b_R[i]; G_L[i] = RistrettoPoint::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]); H_L[i] = RistrettoPoint::vartime_multiscalar_mul(&[u, u_inv], &[H_L[i], H_R[i]]); } a = a_L; b = b_L; G = G_L; H = H_L; } return InnerProductProof { L_vec: L_vec, R_vec: R_vec, a: a[0], b: b[0], }; } /// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication /// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details. pub(crate) fn verification_scalars( &self, transcript: &mut ProofTranscript, ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) { let lg_n = self.L_vec.len(); let n = 1 << lg_n; // 1. 
Recompute x_k,...,x_1 based on the proof transcript let mut challenges = Vec::with_capacity(lg_n); for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) { // XXX maybe avoid this compression when proof ser/de is sorted out transcript.commit(L.compress().as_bytes()); transcript.commit(R.compress().as_bytes()); challenges.push(transcript.challenge_scalar()); } // 2. Compute 1/(u_k...u_1) and 1/u_k,..., 1/u_1 let mut challenges_inv = challenges.clone(); let allinv = Scalar::batch_invert(&mut challenges_inv); // 3. Compute u_i^2 and (1/u_i)^2 for i in 0..lg_n { // XXX missing square fn upstream challenges[i] = challenges[i] * challenges[i]; challenges_inv[i] = challenges_inv[i] * challenges_inv[i]; } let challenges_sq = challenges; let challenges_inv_sq = challenges_inv; // 4. Compute s values inductively. let mut s = Vec::with_capacity(n); s.push(allinv); for i in 1..n { let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize; let k = 1 << lg_i; // The challenges are stored in "creation order" as [u_k,...,u_1], // so u_{lg(i)+1} = is indexed by (lg_n-1) - lg_i let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i]; s.push(s[i - k] * u_lg_i_sq); } (challenges_sq, challenges_inv_sq, s) } /// This method is for testing that proof generation work, /// but for efficiency the actual protocols would use `verification_scalars` /// method to combine inner product verification with other checks /// in a single multiscalar multiplication. #[allow(dead_code)] pub fn verify<I>( &self, transcript: &mut ProofTranscript, Hprime_factors: I, P: &RistrettoPoint, Q: &RistrettoPoint, G: &[RistrettoPoint], H: &[RistrettoPoint], ) -> Result<(), ()> where I: IntoIterator, I::Item: Borrow<Scalar>, { let (u_sq, u_inv_sq, s) = self.verification_scalars(transcript); let a_times_s = s.iter().map(|s_i| self.a * s_i); // 1/s[i] is s[!i], and!i runs from n-1 to 0 as i runs from 0 to n-1 let inv_s = s.iter().rev(); let h_times_b_div_s = Hprime_factors .into_iter() .zip(inv_s) .map(|(h_i, s_i_inv)| (self.b * s_i_inv) * h_i.borrow()); let neg_u_sq = u_sq.iter().map(|ui| -ui); let neg_u_inv_sq = u_inv_sq.iter().map(|ui| -ui); let expect_P = RistrettoPoint::vartime_multiscalar_mul( iter::once(self.a * self.b) .chain(a_times_s) .chain(h_times_b_div_s) .chain(neg_u_sq) .chain(neg_u_inv_sq), iter::once(Q) .chain(G.iter()) .chain(H.iter()) .chain(self.L_vec.iter()) .chain(self.R_vec.iter()), ); if expect_P == *P { Ok(()) } else { Err(()) } } } /// Computes an inner product of two vectors /// \\[ /// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i. /// \\] /// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal. pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { let mut out = Scalar::zero(); if a.len()!= b.len() { panic!("inner_product(a,b): lengths of vectors do not match"); } for i in 0..a.len() { out += a[i] * b[i]; } out } #[cfg(test)] mod tests { use super::*; use rand::OsRng; use sha2::Sha512; use util; fn test_helper_create(n: usize) { let mut rng = OsRng::new().unwrap(); use generators::{Generators, PedersenGenerators}; let gens = Generators::new(PedersenGenerators::default(), n, 1); let G = gens.share(0).G.to_vec(); let H = gens.share(0).H.to_vec(); // Q would be determined upstream in the protocol, so we pick a random one. 
let Q = RistrettoPoint::hash_from_bytes::<Sha512>(b"test point"); // a and b are the vectors for which we want to prove c = <a,b> let a: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); let c = inner_product(&a, &b); // y_inv is (the inverse of) a random challenge let y_inv = Scalar::random(&mut rng);
// where b' = b \circ y^(-n) let b_prime = b.iter().zip(util::exp_iter(y_inv)).map(|(bi, yi)| bi * yi); // a.iter() has Item=&Scalar, need Item=Scalar to chain with b_prime let a_prime = a.iter().cloned(); let P = RistrettoPoint::vartime_multiscalar_mul( a_prime.chain(b_prime).chain(iter::once(c)), G.iter().chain(H.iter()).chain(iter::once(&Q)), ); let mut verifier = ProofTranscript::new(b"innerproducttest"); let proof = InnerProductProof::create( &mut verifier, &Q, util::exp_iter(y_inv), G.clone(), H.clone(), a.clone(), b.clone(), ); let mut verifier = ProofTranscript::new(b"innerproducttest"); assert!( proof .verify(&mut verifier, util::exp_iter(y_inv), &P, &Q, &G, &H) .is_ok() ); } #[test] fn make_ipp_1() { test_helper_create(1); } #[test] fn make_ipp_2() { test_helper_create(2); } #[test] fn make_ipp_4() { test_helper_create(4); } #[test] fn make_ipp_32() { test_helper_create(32); } #[test] fn make_ipp_64() { test_helper_create(64); } #[test] fn test_inner_product() { let a = vec![ Scalar::from_u64(1), Scalar::from_u64(2), Scalar::from_u64(3), Scalar::from_u64(4), ]; let b = vec![ Scalar::from_u64(2), Scalar::from_u64(3), Scalar::from_u64(4), Scalar::from_u64(5), ]; assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); } }
// P would be determined upstream, but we need a correct P to check the proof. // // To generate P = <a,G> + <b,H'> + <a,b> Q, compute // P = <a,G> + <b',H> + <a,b> Q,
random_line_split
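The suffix of this record carries the unit tests; the expected constant in `test_inner_product` is just the worked sum of the componentwise products:

\\[ \langle (1,2,3,4), (2,3,4,5) \rangle = 1 \cdot 2 + 2 \cdot 3 + 3 \cdot 4 + 4 \cdot 5 = 2 + 6 + 12 + 20 = 40. \\]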
main.rs
#[macro_use] extern crate errln;
#[macro_use] extern crate error_chain;
extern crate clap;
extern crate hex;
extern crate lalrpop_util;
extern crate parser_haskell;
extern crate regex;
extern crate tempdir;
extern crate walkdir;
extern crate corollary;
extern crate inflector;

use parser_haskell::util::{print_parse_error, simplify_parse_error};

use clap::{Arg, App};
use regex::Regex;
use std::fmt::Write;
use std::fs::{File, create_dir_all};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::Command;
use tempdir::TempDir;

use corollary::print_item_list;
use corollary::ir::PrintState;

// Define error chain.
mod errors {
    error_chain! {
        foreign_links {
            Walkdir(::walkdir::Error);
            Io(::std::io::Error);
            Fmt(::std::fmt::Error);
        }
    }
}
use errors::*;

#[test]
#[ignore]
fn test_single_file() {
    let a = "./corrode/src/Language/Rust/Corrode/C.lhs";
    // let a = "./corrode/src/Language/Rust/Corrode/C.hs";
    // let a = "./test/input.hs";
    println!("file: {}", a);

    let mut file = File::open(a).unwrap();
    let mut contents = String::new();
    file.read_to_string(&mut contents).unwrap();

    if a.ends_with(".lhs") {
        contents = strip_lhs(&contents);
    }
    let contents = parser_haskell::preprocess(&contents);

    // let mut a = ::std::fs::File::create("temp.txt").unwrap();
    // a.write_all(contents.as_bytes());

    let mut errors = Vec::new();
    match parser_haskell::parse(&mut errors, &contents) {
        Ok(okay) => println!("{:#?}", okay),
        Err(e) => {
            let e = simplify_parse_error(e);
            print_parse_error(&contents, &e);
            panic!(e);
        }
    }
}

#[test]
fn test_no_regressions() {
    let a = vec![
        "../deps/corrode/src/Language/Rust/AST.hs",
        "../deps/corrode/src/Language/Rust/Corrode/C.lhs",
        "../deps/corrode/src/Language/Rust/Corrode/CFG.lhs",
        "../deps/corrode/src/Language/Rust/Corrode/CrateMap.hs",
        "../deps/corrode/src/Language/Rust/Idiomatic.hs",
        "../deps/corrode/src/Language/Rust.hs",
        "../deps/language-c/src/Language/C/Analysis/AstAnalysis.hs",
        "../deps/language-c/src/Language/C/Analysis/Builtins.hs",
        "../deps/language-c/src/Language/C/Analysis/ConstEval.hs",
        "../deps/language-c/src/Language/C/Analysis/Debug.hs",
        "../deps/language-c/src/Language/C/Analysis/DeclAnalysis.hs",
        "../deps/language-c/src/Language/C/Analysis/DefTable.hs",
        "../deps/language-c/src/Language/C/Analysis/Export.hs",
        "../deps/language-c/src/Language/C/Analysis/NameSpaceMap.hs",
        "../deps/language-c/src/Language/C/Analysis/SemError.hs",
        "../deps/language-c/src/Language/C/Analysis/SemRep.hs",
        "../deps/language-c/src/Language/C/Analysis/TravMonad.hs",
        "../deps/language-c/src/Language/C/Analysis/TypeCheck.hs",
        "../deps/language-c/src/Language/C/Analysis/TypeConversions.hs",
        "../deps/language-c/src/Language/C/Analysis/TypeUtils.hs",
        "../deps/language-c/src/Language/C/Analysis.hs",
        "../deps/language-c/src/Language/C/Data/Error.hs",
        "../deps/language-c/src/Language/C/Data/Ident.hs",
        "../deps/language-c/src/Language/C/Data/InputStream.hs",
        "../deps/language-c/src/Language/C/Data/Name.hs",
        "../deps/language-c/src/Language/C/Data/Node.hs",
        "../deps/language-c/src/Language/C/Data/Position.hs",
        "../deps/language-c/src/Language/C/Data/RList.hs",
        "../deps/language-c/src/Language/C/Data.hs",
        "../deps/language-c/src/Language/C/Parser/Builtin.hs",
        "../deps/language-c/src/Language/C/Parser/ParserMonad.hs",
        "../deps/language-c/src/Language/C/Parser/Tokens.hs",
        "../deps/language-c/src/Language/C/Parser.hs",
        "../deps/language-c/src/Language/C/Pretty.hs",
        "../deps/language-c/src/Language/C/Syntax/AST.hs",
        "../deps/language-c/src/Language/C/Syntax/Constants.hs",
        "../deps/language-c/src/Language/C/Syntax/Ops.hs",
        "../deps/language-c/src/Language/C/Syntax/Utils.hs",
        "../deps/language-c/src/Language/C/Syntax.hs",
        "../deps/language-c/src/Language/C/System/GCC.hs",
        "../deps/language-c/src/Language/C/System/Preprocess.hs",
        "../parser-c/gen/Lexer.hs",
        "../parser-c/gen/Parser.hs",
    ];

    for path in a {
        let mut file = File::open(path).unwrap();
        let mut contents = String::new();
        file.read_to_string(&mut contents).unwrap();

        if path.ends_with(".lhs") {
            contents = strip_lhs(&contents);
        }
        let contents = parser_haskell::preprocess(&contents);

        // Do not output preprocessed data to temp.txt.
        println!("{:?}", path);
        // use ::std::io::Write;
        // let mut a = ::std::fs::File::create("temp.txt").unwrap();
        // a.write_all(contents.as_bytes());

        let mut errors = Vec::new();
        match parser_haskell::parse(&mut errors, &contents) {
            Ok(_) => {
                // OK
            }
            Err(e) => {
                // TODO: have print_parse_error return a String and feed it to panic!.
                print_parse_error(&contents, &simplify_parse_error(e));
                panic!("cannot convert file {:?}", path);
            }
        }
    }
}

fn strip_lhs(s: &str) -> String {
    let re = Regex::new(r"([ \t]*)```haskell([\s\S]*?)```").unwrap();
    let mut out = vec![];
    for cap in re.captures_iter(&s) {
        let indent = cap[1].to_string().len();
        let group = cap[2].to_string()
            .lines()
            .map(|x| x.chars().skip(indent).collect::<String>())
            .collect::<Vec<_>>()
            .join("\n");
        out.push(group);
    }
    out.join("\n\n")
}

/// Converts a Haskell file by its path into a Rust module.
fn convert_file(input: &str, p: &Path, inline_mod: bool, dump_ast: bool) -> Result<(String, String)> {
    let mut contents = input.to_string();
    let mut file_out = String::new();
    let mut rust_out = String::new();

    // Parse out HASKELL /HASKELL and RUST /RUST sections.
    let re = Regex::new(r#"HASKELL[\s\S]*?/HASKELL"#).unwrap();
    contents = re.replace(&contents, "").to_string();
    let re = Regex::new(r#"RUST([\s\S]*?)/RUST"#).unwrap();
    if let Some(cap) = re.captures(&contents) {
        rust_out.push_str(cap.get(1).unwrap().as_str());
    }
    contents = re.replace(&contents, "").to_string();

    // Preprocess the file.
    let contents = parser_haskell::preprocess(&contents);
    // errln!("{}", contents);

    // Parse the file.
    let mut errors = Vec::new();
    match parser_haskell::parse(&mut errors, &contents) {
        Ok(v) => {
            // errln!("{:?}", v);
            if dump_ast {
                // Compact the pretty-printed AST's indentation.
                println!("{}", format!("{:#?}", v).replace("    ", "  "));
            } else {
                writeln!(file_out, "// Original file: {:?}", p.file_name().unwrap())?;
                writeln!(file_out, "// File auto-generated using Corollary.")?;
                writeln!(file_out, "")?;
                if inline_mod {
                    writeln!(file_out, "pub mod {} {{", v.name.0.replace(".", "_"))?;
                    writeln!(file_out, "    use haskell_support::*;")?;
                    writeln!(file_out, "")?;
                    let state = PrintState::new();
                    writeln!(file_out, "{}", print_item_list(state.tab(), &v.items, true))?;
                    writeln!(file_out, "}}\n")?;
                } else {
                    writeln!(file_out, "#[macro_use] use corollary_support::*;")?;
                    writeln!(file_out, "")?;
                    let state = PrintState::new();
                    writeln!(file_out, "{}", print_item_list(state, &v.items, true))?;
                }
            }
        }
        Err(e) => {
            errln!("/* ERROR: cannot convert file {:?}", p);
            // TODO: have this write to Format.
            print_parse_error(&contents, &simplify_parse_error(e));
            errln!("*/");
            panic!("COULDN'T PARSE");
        }
    }

    Ok((file_out, rust_out))
}

quick_main!(run);

fn run() -> Result<()> {
    use std::io::Write;

    let matches = App::new("corollary")
        .version("0.1")
        .about("Converts Haskell to Rust")
        .arg(Arg::with_name("run")
            .short("r")
            .long("run")
            .help("Runs the file"))
        .arg(Arg::with_name("out")
            .short("o")
            .long("out")
            .help("Output path")
            .takes_value(true))
        .arg(Arg::with_name("ast")
            .short("a")
            .long("ast")
            .help("Dump AST"))
        .arg(Arg::with_name("INPUT")
            .help("Sets the input file to use")
            .required(true)
            .index(1))
        .get_matches();

    let arg_input = matches.value_of("INPUT").unwrap();
    let arg_run = matches.is_present("run");
    let arg_out: Option<_> = matches.value_of("out");
    let arg_ast = matches.is_present("ast");

    if arg_run && arg_out.is_some() {
        bail!("Cannot use --out and --run at the same time.");
    }
    if (arg_run || arg_out.is_some()) && arg_ast {
        bail!("Cannot use --ast and (--run or --out) at the same time.");
    }

    // Starting message.
    if arg_run {
        errln!("running {:?}...", arg_input);
    } else {
        errln!("cross-compiling {:?}...", arg_input);
    }

    // Read file contents.
    let mut file = File::open(arg_input)
        .chain_err(|| format!("Could not open {:?}", arg_input))?;
    let mut contents = String::new();
    file.read_to_string(&mut contents)?;

    // Preprocess the file. Translate .lhs.
    if arg_input.ends_with(".lhs") {
        contents = strip_lhs(&contents);
    }

    let (mut file_section, rust_section) =
        convert_file(&contents, &PathBuf::from(arg_input), false, arg_ast)?;
    if arg_ast {
        return Ok(());
    }

    // Add Rust segments RUST... /RUST and Haskell support code.
    let _ = writeln!(file_section, "");
    let _ = writeln!(file_section, "");
    if rust_section.len() > 0 {
        let _ = writeln!(file_section, "/* RUST... /RUST */");
        let _ = writeln!(file_section, "{}", rust_section);
    }

    if let Some(out_path) = arg_out {
        // Create the output directory.
        let _ = create_dir_all(Path::new(out_path).parent().unwrap());

        // Write file to path.
        errln!("... outputting to {:?}", out_path);
        let mut f = File::create(&out_path)?;
        let _ = f.write_all(file_section.as_bytes());
    } else if !arg_run {
        // Print file to stdout.
        print!("{}", file_section);
    } else if arg_run {
        // Run the file.
        let dir = TempDir::new("corollary")?;
        let file_path = dir.path().join("script.rs");

        let mut f = File::create(&file_path)?;
        let _ = f.write_all(b"// cargo-deps: corollary-support={path=\"/Users/trim/Desktop/corrode-but-in-rust/corollary-support\"}\n\nextern crate corollary_support;\n\n");
        let _ = f.write_all(file_section.as_bytes());
        if rust_section.len() == 0 {
            let _ = f.write_all(b"\n\nfn main() { let _ = __main(); }\n");
        }
        drop(f);

        let output = Command::new("cargo")
            .args(&["script", &file_path.display().to_string()])
            .output()
            .expect("failed to execute process");
        if !output.status.success() {
            err!("{}", String::from_utf8_lossy(&output.stderr));
        }
        err!("{}", String::from_utf8_lossy(&output.stdout));
        ::std::process::exit(output.status.code().unwrap());
    }

    Ok(())
}
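Since `strip_lhs` drives the literate-Haskell path above, here is a minimal sketch of the behavior it implements: it keeps only the ```haskell fenced blocks and strips each fence's leading indentation from every line. The test module and sample input are mine, added for illustration; note that the newlines immediately inside the fence survive in the output.

```rust
#[cfg(test)]
mod strip_lhs_sketch {
    use super::strip_lhs;

    #[test]
    fn extracts_and_unindents_fenced_blocks() {
        // A hypothetical literate-Haskell snippet: prose plus a two-space
        // indented fence. The two spaces are removed from every line, but
        // the newlines adjacent to the fence markers are retained.
        let lhs = "Some prose.\n  ```haskell\n  main :: IO ()\n  main = return ()\n  ```\nMore prose.";
        assert_eq!(strip_lhs(lhs), "\nmain :: IO ()\nmain = return ()\n");
    }
}
```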
identifier_body
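`convert_file` above drops `HASKELL ... /HASKELL` regions entirely and lifts `RUST ... /RUST` regions out into a separate Rust section before parsing. A self-contained sketch of that marker convention on a hypothetical input (the input text and assertions are mine; the two regexes are the ones `convert_file` uses):

```rust
extern crate regex;
use regex::Regex;

fn main() {
    // Hypothetical input mixing a Haskell-only and a Rust-only region,
    // using the same sentinel words convert_file scans for.
    let mut contents = String::from(
        "module M where\nHASKELL\nghcOnly :: Int\n/HASKELL\nRUST\nfn rust_only() {}\n/RUST\n",
    );

    // Haskell-only regions are dropped entirely.
    let re = Regex::new(r"HASKELL[\s\S]*?/HASKELL").unwrap();
    contents = re.replace(&contents, "").to_string();

    // The Rust-only region is captured for the output's Rust section,
    // then removed from the Haskell source before parsing.
    let re = Regex::new(r"RUST([\s\S]*?)/RUST").unwrap();
    let rust_out = re
        .captures(&contents)
        .map(|cap| cap.get(1).unwrap().as_str().to_string())
        .unwrap_or_default();
    contents = re.replace(&contents, "").to_string();

    assert!(rust_out.contains("fn rust_only"));
    assert!(!contents.contains("ghcOnly"));
}
```

Note that only the first `RUST ... /RUST` region is captured, since `captures` and `replace` both act on the first match only.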
exec.rs
use re_builder::RegexOptions;
use re_set;
use re_trait::{RegularExpression, Slot, Locations, as_slots};
use re_unicode;
use utf8::next_utf8;

/// `Exec` manages the execution of a regular expression.
///
/// In particular, this manages the various compiled forms of a single regular
/// expression and the choice of which matching engine to use to execute a
/// regular expression.
pub struct Exec {
    /// All read only state.
    ro: Arc<ExecReadOnly>,
    /// Caches for the various matching engines.
    cache: CachedThreadLocal<ProgramCache>,
}

/// `ExecNoSync` is like `Exec`, except it embeds a reference to a cache. This
/// means it is no longer Sync, but we can now avoid the overhead of
/// synchronization to fetch the cache.
#[derive(Debug)]
pub struct ExecNoSync<'c> {
    /// All read only state.
    ro: &'c Arc<ExecReadOnly>,
    /// Caches for the various matching engines.
    cache: &'c ProgramCache,
}

/// `ExecNoSyncStr` is like `ExecNoSync`, but matches on &str instead of &[u8].
pub struct ExecNoSyncStr<'c>(ExecNoSync<'c>);

/// `ExecReadOnly` comprises all read only state for a regex. Namely, all such
/// state is determined at compile time and never changes during search.
#[derive(Debug)]
struct ExecReadOnly {
    /// The original regular expressions given by the caller to compile.
    res: Vec<String>,
    /// A compiled program that is used in the NFA simulation and backtracking.
    /// It can be byte-based or Unicode codepoint based.
    ///
    /// N.B. It is not possible to make this byte-based from the public API.
    /// It is only used for testing byte based programs in the NFA simulations.
    nfa: Program,
    /// match_type encodes as much upfront knowledge about how we're going to
    /// execute a search as possible.
    match_type: MatchType,
}

/// Facilitates the construction of an executor by exposing various knobs
/// to control how a regex is executed and what kinds of resources it's
/// permitted to use.
pub struct ExecBuilder {
    options: RegexOptions,
    match_type: Option<MatchType>,
    bytes: bool,
    only_utf8: bool,
}

/// Parsed represents a set of parsed regular expressions and their detected
/// literals.
struct Parsed {
    exprs: Vec<Expr>,
    bytes: bool,
}

impl ExecBuilder {
    /// Create a regex execution builder.
    ///
    /// This uses default settings for everything except the regex itself,
    /// which must be provided. Further knobs can be set by calling methods,
    /// and then finally, `build` to actually create the executor.
    pub fn new(re: &str) -> Self {
        Self::new_many(&[re])
    }

    /// Like new, but compiles the union of the given regular expressions.
    ///
    /// Note that when compiling 2 or more regular expressions, capture groups
    /// are completely unsupported. (This means both `find` and `captures`
    /// won't work.)
    pub fn new_many<I, S>(res: I) -> Self
            where S: AsRef<str>, I: IntoIterator<Item=S> {
        let mut opts = RegexOptions::default();
        opts.pats = res.into_iter().map(|s| s.as_ref().to_owned()).collect();
        Self::new_options(opts)
    }

    /// Create a regex execution builder.
    pub fn new_options(opts: RegexOptions) -> Self {
        ExecBuilder {
            options: opts,
            match_type: None,
            bytes: false,
            only_utf8: true,
        }
    }

    /// Set the matching engine to be automatically determined.
    ///
    /// This is the default state and will apply whatever optimizations are
    /// possible, such as running a DFA.
    ///
    /// This overrides whatever was previously set via the `nfa` or
    /// `bounded_backtracking` methods.
    pub fn automatic(mut self) -> Self {
        self.match_type = None;
        self
    }

    /// Sets the matching engine to use the NFA algorithm no matter what
    /// optimizations are possible.
    ///
    /// This overrides whatever was previously set via the `automatic` or
    /// `bounded_backtracking` methods.
    pub fn nfa(mut self) -> Self {
        self.match_type = Some(MatchType::Nfa);
        self
    }

    /// Compiles byte based programs for use with the NFA matching engines.
    ///
    /// By default, the NFA engines match on Unicode scalar values. They can
    /// be made to use byte based programs instead. In general, the byte based
    /// programs are slower because of a less efficient encoding of character
    /// classes.
    ///
    /// Note that this does not impact DFA matching engines, which always
    /// execute on bytes.
    pub fn bytes(mut self, yes: bool) -> Self {
        self.bytes = yes;
        self
    }

    /// When disabled, the program compiled may match arbitrary bytes.
    ///
    /// When enabled (the default), all compiled programs exclusively match
    /// valid UTF-8 bytes.
    pub fn only_utf8(mut self, yes: bool) -> Self {
        self.only_utf8 = yes;
        self
    }

    /// Set the Unicode flag.
    pub fn unicode(mut self, yes: bool) -> Self {
        self.options.unicode = yes;
        self
    }

    /// Parse the current set of patterns into their AST and extract literals.
    fn parse(&self) -> Result<Parsed, Error> {
        let mut exprs = Vec::with_capacity(self.options.pats.len());
        let mut prefixes = Some(Literals::empty());
        let mut suffixes = Some(Literals::empty());
        let mut bytes = false;
        let is_set = self.options.pats.len() > 1;
        // If we're compiling a regex set and that set has any anchored
        // expressions, then disable all literal optimizations.
        for pat in &self.options.pats {
            let parser =
                ExprBuilder::new()
                    .case_insensitive(self.options.case_insensitive)
                    .multi_line(self.options.multi_line)
                    .dot_matches_new_line(self.options.dot_matches_new_line)
                    .swap_greed(self.options.swap_greed)
                    .ignore_whitespace(self.options.ignore_whitespace)
                    .unicode(self.options.unicode)
                    .allow_bytes(!self.only_utf8);
            let expr = try!(parser.parse(pat));
            bytes = bytes || expr.has_bytes();

            if !expr.is_anchored_start() && expr.has_anchored_start() {
                // Partial anchors unfortunately make it hard to use prefixes,
                // so disable them.
                prefixes = None;
            } else if is_set && expr.is_anchored_start() {
                // Regex sets with anchors do not go well with literal
                // optimizations.
                prefixes = None;
            }
            prefixes = prefixes.and_then(|mut prefixes| {
                if !prefixes.union_prefixes(&expr) {
                    None
                } else {
                    Some(prefixes)
                }
            });

            if !expr.is_anchored_end() && expr.has_anchored_end() {
                // Partial anchors unfortunately make it hard to use suffixes,
                // so disable them.
                suffixes = None;
            } else if is_set && expr.is_anchored_end() {
                // Regex sets with anchors do not go well with literal
                // optimizations.
                suffixes = None;
            }
            suffixes = suffixes.and_then(|mut suffixes| {
                if !suffixes.union_suffixes(&expr) {
                    None
                } else {
                    Some(suffixes)
                }
            });
            exprs.push(expr);
        }
        Ok(Parsed {
            exprs: exprs,
            bytes: bytes,
        })
    }

    /// Build an executor that can run a regular expression.
    pub fn build(self) -> Result<Exec, Error> {
        // Special case when we have no patterns to compile.
        // This can happen when compiling a regex set.
        if self.options.pats.is_empty() {
            let ro = Arc::new(ExecReadOnly {
                res: vec![],
                nfa: Program::new(),
                match_type: MatchType::Nothing,
            });
            return Ok(Exec { ro: ro, cache: CachedThreadLocal::new() });
        }
        let parsed = try!(self.parse());
        let nfa = try!(
            Compiler::new()
                .size_limit(self.options.size_limit)
                .bytes(self.bytes || parsed.bytes)
                .only_utf8(self.only_utf8)
                .compile(&parsed.exprs));

        let mut ro = ExecReadOnly {
            res: self.options.pats,
            nfa: nfa,
            match_type: MatchType::Nothing,
        };
        ro.match_type = ro.choose_match_type(self.match_type);

        let ro = Arc::new(ro);
        Ok(Exec { ro: ro, cache: CachedThreadLocal::new() })
    }
}

impl<'c> RegularExpression for ExecNoSyncStr<'c> {
    type Text = str;

    fn slots_len(&self) -> usize {
        self.0.slots_len()
    }

    fn next_after_empty(&self, text: &str, i: usize) -> usize {
        next_utf8(text.as_bytes(), i)
    }

    #[inline(always)] // reduces constant overhead
    fn shortest_match_at(&self, text: &str, start: usize) -> Option<usize> {
        self.0.shortest_match_at(text.as_bytes(), start)
    }

    #[inline(always)] // reduces constant overhead
    fn is_match_at(&self, text: &str, start: usize) -> bool {
        self.0.is_match_at(text.as_bytes(), start)
    }

    #[inline(always)] // reduces constant overhead
    fn find_at(&self, text: &str, start: usize) -> Option<(usize, usize)> {
        self.0.find_at(text.as_bytes(), start)
    }

    #[inline(always)] // reduces constant overhead
    fn read_captures_at(
        &self,
        locs: &mut Locations,
        text: &str,
        start: usize,
    ) -> Option<(usize, usize)> {
        self.0.read_captures_at(locs, text.as_bytes(), start)
    }
}

impl<'c> RegularExpression for ExecNoSync<'c> {
    type Text = [u8];

    /// Returns the number of capture slots in the regular expression. (There
    /// are two slots for every capture group, corresponding to possibly empty
    /// start and end locations of the capture.)
    fn slots_len(&self) -> usize {
        self.ro.nfa.captures.len() * 2
    }

    fn next_after_empty(&self, _text: &[u8], i: usize) -> usize {
        i + 1
    }

    /// Returns the end of a match location, possibly occurring before the
    /// end location of the correct leftmost-first match.
    #[inline(always)] // reduces constant overhead
    fn shortest_match_at(&self, text: &[u8], start: usize) -> Option<usize> {
        match self.ro.match_type {
            MatchType::Nfa => self.shortest_nfa(text, start),
            MatchType::Nothing => None,
        }
    }

    /// Returns true if and only if the regex matches text.
    ///
    /// For single regular expressions, this is equivalent to calling
    /// shortest_match(...).is_some().
    #[inline(always)] // reduces constant overhead
    fn is_match_at(&self, text: &[u8], start: usize) -> bool {
        // We need to do this dance because shortest_match relies on the NFA
        // filling in captures[1], but a RegexSet has no captures. In other
        // words, a RegexSet can't (currently) use shortest_match. ---AG
        match self.ro.match_type {
            MatchType::Nfa => self.match_nfa(text, start),
            MatchType::Nothing => false,
        }
    }

    /// Finds the start and end location of the leftmost-first match, starting
    /// at the given location.
    #[inline(always)] // reduces constant overhead
    fn find_at(&self, text: &[u8], start: usize) -> Option<(usize, usize)> {
        match self.ro.match_type {
            MatchType::Nfa => self.find_nfa(text, start),
            MatchType::Nothing => None,
        }
    }

    /// Finds the start and end location of the leftmost-first match and also
    /// fills in all matching capture groups.
    ///
    /// The number of capture slots given should be equal to the total number
    /// of capture slots in the compiled program.
    ///
    /// Note that the first two slots always correspond to the start and end
    /// locations of the overall match.
    fn read_captures_at(
        &self,
        locs: &mut Locations,
        text: &[u8],
        start: usize,
    ) -> Option<(usize, usize)> {
        let slots = as_slots(locs);
        for slot in slots.iter_mut() {
            *slot = None;
        }
        // If the caller unnecessarily uses this, then we try to save them
        // from themselves.
        match slots.len() {
            0 => return self.find_at(text, start),
            2 => {
                return self.find_at(text, start).map(|(s, e)| {
                    slots[0] = Some(s);
                    slots[1] = Some(e);
                    (s, e)
                });
            }
            _ => {} // fallthrough
        }
        match self.ro.match_type {
            MatchType::Nfa => {
                self.captures_nfa(slots, text, start)
            }
            MatchType::Nothing => None,
        }
    }
}

impl<'c> ExecNoSync<'c> {
    /// Executes the NFA engine to return whether there is a match or not.
    ///
    /// Ideally, we could use shortest_nfa(...).is_some() and get the same
    /// performance characteristics, but regex sets don't have captures, which
    /// shortest_nfa depends on.
    fn match_nfa(
        &self,
        text: &[u8],
        start: usize,
    ) -> bool {
        self.exec_pikevm(&mut [false], &mut [], true, text, start)
    }

    /// Finds the shortest match using an NFA.
    fn shortest_nfa(&self, text: &[u8], start: usize) -> Option<usize> {
        let mut slots = [None, None];
        if self.exec_pikevm(&mut [false], &mut slots, true, text, start) {
            slots[1]
        } else {
            None
        }
    }

    /// Like find, but executes an NFA engine.
    fn find_nfa(
        &self,
        text: &[u8],
        start: usize,
    ) -> Option<(usize, usize)> {
        let mut slots = [None, None];
        if self.exec_pikevm(&mut [false], &mut slots, false, text, start) {
            match (slots[0], slots[1]) {
                (Some(s), Some(e)) => Some((s, e)),
                _ => None,
            }
        } else {
            None
        }
    }

    /// Like find_nfa, but fills in captures.
    ///
    /// `slots` should have length equal to `2 * nfa.captures.len()`.
    fn captures_nfa(
        &self,
        slots: &mut [Slot],
        text: &[u8],
        start: usize,
    ) -> Option<(usize, usize)> {
        if self.exec_pikevm(&mut [false], slots, false, text, start) {
            match (slots[0], slots[1]) {
                (Some(s), Some(e)) => Some((s, e)),
                _ => None,
            }
        } else {
            None
        }
    }

    /// Always run the NFA algorithm.
    fn exec_pikevm(
        &self,
        matches: &mut [bool],
        slots: &mut [Slot],
        quit_after_match: bool,
        text: &[u8],
        start: usize,
    ) -> bool {
        use input::Input;
        let cache = &mut self.cache.borrow_mut().pikevm;
        if start == 0 {
            cache.reset();
        } else {
            cache.prep_for_next_match();
        }
        if self.ro.nfa.uses_bytes() {
            let input = ByteInput::new(text, self.ro.nfa.only_utf8);
            let mut at = input.at(start);
            let mut fsm = pikevm::Fsm::new(
                &self.ro.nfa,
                quit_after_match,
            );
            loop {
                let stop = fsm.next(
                    cache,
                    matches,
                    slots,
                    at,
                    input.only_utf8(),
                );
                if stop || at.is_end() {
                    break;
                }
                at = input.at(at.next_pos());
            }
        } else {
            let input = CharInput::new(text);
            let mut at = input.at(start);
            let mut fsm = pikevm::Fsm::new(
                &self.ro.nfa,
                quit_after_match,
            );
            loop {
                let stop = fsm.next(
                    cache,
                    matches,
                    slots,
                    at,
                    input.only_utf8(),
                );
                if stop || at.is_end() {
                    break;
                }
                at = input.at(at.next_pos());
            }
        }
        matches.iter().any(|b| *b)
    }

    /// Finds which regular expressions match the given text.
    ///
    /// `matches` should have length equal to the number of regexes being
    /// searched.
    ///
    /// This is only useful when one wants to know which regexes in a set
    /// match some text.
    pub fn many_matches_at(
        &self,
        matches: &mut [bool],
        text: &[u8],
        start: usize,
    ) -> bool {
        use self::MatchType::*;
        match self.ro.match_type {
            Nfa => self.exec_pikevm(matches, &mut [], false, text, start),
            Nothing => false,
        }
    }

    pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> {
        &self.ro.nfa.capture_name_idx
    }
}

impl<'c> ExecNoSyncStr<'c> {
    pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> {
        self.0.capture_name_idx()
    }
}

impl Exec {
    /// Get a searcher that isn't Sync.
    #[inline(always)] // reduces constant overhead
    pub fn searcher(&self) -> ExecNoSync {
        let create = || Box::new(RefCell::new(ProgramCacheInner::new(&self.ro)));
        ExecNoSync {
            ro: &self.ro, // a clone is too expensive here! (and not needed)
            cache: self.cache.get_or(create),
        }
    }

    /// Get a searcher that isn't Sync and can match on &str.
    #[inline(always)] // reduces constant overhead
    pub fn searcher_str(&self) -> ExecNoSyncStr {
        ExecNoSyncStr(self.searcher())
    }

    /// Build a Regex from this executor.
    pub fn into_regex(self) -> re_unicode::Regex {
        re_unicode::Regex::from(self)
    }

    /// Build a RegexSet from this executor.
    pub fn into_regex_set(self) -> re_set::unicode::RegexSet {
        re_set::unicode::RegexSet::from(self)
    }

    /// The original regular expressions given by the caller that were
    /// compiled.
    pub fn regex_strings(&self) -> &[String] {
        &self.ro.res
    }

    /// Return a slice of capture names.
    ///
    /// Any capture that isn't named is None.
    pub fn capture_names(&self) -> &[Option<String>] {
        &self.ro.nfa.captures
    }

    /// Return a reference to named groups mapping (from group name to
    /// group position).
    pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> {
        &self.ro
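The slot convention used by `slots_len` and `read_captures_at` above (two slots per capture group, with the first pair holding the overall match) implies a fixed index layout. A small illustrative sketch of that layout; the helper name and sample data are mine:

```rust
/// Hypothetical helper: the start/end slot indices for capture group `i`,
/// following the convention above (slots_len == 2 * number_of_groups).
fn slot_indices(i: usize) -> (usize, usize) {
    (2 * i, 2 * i + 1)
}

fn main() {
    // For a regex with one explicit group, e.g. "a(b)c" matched at 0..3:
    // slots[0]/slots[1] hold the overall match, slots[2]/slots[3] group 1.
    let slots: [Option<usize>; 4] = [Some(0), Some(3), Some(1), Some(2)];
    let (s, e) = slot_indices(1);
    assert_eq!((slots[s], slots[e]), (Some(1), Some(2)));
}
```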
random_line_split
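`ExecBuilder` is crate-internal, so this flow is not callable from outside; still, the docs above describe a knob-then-build pattern. A hedged sketch of that intended flow, assuming it runs inside this crate and that `choose_match_type` selects `MatchType::Nfa` here:

```rust
// A sketch of the builder flow described above, usable only from within
// this crate (ExecBuilder and Exec are not part of the public API).
fn build_nfa_exec() -> Result<Exec, Error> {
    let exec = try!(ExecBuilder::new_many(&["foo", "ba+r"])
        .nfa()            // force the NFA engine, skipping other optimizations
        .bytes(true)      // compile byte-based programs for the NFA
        .only_utf8(false) // allow matching arbitrary bytes
        .build());
    // A searcher borrows the per-thread cache; it is cheap to get but not Sync.
    let searcher = exec.searcher();
    let _ = searcher.is_match_at(b"foobar", 0);
    Ok(exec)
}
```

The design point the two-type split captures: `Exec` stays `Sync` by keeping caches behind `CachedThreadLocal`, while `ExecNoSync` pre-resolves the cache reference once so the hot matching loop pays no synchronization cost.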
ExecNoSyncStr
identifier_name
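The `ExecBuilder` knobs above (`automatic`, `nfa`, `bytes`, `only_utf8`, `unicode`) follow Rust's consuming-builder idiom: each method takes `mut self`, flips one option, and returns `Self`, so configuration chains into a final `build()`. A minimal standalone sketch of that idiom, using illustrative names rather than actual regex-crate types:

```
// Consuming-builder sketch; `Options`, `Matcher` and `MatcherBuilder` are
// illustrative stand-ins, not types from the regex crate.
#[derive(Debug, Default, Clone)]
struct Options {
    case_insensitive: bool,
    unicode: bool,
}

#[derive(Debug)]
struct Matcher {
    pattern: String,
    options: Options,
}

struct MatcherBuilder {
    pattern: String,
    options: Options,
}

impl MatcherBuilder {
    fn new(pattern: &str) -> Self {
        MatcherBuilder { pattern: pattern.to_owned(), options: Options::default() }
    }

    // Each knob consumes `self` and returns it so calls can be chained.
    fn case_insensitive(mut self, yes: bool) -> Self {
        self.options.case_insensitive = yes;
        self
    }

    fn unicode(mut self, yes: bool) -> Self {
        self.options.unicode = yes;
        self
    }

    // `build` consumes the builder and produces the final object.
    fn build(self) -> Matcher {
        Matcher { pattern: self.pattern, options: self.options }
    }
}

fn main() {
    let m = MatcherBuilder::new("abc").case_insensitive(true).unicode(false).build();
    println!("{:?}", m);
}
```

Consuming `self` keeps the chain cheap and makes it impossible to accidentally reuse a half-configured builder, which is why `build(self)` can move `options` out without cloning.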
exec.rs
use re_builder::RegexOptions; use re_set; use re_trait::{RegularExpression, Slot, Locations, as_slots}; use re_unicode; use utf8::next_utf8; /// `Exec` manages the execution of a regular expression. /// /// In particular, this manages the various compiled forms of a single regular /// expression and the choice of which matching engine to use to execute a /// regular expression. pub struct Exec { /// All read only state. ro: Arc<ExecReadOnly>, /// Caches for the various matching engines. cache: CachedThreadLocal<ProgramCache>, } /// `ExecNoSync` is like `Exec`, except it embeds a reference to a cache. This /// means it is no longer Sync, but we can now avoid the overhead of /// synchronization to fetch the cache. #[derive(Debug)] pub struct ExecNoSync<'c> { /// All read only state. ro: &'c Arc<ExecReadOnly>, /// Caches for the various matching engines. cache: &'c ProgramCache, } /// `ExecNoSyncStr` is like `ExecNoSync`, but matches on &str instead of &[u8]. pub struct ExecNoSyncStr<'c>(ExecNoSync<'c>); /// `ExecReadOnly` comprises all read only state for a regex. Namely, all such /// state is determined at compile time and never changes during search. #[derive(Debug)] struct ExecReadOnly { /// The original regular expressions given by the caller to compile. res: Vec<String>, /// A compiled program that is used in the NFA simulation and backtracking. /// It can be byte-based or Unicode codepoint based. /// /// N.B. It is not possible to make this byte-based from the public API. /// It is only used for testing byte based programs in the NFA simulations. nfa: Program, /// match_type encodes as much upfront knowledge about how we're going to /// execute a search as possible. match_type: MatchType, } /// Facilitates the construction of an executor by exposing various knobs /// to control how a regex is executed and what kinds of resources it's /// permitted to use. pub struct ExecBuilder { options: RegexOptions, match_type: Option<MatchType>, bytes: bool, only_utf8: bool, } /// Parsed represents a set of parsed regular expressions and their detected /// literals. struct Parsed { exprs: Vec<Expr>, bytes: bool, } impl ExecBuilder { /// Create a regex execution builder. /// /// This uses default settings for everything except the regex itself, /// which must be provided. Further knobs can be set by calling methods, /// and then finally, `build` to actually create the executor. pub fn new(re: &str) -> Self { Self::new_many(&[re]) } /// Like new, but compiles the union of the given regular expressions. /// /// Note that when compiling 2 or more regular expressions, capture groups /// are completely unsupported. (This means both `find` and `captures` /// won't work.) pub fn new_many<I, S>(res: I) -> Self where S: AsRef<str>, I: IntoIterator<Item=S> { let mut opts = RegexOptions::default(); opts.pats = res.into_iter().map(|s| s.as_ref().to_owned()).collect(); Self::new_options(opts) } /// Create a regex execution builder. pub fn new_options(opts: RegexOptions) -> Self { ExecBuilder { options: opts, match_type: None, bytes: false, only_utf8: true, } } /// Set the matching engine to be automatically determined. /// /// This is the default state and will apply whatever optimizations are /// possible, such as running a DFA. /// /// This overrides whatever was previously set via the `nfa` or /// `bounded_backtracking` methods. pub fn automatic(mut self) -> Self { self.match_type = None; self } /// Sets the matching engine to use the NFA algorithm no matter what /// optimizations are possible.
/// /// This overrides whatever was previously set via the `automatic` or /// `bounded_backtracking` methods. pub fn nfa(mut self) -> Self { self.match_type = Some(MatchType::Nfa); self } /// Compiles byte based programs for use with the NFA matching engines. /// /// By default, the NFA engines match on Unicode scalar values. They can /// be made to use byte based programs instead. In general, the byte based /// programs are slower because of a less efficient encoding of character /// classes. /// /// Note that this does not impact DFA matching engines, which always /// execute on bytes. pub fn bytes(mut self, yes: bool) -> Self { self.bytes = yes; self } /// When disabled, the program compiled may match arbitrary bytes. /// /// When enabled (the default), all compiled programs exclusively match /// valid UTF-8 bytes. pub fn only_utf8(mut self, yes: bool) -> Self { self.only_utf8 = yes; self } /// Set the Unicode flag. pub fn unicode(mut self, yes: bool) -> Self { self.options.unicode = yes; self } /// Parse the current set of patterns into their AST and extract literals. fn parse(&self) -> Result<Parsed, Error> { let mut exprs = Vec::with_capacity(self.options.pats.len()); let mut prefixes = Some(Literals::empty()); let mut suffixes = Some(Literals::empty()); let mut bytes = false; let is_set = self.options.pats.len() > 1; // If we're compiling a regex set and that set has any anchored // expressions, then disable all literal optimizations. for pat in &self.options.pats { let parser = ExprBuilder::new() .case_insensitive(self.options.case_insensitive) .multi_line(self.options.multi_line) .dot_matches_new_line(self.options.dot_matches_new_line) .swap_greed(self.options.swap_greed) .ignore_whitespace(self.options.ignore_whitespace) .unicode(self.options.unicode) .allow_bytes(!self.only_utf8); let expr = try!(parser.parse(pat)); bytes = bytes || expr.has_bytes(); if !expr.is_anchored_start() && expr.has_anchored_start() { // Partial anchors unfortunately make it hard to use prefixes, // so disable them. prefixes = None; } else if is_set && expr.is_anchored_start() { // Regex sets with anchors do not go well with literal // optimizations. prefixes = None; } prefixes = prefixes.and_then(|mut prefixes| { if !prefixes.union_prefixes(&expr) { None } else { Some(prefixes) } }); if !expr.is_anchored_end() && expr.has_anchored_end() { // Partial anchors unfortunately make it hard to use suffixes, // so disable them. suffixes = None; } else if is_set && expr.is_anchored_end() { // Regex sets with anchors do not go well with literal // optimizations. suffixes = None; } suffixes = suffixes.and_then(|mut suffixes| { if !suffixes.union_suffixes(&expr) { None } else { Some(suffixes) } }); exprs.push(expr); } Ok(Parsed { exprs: exprs, bytes: bytes, }) } /// Build an executor that can run a regular expression. pub fn build(self) -> Result<Exec, Error> { // Special case when we have no patterns to compile. // This can happen when compiling a regex set.
if self.options.pats.is_empty() { let ro = Arc::new(ExecReadOnly { res: vec![], nfa: Program::new(), match_type: MatchType::Nothing, }); return Ok(Exec { ro: ro, cache: CachedThreadLocal::new() }); } let parsed = try!(self.parse()); let nfa = try!( Compiler::new() .size_limit(self.options.size_limit) .bytes(self.bytes || parsed.bytes) .only_utf8(self.only_utf8) .compile(&parsed.exprs)); let mut ro = ExecReadOnly { res: self.options.pats, nfa: nfa, match_type: MatchType::Nothing, }; ro.match_type = ro.choose_match_type(self.match_type); let ro = Arc::new(ro); Ok(Exec { ro: ro, cache: CachedThreadLocal::new() }) } } impl<'c> RegularExpression for ExecNoSyncStr<'c> { type Text = str; fn slots_len(&self) -> usize { self.0.slots_len() } fn next_after_empty(&self, text: &str, i: usize) -> usize { next_utf8(text.as_bytes(), i) } #[inline(always)] // reduces constant overhead fn shortest_match_at(&self, text: &str, start: usize) -> Option<usize>
#[inline(always)] // reduces constant overhead fn is_match_at(&self, text: &str, start: usize) -> bool { self.0.is_match_at(text.as_bytes(), start) } #[inline(always)] // reduces constant overhead fn find_at(&self, text: &str, start: usize) -> Option<(usize, usize)> { self.0.find_at(text.as_bytes(), start) } #[inline(always)] // reduces constant overhead fn read_captures_at( &self, locs: &mut Locations, text: &str, start: usize, ) -> Option<(usize, usize)> { self.0.read_captures_at(locs, text.as_bytes(), start) } } impl<'c> RegularExpression for ExecNoSync<'c> { type Text = [u8]; /// Returns the number of capture slots in the regular expression. (There /// are two slots for every capture group, corresponding to possibly empty /// start and end locations of the capture.) fn slots_len(&self) -> usize { self.ro.nfa.captures.len() * 2 } fn next_after_empty(&self, _text: &[u8], i: usize) -> usize { i + 1 } /// Returns the end of a match location, possibly occurring before the /// end location of the correct leftmost-first match. #[inline(always)] // reduces constant overhead fn shortest_match_at(&self, text: &[u8], start: usize) -> Option<usize> { match self.ro.match_type { MatchType::Nfa => self.shortest_nfa(text, start), MatchType::Nothing => None, } } /// Returns true if and only if the regex matches text. /// /// For single regular expressions, this is equivalent to calling /// shortest_match(...).is_some(). #[inline(always)] // reduces constant overhead fn is_match_at(&self, text: &[u8], start: usize) -> bool { // We need to do this dance because shortest_match relies on the NFA // filling in captures[1], but a RegexSet has no captures. In other // words, a RegexSet can't (currently) use shortest_match. match self.ro.match_type { MatchType::Nfa => self.match_nfa(text, start), MatchType::Nothing => false, } } /// Finds the start and end location of the leftmost-first match, starting /// at the given location. #[inline(always)] // reduces constant overhead fn find_at(&self, text: &[u8], start: usize) -> Option<(usize, usize)> { match self.ro.match_type { MatchType::Nfa => self.find_nfa(text, start), MatchType::Nothing => None, } } /// Finds the start and end location of the leftmost-first match and also /// fills in all matching capture groups. /// /// The number of capture slots given should be equal to the total number /// of capture slots in the compiled program. /// /// Note that the first two slots always correspond to the start and end /// locations of the overall match. fn read_captures_at( &self, locs: &mut Locations, text: &[u8], start: usize, ) -> Option<(usize, usize)> { let slots = as_slots(locs); for slot in slots.iter_mut() { *slot = None; } // If the caller unnecessarily uses this, then we try to save them // from themselves. match slots.len() { 0 => return self.find_at(text, start), 2 => { return self.find_at(text, start).map(|(s, e)| { slots[0] = Some(s); slots[1] = Some(e); (s, e) }); } _ => {} // fallthrough } match self.ro.match_type { MatchType::Nfa => { self.captures_nfa(slots, text, start) } MatchType::Nothing => None, } } } impl<'c> ExecNoSync<'c> { /// Executes the NFA engine to return whether there is a match or not. /// /// Ideally, we could use shortest_nfa(...).is_some() and get the same /// performance characteristics, but regex sets don't have captures, which /// shortest_nfa depends on. fn match_nfa( &self, text: &[u8], start: usize, ) -> bool { self.exec_pikevm(&mut [false], &mut [], true, text, start) } /// Finds the shortest match using an NFA.
fn shortest_nfa(&self, text: &[u8], start: usize) -> Option<usize> { let mut slots = [None, None]; if self.exec_pikevm(&mut [false], &mut slots, true, text, start) { slots[1] } else { None } } /// Like find, but executes an NFA engine. fn find_nfa( &self, text: &[u8], start: usize, ) -> Option<(usize, usize)> { let mut slots = [None, None]; if self.exec_pikevm(&mut [false], &mut slots, false, text, start) { match (slots[0], slots[1]) { (Some(s), Some(e)) => Some((s, e)), _ => None, } } else { None } } /// Like find_nfa, but fills in captures. /// /// `slots` should have length equal to `2 * nfa.captures.len()`. fn captures_nfa( &self, slots: &mut [Slot], text: &[u8], start: usize, ) -> Option<(usize, usize)> { if self.exec_pikevm(&mut [false], slots, false, text, start) { match (slots[0], slots[1]) { (Some(s), Some(e)) => Some((s, e)), _ => None, } } else { None } } /// Always run the NFA algorithm. fn exec_pikevm( &self, matches: &mut [bool], slots: &mut [Slot], quit_after_match: bool, text: &[u8], start: usize, ) -> bool { use input::Input; let cache = &mut self.cache.borrow_mut().pikevm; if start == 0 { cache.reset(); } else { cache.prep_for_next_match(); } if self.ro.nfa.uses_bytes() { let input = ByteInput::new(text, self.ro.nfa.only_utf8); let mut at = input.at(start); let mut fsm = pikevm::Fsm::new( &self.ro.nfa, quit_after_match, ); loop { let stop = fsm.next( cache, matches, slots, at, input.only_utf8(), ); if stop || at.is_end() { break; } at = input.at(at.next_pos()); } } else { let input = CharInput::new(text); let mut at = input.at(start); let mut fsm = pikevm::Fsm::new( &self.ro.nfa, quit_after_match, ); loop { let stop = fsm.next( cache, matches, slots, at, input.only_utf8(), ); if stop || at.is_end() { break; } at = input.at(at.next_pos()); } } matches.iter().any(|b| *b) } /// Finds which regular expressions match the given text. /// /// `matches` should have length equal to the number of regexes being /// searched. /// /// This is only useful when one wants to know which regexes in a set /// match some text. pub fn many_matches_at( &self, matches: &mut [bool], text: &[u8], start: usize, ) -> bool { use self::MatchType::*; match self.ro.match_type { Nfa => self.exec_pikevm(matches, &mut [], false, text, start), Nothing => false, } } pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> { &self.ro.nfa.capture_name_idx } } impl<'c> ExecNoSyncStr<'c> { pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> { self.0.capture_name_idx() } } impl Exec { /// Get a searcher that isn't Sync. #[inline(always)] // reduces constant overhead pub fn searcher(&self) -> ExecNoSync { let create = || Box::new(RefCell::new(ProgramCacheInner::new(&self.ro))); ExecNoSync { ro: &self.ro, // a clone is too expensive here! (and not needed) cache: self.cache.get_or(create), } } /// Get a searcher that isn't Sync and can match on &str. #[inline(always)] // reduces constant overhead pub fn searcher_str(&self) -> ExecNoSyncStr { ExecNoSyncStr(self.searcher()) } /// Build a Regex from this executor. pub fn into_regex(self) -> re_unicode::Regex { re_unicode::Regex::from(self) } /// Build a RegexSet from this executor. pub fn into_regex_set(self) -> re_set::unicode::RegexSet { re_set::unicode::RegexSet::from(self) } /// The original regular expressions given by the caller that were /// compiled. pub fn regex_strings(&self) -> &[String] { &self.ro.res } /// Return a slice of capture names. /// /// Any capture that isn't named is None. 
pub fn capture_names(&self) -> &[Option<String>] { &self.ro.nfa.captures } /// Return a reference to named groups mapping (from group name to /// group position). pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> { &self.
{ self.0.shortest_match_at(text.as_bytes(), start) }
identifier_body
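The `slots_len` and `read_captures_at` documentation above describes a flat capture-slot layout: every capture group owns two `Option<usize>` slots, and slots 0 and 1 always hold the start and end of the overall match. A small sketch of decoding that layout; the `Slot` alias mirrors the one used by the NFA engines, and the sample values are made up:

```
// Decode a flat slot array (two slots per group) into (start, end) spans.
type Slot = Option<usize>;

fn slots_to_spans(slots: &[Slot]) -> Vec<Option<(usize, usize)>> {
    // The slot count is always even: two slots per capture group.
    slots
        .chunks(2)
        .map(|pair| match (pair[0], pair[1]) {
            (Some(s), Some(e)) => Some((s, e)),
            _ => None, // this group did not participate in the match
        })
        .collect()
}

fn main() {
    // Hypothetical slots for an overall match at 0..3 whose single capture
    // group matched at 1..3.
    let slots: Vec<Slot> = vec![Some(0), Some(3), Some(1), Some(3)];
    let spans = slots_to_spans(&slots);
    assert_eq!(spans[0], Some((0, 3))); // overall match
    assert_eq!(spans[1], Some((1, 3))); // first capture group
}
```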
blob_store.rs
// Copyright 2014 Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Combines data chunks into larger blobs to be stored externally. use std::sync::{Arc, Mutex}; use serialize::{json, Encodable, Decodable}; use serialize::hex::{ToHex}; use serialize::json::{Json, ToJson, Decoder, from_str}; use std::collections::treemap::{TreeMap}; use std::collections::lru_cache::{LruCache}; use std::io::{File}; use std::str; use process::{Process, MsgHandler}; use blob_index; use blob_index::{BlobIndexProcess}; #[cfg(test)] use blob_index::{BlobIndex}; pub type BlobStoreProcess<B> = Process<Msg, Reply, BlobStore<B>>; pub trait BlobStoreBackend { fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String>; fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String>; } #[deriving(Clone)] pub struct FileBackend { root: Path, read_cache: Arc<Mutex<LruCache<Vec<u8>, Result<Vec<u8>, String>>>>, } impl FileBackend { pub fn new(root: Path) -> FileBackend { FileBackend{root: root, read_cache: Arc::new(Mutex::new(LruCache::new(10)))} } fn guarded_cache_get(&self, name: &Vec<u8>) -> Option<Result<Vec<u8>, String>> { self.read_cache.lock().get(name).map(|v| v.clone()) } fn guarded_cache_put(&mut self, name: Vec<u8>, result: Result<Vec<u8>, String>) { self.read_cache.lock().put(name, result); } } impl BlobStoreBackend for FileBackend { fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String> { let mut path = self.root.clone(); path.push(name.to_hex()); let mut file = match File::create(&path) { Err(e) => return Err(e.to_string()), Ok(f) => f, }; match file.write(data) { Err(e) => Err(e.to_string()), Ok(()) => Ok(()), } } fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> { // Check for key in cache: let name = name.into_vec(); let value_opt = self.guarded_cache_get(&name); match value_opt { Some(result) => return result, None => (), } // Read key: let path = { let mut p = self.root.clone(); p.push(name.as_slice().to_hex()); p }; let mut fd = File::open(&path).unwrap(); let res = fd.read_to_end().and_then(|data| { Ok(data.into_vec()) }).or_else(|e| Err(e.to_string())); // Update cache to contain key: self.guarded_cache_put(name, res.clone()); return res; } } #[deriving(Show, Clone, Eq, PartialEq, Encodable, Decodable)] pub struct BlobID { name: Vec<u8>, begin: uint, end: uint, } impl BlobID { pub fn from_bytes(bytes: Vec<u8>) -> BlobID { let mut decoder = Decoder::new(from_str( str::from_utf8(bytes.as_slice()).unwrap()).unwrap()); Decodable::decode(&mut decoder).unwrap() } pub fn as_bytes(&self) -> Vec<u8> { self.to_json().to_string().as_bytes().into_vec() } } impl ToJson for BlobID { fn to_json(&self) -> Json { let mut m = TreeMap::new(); m.insert("name".to_string(), self.name.to_json()); m.insert("begin".to_string(), self.begin.to_json()); m.insert("end".to_string(), self.end.to_json()); json::Object(m).to_json() } } pub enum Msg { /// Store a new data chunk into the current blob. 
The callback is triggered after the blob /// containing the chunk has been committed to persistent storage (it is then safe to use the /// `BlobID` as a persistent reference). Store(Vec<u8>, proc(BlobID):Send -> ()), /// Retrieve the data chunk identified by `BlobID`. Retrieve(BlobID), /// Flush the current blob, independent of its size. Flush, } #[deriving(Eq, PartialEq, Show)] pub enum Reply { StoreOK(BlobID), RetrieveOK(Vec<u8>), FlushOK, } pub struct BlobStore<B> { backend: B, blob_index: BlobIndexProcess, blob_desc: blob_index::BlobDesc, buffer_data: Vec<(BlobID, Vec<u8>, proc(BlobID):Send -> ())>, buffer_data_len: uint, max_blob_size: uint, } fn empty_blob_desc() -> blob_index::BlobDesc { blob_index::BlobDesc{name: b"".into_vec(), id: 0} } impl <B: BlobStoreBackend> BlobStore<B> { pub fn new(index: BlobIndexProcess, backend: B, max_blob_size: uint) -> BlobStore<B> { let mut bs = BlobStore{ backend: backend, blob_index: index, blob_desc: empty_blob_desc(), buffer_data: Vec::new(), buffer_data_len: 0, max_blob_size: max_blob_size, }; bs.reserve_new_blob(); bs } #[cfg(test)] pub fn new_for_testing(backend: B, max_blob_size: uint) -> BlobStore<B> { let biP = Process::new(proc() { BlobIndex::new_for_testing() }); let mut bs = BlobStore{backend: backend, blob_index: biP, blob_desc: empty_blob_desc(), buffer_data: Vec::new(), buffer_data_len: 0, max_blob_size: max_blob_size, }; bs.reserve_new_blob(); bs } fn reserve_new_blob(&mut self) -> blob_index::BlobDesc { let old_blob_desc = self.blob_desc.clone(); let res = self.blob_index.send_reply(blob_index::Reserve); match res { blob_index::Reserved(blob_desc) => { self.blob_desc = blob_desc; }, _ => fail!("Could not reserve blob."), } old_blob_desc } fn backend_store(&mut self, name: &[u8], blob: &[u8]) { match self.backend.store(name, blob) { Ok(()) => (), Err(s) => fail!(s), } } fn backend_read(&mut self, name: &[u8]) -> Vec<u8> { match self.backend.retrieve(name) { Ok(data) => data, Err(s) => fail!(s), } } fn flush(&mut self) { if self.buffer_data_len == 0 { return } // Replace blob id let old_blob_desc = self.reserve_new_blob(); self.buffer_data_len = 0; // Prepare blob let mut ready_callback = Vec::new(); let mut blob = Vec::new(); loop { match self.buffer_data.shift() { Some((chunk_ref, chunk, cb)) => { ready_callback.push((chunk_ref, cb)); blob.push_all(chunk.as_slice()); }, None => break, } } self.blob_index.send_reply(blob_index::InAir(old_blob_desc.clone())); self.backend_store(old_blob_desc.name.as_slice(), blob.as_slice()); self.blob_index.send_reply(blob_index::CommitDone(old_blob_desc)); // Go through callbacks for (blobid, cb) in ready_callback.move_iter() { cb(blobid); } } fn maybe_flush(&mut self) { if self.buffer_data_len >= self.max_blob_size { self.flush(); } } } impl <B: BlobStoreBackend> MsgHandler<Msg, Reply> for BlobStore<B> { fn handle(&mut self, msg: Msg, reply: |Reply|) { match msg { Store(blob, cb) => { if blob.len() == 0 { let id = BlobID{name: vec!(0), begin: 0, end: 0}; let cb_id = id.clone(); spawn(proc(){ cb(cb_id) }); return reply(StoreOK(id)); } let new_size = self.buffer_data_len + blob.len(); let id = BlobID{name: self.blob_desc.name.clone(), begin: self.buffer_data_len, end: new_size}; self.buffer_data_len = new_size; self.buffer_data.push((id.clone(), blob.into_vec(), cb)); // To avoid unnecessary blocking, we reply with the ID *before* possibly flushing. reply(StoreOK(id)); // Flushing can be expensive, so try not to block on it.
self.maybe_flush(); }, Retrieve(id) => { if id.begin == 0 && id.end == 0 { return reply(RetrieveOK(vec![].into_vec())); } let blob = self.backend_read(id.name.as_slice()); let chunk = blob.slice(id.begin, id.end); return reply(RetrieveOK(chunk.into_vec())); }, Flush => { self.flush(); return reply(FlushOK) }, } } } #[cfg(test)] pub mod tests { use super::*; use std::rand::{task_rng}; use quickcheck::{Config, Testable, gen}; use quickcheck::{quickcheck_config}; use process::{Process}; use std::sync::{Arc, Mutex}; use std::collections::treemap::{TreeMap}; #[deriving(Clone)] pub struct MemoryBackend { files: Arc<Mutex<TreeMap<Vec<u8>, Vec<u8>>>> } impl MemoryBackend { pub fn new() -> MemoryBackend { MemoryBackend{files: Arc::new(Mutex::new(TreeMap::new()))} } fn guarded_insert(&mut self, key: Vec<u8>, value: Vec<u8>) -> Result<(), String>{ let mut guarded_files = self.files.lock(); if guarded_files.contains_key(&key) { return Err(format!("Key already exists: '{}'", key)); } guarded_files.insert(key, value); Ok(()) } fn guarded_retrieve(&mut self, key: &[u8]) -> Result<Vec<u8>, String> { let value_opt = self.files.lock().find(&key.into_vec()).map(|v| v.clone()); value_opt.map(|v| Ok(v)).unwrap_or_else(|| Err(format!("Unknown key: '{}'", key))) } } impl BlobStoreBackend for MemoryBackend { fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String> { self.guarded_insert(name.to_owned(), data.into_vec()) } fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> { self.guarded_retrieve(name) } } #[deriving(Clone)] pub struct DevNullBackend; impl BlobStoreBackend for DevNullBackend { fn store(&mut self, _name: &[u8], _data: &[u8]) -> Result<(), String> { Ok(()) } fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> { Err(format!("Unknown key: '{}'", name)) } } // QuickCheck configuration static SIZE: uint = 100; static CONFIG: Config = Config { tests: 200, max_tests: 1000, }; // QuickCheck helpers: fn qcheck<A: Testable>(f: A) { quickcheck_config(CONFIG, &mut gen(task_rng(), SIZE), f) } #[test] fn identity() { fn prop(chunks: Vec<Vec<u8>>) -> bool { let mut backend = MemoryBackend::new(); let local_backend = backend.clone(); let bsP : BlobStoreProcess<MemoryBackend> = Process::new(proc() { BlobStore::new_for_testing(local_backend, 1024) }); let mut ids = Vec::new(); for chunk in chunks.iter() { match bsP.send_reply(Store(chunk.as_slice().into_vec(), proc(_){})) { StoreOK(id) => { ids.push((id, chunk)); }, _ => fail!("Unexpected reply from blob store."), } } assert_eq!(bsP.send_reply(Flush), FlushOK); // Non-empty chunks must be in the backend now: for &(ref id, chunk) in ids.iter() { if chunk.len() > 0 { match backend.retrieve(id.name.as_slice()) { Ok(_) => (), Err(e) => fail!(e), } } } // All chunks must be available through the blob store: for &(ref id, chunk) in ids.iter() { match bsP.send_reply(Retrieve(id.clone())) { RetrieveOK(found_chunk) => assert_eq!(found_chunk, chunk.as_slice().into_vec()), _ => fail!("Unexpected reply from blob store."), } } return true; } qcheck(prop); } #[test] fn identity_with_excessive_flushing()
for &(ref id, chunk) in ids.iter() { if chunk.len() > 0 { match backend.retrieve(id.name.as_slice()) { Ok(_) => (), Err(e) => fail!(e), } } } // All chunks must be available through the blob store: for &(ref id, chunk) in ids.iter() { match bsP.send_reply(Retrieve(id.clone())) { RetrieveOK(found_chunk) => assert_eq!(found_chunk, chunk.as_slice().into_vec()), _ => fail!("Unexpected reply from blob store."), } } return true; } qcheck(prop); } #[test] fn blobid_identity() { fn prop(name: Vec<u8>, begin: uint, end: uint) -> bool { let blob_id = BlobID{name: name.into_vec(), begin: begin, end: end}; BlobID::from_bytes(blob_id.as_bytes()) == blob_id } qcheck(prop); } }
{ fn prop(chunks: Vec<Vec<u8>>) -> bool { let mut backend = MemoryBackend::new(); let local_backend = backend.clone(); let bsP: BlobStoreProcess<MemoryBackend> = Process::new(proc() { BlobStore::new_for_testing(local_backend, 1024) }); let mut ids = Vec::new(); for chunk in chunks.iter() { match bsP.send_reply(Store(chunk.as_slice().into_vec(), proc(_){})) { StoreOK(id) => { ids.push((id, chunk)); }, _ => fail!("Unexpected reply from blob store."), } assert_eq!(bsP.send_reply(Flush), FlushOK); let &(ref id, chunk) = ids.last().unwrap(); assert_eq!(bsP.send_reply(Retrieve(id.clone())), RetrieveOK(chunk.clone())); } // Non-empty chunks must be in the backend now:
identifier_body
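The `Store` arm of `BlobStore::handle` above derives each `BlobID` from plain offset arithmetic: `begin` is the buffered length before the chunk is appended and `end` is `begin` plus the chunk length, so retrieval is just slicing the committed blob. A simplified, single-threaded sketch of that packing scheme in modern Rust (no process machinery or callbacks):

```
// Pack chunks into one growing blob and address them by byte range,
// mirroring the begin/end arithmetic of the Store handler above.
struct PackedBlob {
    data: Vec<u8>,
}

#[derive(Debug, PartialEq)]
struct ChunkRef {
    begin: usize,
    end: usize,
}

impl PackedBlob {
    fn new() -> Self {
        PackedBlob { data: Vec::new() }
    }

    // Append a chunk and return the byte range it occupies.
    fn store(&mut self, chunk: &[u8]) -> ChunkRef {
        let begin = self.data.len();
        self.data.extend_from_slice(chunk);
        ChunkRef { begin, end: self.data.len() }
    }

    // Retrieval is a plain slice of the packed data.
    fn retrieve(&self, r: &ChunkRef) -> &[u8] {
        &self.data[r.begin..r.end]
    }
}

fn main() {
    let mut blob = PackedBlob::new();
    let a = blob.store(b"hello");
    let b = blob.store(b"world");
    assert_eq!(a, ChunkRef { begin: 0, end: 5 });
    assert_eq!(blob.retrieve(&a), b"hello");
    assert_eq!(blob.retrieve(&b), b"world");
}
```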
blob_store.rs
// Copyright 2014 Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Combines data chunks into larger blobs to be stored externally. use std::sync::{Arc, Mutex}; use serialize::{json, Encodable, Decodable}; use serialize::hex::{ToHex}; use serialize::json::{Json, ToJson, Decoder, from_str}; use std::collections::treemap::{TreeMap}; use std::collections::lru_cache::{LruCache}; use std::io::{File}; use std::str; use process::{Process, MsgHandler}; use blob_index; use blob_index::{BlobIndexProcess}; #[cfg(test)] use blob_index::{BlobIndex}; pub type BlobStoreProcess<B> = Process<Msg, Reply, BlobStore<B>>; pub trait BlobStoreBackend { fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String>; fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String>; } #[deriving(Clone)] pub struct FileBackend { root: Path, read_cache: Arc<Mutex<LruCache<Vec<u8>, Result<Vec<u8>, String>>>>, } impl FileBackend { pub fn new(root: Path) -> FileBackend { FileBackend{root: root, read_cache: Arc::new(Mutex::new(LruCache::new(10)))} } fn guarded_cache_get(&self, name: &Vec<u8>) -> Option<Result<Vec<u8>, String>> { self.read_cache.lock().get(name).map(|v| v.clone()) } fn guarded_cache_put(&mut self, name: Vec<u8>, result: Result<Vec<u8>, String>) { self.read_cache.lock().put(name, result); } } impl BlobStoreBackend for FileBackend { fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String> { let mut path = self.root.clone(); path.push(name.to_hex()); let mut file = match File::create(&path) { Err(e) => return Err(e.to_string()), Ok(f) => f, }; match file.write(data) { Err(e) => Err(e.to_string()), Ok(()) => Ok(()), } } fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> { // Check for key in cache: let name = name.into_vec(); let value_opt = self.guarded_cache_get(&name); match value_opt { Some(result) => return result, None => (), } // Read key: let path = { let mut p = self.root.clone(); p.push(name.as_slice().to_hex()); p }; let mut fd = File::open(&path).unwrap(); let res = fd.read_to_end().and_then(|data| { Ok(data.into_vec()) }).or_else(|e| Err(e.to_string())); // Update cache to contain key: self.guarded_cache_put(name, res.clone()); return res; } } #[deriving(Show, Clone, Eq, PartialEq, Encodable, Decodable)] pub struct BlobID { name: Vec<u8>, begin: uint, end: uint, } impl BlobID { pub fn from_bytes(bytes: Vec<u8>) -> BlobID { let mut decoder = Decoder::new(from_str( str::from_utf8(bytes.as_slice()).unwrap()).unwrap()); Decodable::decode(&mut decoder).unwrap() } pub fn as_bytes(&self) -> Vec<u8> { self.to_json().to_string().as_bytes().into_vec() } } impl ToJson for BlobID { fn to_json(&self) -> Json { let mut m = TreeMap::new(); m.insert("name".to_string(), self.name.to_json()); m.insert("begin".to_string(), self.begin.to_json()); m.insert("end".to_string(), self.end.to_json()); json::Object(m).to_json() } } pub enum Msg { /// Store a new data chunk into the current blob. 
The callback is triggered after the blob /// containing the chunk has been committed to persistent storage (it is then safe to use the /// `BlobID` as a persistent reference). Store(Vec<u8>, proc(BlobID):Send -> ()), /// Retrieve the data chunk identified by `BlobID`. Retrieve(BlobID), /// Flush the current blob, independent of its size. Flush, } #[deriving(Eq, PartialEq, Show)] pub enum Reply { StoreOK(BlobID), RetrieveOK(Vec<u8>), FlushOK, } pub struct BlobStore<B> { backend: B, blob_index: BlobIndexProcess, blob_desc: blob_index::BlobDesc, buffer_data: Vec<(BlobID, Vec<u8>, proc(BlobID):Send -> ())>, buffer_data_len: uint, max_blob_size: uint, } fn
() -> blob_index::BlobDesc { blob_index::BlobDesc{name: b"".into_vec(), id: 0} } impl <B: BlobStoreBackend> BlobStore<B> { pub fn new(index: BlobIndexProcess, backend: B, max_blob_size: uint) -> BlobStore<B> { let mut bs = BlobStore{ backend: backend, blob_index: index, blob_desc: empty_blob_desc(), buffer_data: Vec::new(), buffer_data_len: 0, max_blob_size: max_blob_size, }; bs.reserve_new_blob(); bs } #[cfg(test)] pub fn new_for_testing(backend: B, max_blob_size: uint) -> BlobStore<B> { let biP = Process::new(proc() { BlobIndex::new_for_testing() }); let mut bs = BlobStore{backend: backend, blob_index: biP, blob_desc: empty_blob_desc(), buffer_data: Vec::new(), buffer_data_len: 0, max_blob_size: max_blob_size, }; bs.reserve_new_blob(); bs } fn reserve_new_blob(&mut self) -> blob_index::BlobDesc { let old_blob_desc = self.blob_desc.clone(); let res = self.blob_index.send_reply(blob_index::Reserve); match res { blob_index::Reserved(blob_desc) => { self.blob_desc = blob_desc; }, _ => fail!("Could not reserve blob."), } old_blob_desc } fn backend_store(&mut self, name: &[u8], blob: &[u8]) { match self.backend.store(name, blob) { Ok(()) => (), Err(s) => fail!(s), } } fn backend_read(&mut self, name: &[u8]) -> Vec<u8> { match self.backend.retrieve(name) { Ok(data) => data, Err(s) => fail!(s), } } fn flush(&mut self) { if self.buffer_data_len == 0 { return } // Replace blob id let old_blob_desc = self.reserve_new_blob(); self.buffer_data_len = 0; // Prepare blob let mut ready_callback = Vec::new(); let mut blob = Vec::new(); loop { match self.buffer_data.shift() { Some((chunk_ref, chunk, cb)) => { ready_callback.push((chunk_ref, cb)); blob.push_all(chunk.as_slice()); }, None => break, } } self.blob_index.send_reply(blob_index::InAir(old_blob_desc.clone())); self.backend_store(old_blob_desc.name.as_slice(), blob.as_slice()); self.blob_index.send_reply(blob_index::CommitDone(old_blob_desc)); // Go through callbacks for (blobid, cb) in ready_callback.move_iter() { cb(blobid); } } fn maybe_flush(&mut self) { if self.buffer_data_len >= self.max_blob_size { self.flush(); } } } impl <B: BlobStoreBackend> MsgHandler<Msg, Reply> for BlobStore<B> { fn handle(&mut self, msg: Msg, reply: |Reply|) { match msg { Store(blob, cb) => { if blob.len() == 0 { let id = BlobID{name: vec!(0), begin: 0, end: 0}; let cb_id = id.clone(); spawn(proc(){ cb(cb_id) }); return reply(StoreOK(id)); } let new_size = self.buffer_data_len + blob.len(); let id = BlobID{name: self.blob_desc.name.clone(), begin: self.buffer_data_len, end: new_size}; self.buffer_data_len = new_size; self.buffer_data.push((id.clone(), blob.into_vec(), cb)); // To avoid unnecessary blocking, we reply with the ID *before* possibly flushing. reply(StoreOK(id)); // Flushing can be expensive, so try not to block on it.
self.maybe_flush(); }, Retrieve(id) => { if id.begin == 0 && id.end == 0 { return reply(RetrieveOK(vec![].into_vec())); } let blob = self.backend_read(id.name.as_slice()); let chunk = blob.slice(id.begin, id.end); return reply(RetrieveOK(chunk.into_vec())); }, Flush => { self.flush(); return reply(FlushOK) }, } } } #[cfg(test)] pub mod tests { use super::*; use std::rand::{task_rng}; use quickcheck::{Config, Testable, gen}; use quickcheck::{quickcheck_config}; use process::{Process}; use std::sync::{Arc, Mutex}; use std::collections::treemap::{TreeMap}; #[deriving(Clone)] pub struct MemoryBackend { files: Arc<Mutex<TreeMap<Vec<u8>, Vec<u8>>>> } impl MemoryBackend { pub fn new() -> MemoryBackend { MemoryBackend{files: Arc::new(Mutex::new(TreeMap::new()))} } fn guarded_insert(&mut self, key: Vec<u8>, value: Vec<u8>) -> Result<(), String>{ let mut guarded_files = self.files.lock(); if guarded_files.contains_key(&key) { return Err(format!("Key already exists: '{}'", key)); } guarded_files.insert(key, value); Ok(()) } fn guarded_retrieve(&mut self, key: &[u8]) -> Result<Vec<u8>, String> { let value_opt = self.files.lock().find(&key.into_vec()).map(|v| v.clone()); value_opt.map(|v| Ok(v)).unwrap_or_else(|| Err(format!("Unknown key: '{}'", key))) } } impl BlobStoreBackend for MemoryBackend { fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String> { self.guarded_insert(name.to_owned(), data.into_vec()) } fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> { self.guarded_retrieve(name) } } #[deriving(Clone)] pub struct DevNullBackend; impl BlobStoreBackend for DevNullBackend { fn store(&mut self, _name: &[u8], _data: &[u8]) -> Result<(), String> { Ok(()) } fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> { Err(format!("Unknown key: '{}'", name)) } } // QuickCheck configuration static SIZE: uint = 100; static CONFIG: Config = Config { tests: 200, max_tests: 1000, }; // QuickCheck helpers: fn qcheck<A: Testable>(f: A) { quickcheck_config(CONFIG, &mut gen(task_rng(), SIZE), f) } #[test] fn identity() { fn prop(chunks: Vec<Vec<u8>>) -> bool { let mut backend = MemoryBackend::new(); let local_backend = backend.clone(); let bsP : BlobStoreProcess<MemoryBackend> = Process::new(proc() { BlobStore::new_for_testing(local_backend, 1024) }); let mut ids = Vec::new(); for chunk in chunks.iter() { match bsP.send_reply(Store(chunk.as_slice().into_vec(), proc(_){})) { StoreOK(id) => { ids.push((id, chunk)); }, _ => fail!("Unexpected reply from blob store."), } } assert_eq!(bsP.send_reply(Flush), FlushOK); // Non-empty chunks must be in the backend now: for &(ref id, chunk) in ids.iter() { if chunk.len() > 0 { match backend.retrieve(id.name.as_slice()) { Ok(_) => (), Err(e) => fail!(e), } } } // All chunks must be available through the blob store: for &(ref id, chunk) in ids.iter() { match bsP.send_reply(Retrieve(id.clone())) { RetrieveOK(found_chunk) => assert_eq!(found_chunk, chunk.as_slice().into_vec()), _ => fail!("Unexpected reply from blob store."), } } return true; } qcheck(prop); } #[test] fn identity_with_excessive_flushing() { fn prop(chunks: Vec<Vec<u8>>) -> bool { let mut backend = MemoryBackend::new(); let local_backend = backend.clone(); let bsP: BlobStoreProcess<MemoryBackend> = Process::new(proc() { BlobStore::new_for_testing(local_backend, 1024) }); let mut ids = Vec::new(); for chunk in chunks.iter() { match bsP.send_reply(Store(chunk.as_slice().into_vec(), proc(_){})) { StoreOK(id) => { ids.push((id, chunk)); }, _ => fail!("Unexpected reply from blob store."), 
} assert_eq!(bsP.send_reply(Flush), FlushOK); let &(ref id, chunk) = ids.last().unwrap(); assert_eq!(bsP.send_reply(Retrieve(id.clone())), RetrieveOK(chunk.clone())); } // Non-empty chunks must be in the backend now: for &(ref id, chunk) in ids.iter() { if chunk.len() > 0 { match backend.retrieve(id.name.as_slice()) { Ok(_) => (), Err(e) => fail!(e), } } } // All chunks must be available through the blob store: for &(ref id, chunk) in ids.iter() { match bsP.send_reply(Retrieve(id.clone())) { RetrieveOK(found_chunk) => assert_eq!(found_chunk, chunk.as_slice().into_vec()), _ => fail!("Unexpected reply from blob store."), } } return true; } qcheck(prop); } #[test] fn blobid_identity() { fn prop(name: Vec<u8>, begin: uint, end: uint) -> bool { let blob_id = BlobID{name: name.into_vec(), begin: begin, end: end}; BlobID::from_bytes(blob_id.as_bytes()) == blob_id } qcheck(prop); } }
empty_blob_desc
identifier_name
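`FileBackend::retrieve` above is a read-through cache: it consults the shared `LruCache` first, falls back to a disk read on a miss, and then caches the outcome, errors included, so repeated failing lookups are also cheap. A sketch of the same pattern in modern Rust; a `HashMap` stands in for the pre-1.0 `LruCache` and the slow source is a closure, both illustrative assumptions:

```
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

// Read-through cache over an arbitrary slow source. Note that the *Result*
// is cached, so errors are remembered too, as in FileBackend.
struct CachedReader<F>
where
    F: Fn(&[u8]) -> Result<Vec<u8>, String>,
{
    cache: Arc<Mutex<HashMap<Vec<u8>, Result<Vec<u8>, String>>>>,
    read_slow: F,
}

impl<F> CachedReader<F>
where
    F: Fn(&[u8]) -> Result<Vec<u8>, String>,
{
    fn retrieve(&self, name: &[u8]) -> Result<Vec<u8>, String> {
        // Fast path: return the cached outcome if present.
        if let Some(hit) = self.cache.lock().unwrap().get(name) {
            return hit.clone();
        }
        // Slow path: read from the source and remember the outcome.
        let res = (self.read_slow)(name);
        self.cache.lock().unwrap().insert(name.to_vec(), res.clone());
        res
    }
}

fn main() {
    let reader = CachedReader {
        cache: Arc::new(Mutex::new(HashMap::new())),
        read_slow: |name: &[u8]| Ok(name.to_vec()), // pretend disk read
    };
    assert_eq!(reader.retrieve(b"key"), Ok(b"key".to_vec())); // miss, fills cache
    assert_eq!(reader.retrieve(b"key"), Ok(b"key".to_vec())); // hit
}
```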
blob_store.rs
// Copyright 2014 Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Combines data chunks into larger blobs to be stored externally. use std::sync::{Arc, Mutex}; use serialize::{json, Encodable, Decodable}; use serialize::hex::{ToHex}; use serialize::json::{Json, ToJson, Decoder, from_str}; use std::collections::treemap::{TreeMap}; use std::collections::lru_cache::{LruCache}; use std::io::{File}; use std::str; use process::{Process, MsgHandler}; use blob_index; use blob_index::{BlobIndexProcess}; #[cfg(test)] use blob_index::{BlobIndex}; pub type BlobStoreProcess<B> = Process<Msg, Reply, BlobStore<B>>; pub trait BlobStoreBackend { fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String>; fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String>; } #[deriving(Clone)] pub struct FileBackend { root: Path, read_cache: Arc<Mutex<LruCache<Vec<u8>, Result<Vec<u8>, String>>>>, } impl FileBackend { pub fn new(root: Path) -> FileBackend { FileBackend{root: root, read_cache: Arc::new(Mutex::new(LruCache::new(10)))} } fn guarded_cache_get(&self, name: &Vec<u8>) -> Option<Result<Vec<u8>, String>> { self.read_cache.lock().get(name).map(|v| v.clone()) } fn guarded_cache_put(&mut self, name: Vec<u8>, result: Result<Vec<u8>, String>) { self.read_cache.lock().put(name, result); } } impl BlobStoreBackend for FileBackend { fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String> { let mut path = self.root.clone(); path.push(name.to_hex()); let mut file = match File::create(&path) { Err(e) => return Err(e.to_string()), Ok(f) => f, }; match file.write(data) { Err(e) => Err(e.to_string()), Ok(()) => Ok(()), } } fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> { // Check for key in cache: let name = name.into_vec(); let value_opt = self.guarded_cache_get(&name); match value_opt { Some(result) => return result, None => (), } // Read key: let path = { let mut p = self.root.clone(); p.push(name.as_slice().to_hex()); p }; let mut fd = File::open(&path).unwrap(); let res = fd.read_to_end().and_then(|data| { Ok(data.into_vec()) }).or_else(|e| Err(e.to_string())); // Update cache to contain key: self.guarded_cache_put(name, res.clone()); return res; } } #[deriving(Show, Clone, Eq, PartialEq, Encodable, Decodable)] pub struct BlobID { name: Vec<u8>, begin: uint, end: uint, } impl BlobID { pub fn from_bytes(bytes: Vec<u8>) -> BlobID { let mut decoder = Decoder::new(from_str( str::from_utf8(bytes.as_slice()).unwrap()).unwrap()); Decodable::decode(&mut decoder).unwrap() } pub fn as_bytes(&self) -> Vec<u8> { self.to_json().to_string().as_bytes().into_vec() } } impl ToJson for BlobID { fn to_json(&self) -> Json { let mut m = TreeMap::new(); m.insert("name".to_string(), self.name.to_json()); m.insert("begin".to_string(), self.begin.to_json()); m.insert("end".to_string(), self.end.to_json()); json::Object(m).to_json() } } pub enum Msg { /// Store a new data chunk into the current blob. 
The callback is triggered after the blob /// containing the chunk has been committed to persistent storage (it is then safe to use the /// `BlobID` as a persistent reference). Store(Vec<u8>, proc(BlobID):Send -> ()), /// Retrieve the data chunk identified by `BlobID`. Retrieve(BlobID), /// Flush the current blob, independent of its size. Flush, } #[deriving(Eq, PartialEq, Show)] pub enum Reply { StoreOK(BlobID), RetrieveOK(Vec<u8>), FlushOK, } pub struct BlobStore<B> { backend: B, blob_index: BlobIndexProcess, blob_desc: blob_index::BlobDesc, buffer_data: Vec<(BlobID, Vec<u8>, proc(BlobID):Send -> ())>, buffer_data_len: uint, max_blob_size: uint, } fn empty_blob_desc() -> blob_index::BlobDesc { blob_index::BlobDesc{name: b"".into_vec(), id: 0} } impl <B: BlobStoreBackend> BlobStore<B> { pub fn new(index: BlobIndexProcess, backend: B, max_blob_size: uint) -> BlobStore<B> { let mut bs = BlobStore{ backend: backend, blob_index: index, blob_desc: empty_blob_desc(), buffer_data: Vec::new(), buffer_data_len: 0, max_blob_size: max_blob_size, }; bs.reserve_new_blob(); bs } #[cfg(test)] pub fn new_for_testing(backend: B, max_blob_size: uint) -> BlobStore<B> { let biP = Process::new(proc() { BlobIndex::new_for_testing() }); let mut bs = BlobStore{backend: backend, blob_index: biP, blob_desc: empty_blob_desc(), buffer_data: Vec::new(), buffer_data_len: 0, max_blob_size: max_blob_size, }; bs.reserve_new_blob(); bs } fn reserve_new_blob(&mut self) -> blob_index::BlobDesc { let old_blob_desc = self.blob_desc.clone(); let res = self.blob_index.send_reply(blob_index::Reserve); match res { blob_index::Reserved(blob_desc) => { self.blob_desc = blob_desc; }, _ => fail!("Could not reserve blob."), } old_blob_desc } fn backend_store(&mut self, name: &[u8], blob: &[u8]) { match self.backend.store(name, blob) { Ok(()) => (), Err(s) => fail!(s), } } fn backend_read(&mut self, name: &[u8]) -> Vec<u8> { match self.backend.retrieve(name) { Ok(data) => data, Err(s) => fail!(s), } } fn flush(&mut self) { if self.buffer_data_len == 0 { return } // Replace blob id let old_blob_desc = self.reserve_new_blob(); self.buffer_data_len = 0; // Prepare blob let mut ready_callback = Vec::new(); let mut blob = Vec::new(); loop { match self.buffer_data.shift() { Some((chunk_ref, chunk, cb)) => { ready_callback.push((chunk_ref, cb));
}, None => break, } } self.blob_index.send_reply(blob_index::InAir(old_blob_desc.clone())); self.backend_store(old_blob_desc.name.as_slice(), blob.as_slice()); self.blob_index.send_reply(blob_index::CommitDone(old_blob_desc)); // Go through callbacks for (blobid, cb) in ready_callback.move_iter() { cb(blobid); } } fn maybe_flush(&mut self) { if self.buffer_data_len >= self.max_blob_size { self.flush(); } } } impl <B: BlobStoreBackend> MsgHandler<Msg, Reply> for BlobStore<B> { fn handle(&mut self, msg: Msg, reply: |Reply|) { match msg { Store(blob, cb) => { if blob.len() == 0 { let id = BlobID{name: vec!(0), begin: 0, end: 0}; let cb_id = id.clone(); spawn(proc(){ cb(cb_id) }); return reply(StoreOK(id)); } let new_size = self.buffer_data_len + blob.len(); let id = BlobID{name: self.blob_desc.name.clone(), begin: self.buffer_data_len, end: new_size}; self.buffer_data_len = new_size; self.buffer_data.push((id.clone(), blob.into_vec(), cb)); // To avoid unnecessary blocking, we reply with the ID *before* possibly flushing. reply(StoreOK(id)); // Flushing can be expensive, so try not to block on it. self.maybe_flush(); }, Retrieve(id) => { if id.begin == 0 && id.end == 0 { return reply(RetrieveOK(vec![].into_vec())); } let blob = self.backend_read(id.name.as_slice()); let chunk = blob.slice(id.begin, id.end); return reply(RetrieveOK(chunk.into_vec())); }, Flush => { self.flush(); return reply(FlushOK) }, } } } #[cfg(test)] pub mod tests { use super::*; use std::rand::{task_rng}; use quickcheck::{Config, Testable, gen}; use quickcheck::{quickcheck_config}; use process::{Process}; use std::sync::{Arc, Mutex}; use std::collections::treemap::{TreeMap}; #[deriving(Clone)] pub struct MemoryBackend { files: Arc<Mutex<TreeMap<Vec<u8>, Vec<u8>>>> } impl MemoryBackend { pub fn new() -> MemoryBackend { MemoryBackend{files: Arc::new(Mutex::new(TreeMap::new()))} } fn guarded_insert(&mut self, key: Vec<u8>, value: Vec<u8>) -> Result<(), String>{ let mut guarded_files = self.files.lock(); if guarded_files.contains_key(&key) { return Err(format!("Key already exists: '{}'", key)); } guarded_files.insert(key, value); Ok(()) } fn guarded_retrieve(&mut self, key: &[u8]) -> Result<Vec<u8>, String> { let value_opt = self.files.lock().find(&key.into_vec()).map(|v| v.clone()); value_opt.map(|v| Ok(v)).unwrap_or_else(|| Err(format!("Unknown key: '{}'", key))) } } impl BlobStoreBackend for MemoryBackend { fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String> { self.guarded_insert(name.to_owned(), data.into_vec()) } fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> { self.guarded_retrieve(name) } } #[deriving(Clone)] pub struct DevNullBackend; impl BlobStoreBackend for DevNullBackend { fn store(&mut self, _name: &[u8], _data: &[u8]) -> Result<(), String> { Ok(()) } fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> { Err(format!("Unknown key: '{}'", name)) } } // QuickCheck configuration static SIZE: uint = 100; static CONFIG: Config = Config { tests: 200, max_tests: 1000, }; // QuickCheck helpers: fn qcheck<A: Testable>(f: A) { quickcheck_config(CONFIG, &mut gen(task_rng(), SIZE), f) } #[test] fn identity() { fn prop(chunks: Vec<Vec<u8>>) -> bool { let mut backend = MemoryBackend::new(); let local_backend = backend.clone(); let bsP : BlobStoreProcess<MemoryBackend> = Process::new(proc() { BlobStore::new_for_testing(local_backend, 1024) }); let mut ids = Vec::new(); for chunk in chunks.iter() { match bsP.send_reply(Store(chunk.as_slice().into_vec(), proc(_){})) { StoreOK(id) => { ids.push((id, chunk)); }, _ => fail!("Unexpected reply from blob store."), } } assert_eq!(bsP.send_reply(Flush), FlushOK); // Non-empty chunks must be in the backend now: for &(ref id, chunk) in ids.iter() { if chunk.len() > 0 { match backend.retrieve(id.name.as_slice()) { Ok(_) => (), Err(e) => fail!(e), } } } // All chunks must be available through the blob store: for &(ref id, chunk) in ids.iter() { match bsP.send_reply(Retrieve(id.clone())) { RetrieveOK(found_chunk) => assert_eq!(found_chunk, chunk.as_slice().into_vec()), _ => fail!("Unexpected reply from blob store."), } } return true; } qcheck(prop); } #[test] fn identity_with_excessive_flushing() { fn prop(chunks: Vec<Vec<u8>>) -> bool { let mut backend = MemoryBackend::new(); let local_backend = backend.clone(); let bsP: BlobStoreProcess<MemoryBackend> = Process::new(proc() { BlobStore::new_for_testing(local_backend, 1024) }); let mut ids = Vec::new(); for chunk in chunks.iter() { match bsP.send_reply(Store(chunk.as_slice().into_vec(), proc(_){})) { StoreOK(id) => { ids.push((id, chunk)); }, _ => fail!("Unexpected reply from blob store."),
ids.push((id, chunk)); }, _ => fail!("Unexpected reply from blob store."), } } assert_eq!(bsP.send_reply(Flush), FlushOK); // Non-empty chunks must be in the backend now: for &(ref id, chunk) in ids.iter() { if chunk.len() > 0 { match backend.retrieve(id.name.as_slice()) { Ok(_) => (), Err(e) => fail!(e), } } } // All chunks must be available through the blob store: for &(ref id, chunk) in ids.iter() { match bsP.send_reply(Retrieve(id.clone())) { RetrieveOK(found_chunk) => assert_eq!(found_chunk, chunk.as_slice().into_vec()), _ => fail!("Unexpected reply from blob store."), } } return true; } qcheck(prop); } #[test] fn identity_with_excessive_flushing() { fn prop(chunks: Vec<Vec<u8>>) -> bool { let mut backend = MemoryBackend::new(); let local_backend = backend.clone(); let bsP: BlobStoreProcess<MemoryBackend> = Process::new(proc() { BlobStore::new_for_testing(local_backend, 1024) }); let mut ids = Vec::new(); for chunk in chunks.iter() { match bsP.send_reply(Store(chunk.as_slice().into_vec(), proc(_){})) { StoreOK(id) => { ids.push((id, chunk)); }, _ => fail!("Unexpected reply from blob store."), } assert_eq!(bsP.send_reply(Flush), FlushOK); let &(ref id, chunk) = ids.last().unwrap(); assert_eq!(bsP.send_reply(Retrieve(id.clone())), RetrieveOK(chunk.clone())); } // Non-empty chunks must be in the backend now: for &(ref id, chunk) in ids.iter() { if chunk.len() > 0 { match backend.retrieve(id.name.as_slice()) { Ok(_) => (), Err(e) => fail!(e), } } } // All chunks must be available through the blob store: for &(ref id, chunk) in ids.iter() { match bsP.send_reply(Retrieve(id.clone())) { RetrieveOK(found_chunk) => assert_eq!(found_chunk, chunk.as_slice().into_vec()), _ => fail!("Unexpected reply from blob store."), } } return true; } qcheck(prop); } #[test] fn blobid_identity() { fn prop(name: Vec<u8>, begin: uint, end: uint) -> bool { let blob_id = BlobID{name: name.into_vec(), begin: begin, end: end}; BlobID::from_bytes(blob_id.as_bytes()) == blob_id } qcheck(prop); } }
blob.push_all(chunk.as_slice());
random_line_split
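`maybe_flush` above implements size-threshold batching: chunks accumulate in `buffer_data` until `buffer_data_len` reaches `max_blob_size`, at which point `flush` concatenates them into one blob and commits it; an explicit `Flush` message forces the same path regardless of size. A minimal sketch of that batching logic, with a `Vec` of flushed blobs standing in for the external backend and no callbacks:

```
// Threshold-based batching, mirroring store/maybe_flush/flush above.
struct BatchingWriter {
    buffer: Vec<Vec<u8>>,
    buffered_len: usize,
    max_size: usize,
    flushed_blobs: Vec<Vec<u8>>, // stand-in for the external backend
}

impl BatchingWriter {
    fn new(max_size: usize) -> Self {
        BatchingWriter { buffer: Vec::new(), buffered_len: 0, max_size, flushed_blobs: Vec::new() }
    }

    fn store(&mut self, chunk: Vec<u8>) {
        self.buffered_len += chunk.len();
        self.buffer.push(chunk);
        self.maybe_flush();
    }

    fn maybe_flush(&mut self) {
        if self.buffered_len >= self.max_size {
            self.flush();
        }
    }

    fn flush(&mut self) {
        if self.buffered_len == 0 {
            return;
        }
        // Concatenate all buffered chunks into one blob and "commit" it.
        let blob: Vec<u8> = self.buffer.drain(..).flatten().collect();
        self.buffered_len = 0;
        self.flushed_blobs.push(blob);
    }
}

fn main() {
    let mut w = BatchingWriter::new(8);
    w.store(b"aaaa".to_vec()); // 4 bytes buffered, below the threshold
    w.store(b"bbbb".to_vec()); // reaches 8 bytes => flushes "aaaabbbb"
    w.store(b"cc".to_vec());   // buffered again
    w.flush();                 // explicit flush, like the Flush message
    assert_eq!(w.flushed_blobs, vec![b"aaaabbbb".to_vec(), b"cc".to_vec()]);
}
```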
mod.rs
#![warn(missing_docs)] //! Contains all data structures and methods to work with model resources. //! //! Model is an isolated scene that is used to create copies of its data - this //! process is known as `instantiation`. Isolation in this context means that //! such a scene cannot be modified, rendered, etc. It is just a data source. //! //! All instances will have references to the resource they were created from - this //! helps to get the correct vertex and index buffers when loading a save file: //! the loader will just take all needed data from the resource, so we don't need to store //! such data in the save file. This mechanism also works when you change //! a resource in an external editor (3ds Max, Maya, Blender, etc.): the engine will assign //! the correct visual data when loading a saved game. //! //! # Supported formats //! //! Currently only the FBX (a common format in the game industry for storing complex 3D models) //! and RGS (the native Fyroxed format) formats are supported. use crate::{ animation::Animation, asset::{ manager::ResourceManager, options::ImportOptions, Resource, ResourceData, MODEL_RESOURCE_UUID, }, core::{ algebra::{UnitQuaternion, Vector3}, log::{Log, MessageKind}, pool::Handle, reflect::prelude::*, uuid::Uuid, variable::mark_inheritable_properties_non_modified, visitor::{Visit, VisitError, VisitResult, Visitor}, TypeUuidProvider, }, engine::SerializationContext, resource::fbx::{self, error::FbxError}, scene::{ animation::AnimationPlayer, graph::{map::NodeHandleMap, Graph}, node::Node, Scene, SceneLoader, }, }; use serde::{Deserialize, Serialize}; use std::{ any::Any, borrow::Cow, fmt::{Display, Formatter}, path::{Path, PathBuf}, sync::Arc, }; use strum_macros::{AsRefStr, EnumString, EnumVariantNames}; pub mod loader; #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Reflect)] #[repr(u32)] pub(crate) enum NodeMapping { UseNames = 0, UseHandles = 1, } /// See module docs. #[derive(Debug, Visit, Reflect)] pub struct Model { pub(crate) path: PathBuf, #[visit(skip)] pub(crate) mapping: NodeMapping, #[visit(skip)] scene: Scene, } impl TypeUuidProvider for Model { fn type_uuid() -> Uuid { MODEL_RESOURCE_UUID } } /// Type alias for model resources. pub type ModelResource = Resource<Model>; /// Extension trait for model resources. pub trait ModelResourceExtension: Sized { /// Tries to instantiate a model from the given resource. fn instantiate_from( model: ModelResource, model_data: &Model, handle: Handle<Node>, dest_graph: &mut Graph, ) -> (Handle<Node>, NodeHandleMap); /// Tries to instantiate a model from the given resource. fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node>; /// Instantiates a prefab and places it at the specified position and orientation in global coordinates. fn instantiate_at( &self, scene: &mut Scene, position: Vector3<f32>, orientation: UnitQuaternion<f32>, ) -> Handle<Node>; /// Tries to retarget animations from a given model resource to a node hierarchy starting /// from `root` on a given scene. /// /// Animation retargeting allows you to "transfer" animation from a model to a model /// instance on a scene. Imagine you have a character that should have multiple animations /// like idle, run, shoot, walk, etc. and you want to store each animation in a separate /// file.
Then, when you create a character on a level, you want to have all possible /// animations assigned to it; this is where this function comes into play: /// you just load a model of your character with a skeleton, but without any animations, /// then you load several "models" which have only a skeleton with some animation (such /// "models" can be considered as "animation" resources). After this you need to /// instantiate the model on your level and retarget all animations you need to that instance /// from other "models". All you have after this is a handle to a model and a bunch of /// handles to specific animations. After this, animations can be blended in any combination /// you need. For example, the idle animation can be blended with the walk animation when your /// character starts walking. /// /// # Notes /// /// Most of the 3d model formats can contain only one animation, so in most cases /// this function will return a vector with only one animation. fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation>; /// Tries to retarget animations from the given model resource to a node hierarchy starting /// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically /// adds retargeted animations to the specified animation player in the hierarchy of the given `root`. /// /// # Panic /// /// Panics if `dest_animation_player` is an invalid handle, or the node does not have an [`AnimationPlayer`] /// component. fn retarget_animations_to_player( &self, root: Handle<Node>, dest_animation_player: Handle<Node>, graph: &mut Graph, ) -> Vec<Handle<Animation>>; /// Tries to retarget animations from the given model resource to a node hierarchy starting /// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically /// adds retargeted animations to the first animation player in the hierarchy of the given `root`. /// /// # Panic /// /// Panics if there's no animation player in the given hierarchy (descendant nodes of `root`). fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>>; } impl ModelResourceExtension for ModelResource { fn instantiate_from( model: ModelResource, model_data: &Model, handle: Handle<Node>, dest_graph: &mut Graph, ) -> (Handle<Node>, NodeHandleMap) { let (root, old_to_new) = model_data .scene .graph .copy_node(handle, dest_graph, &mut |_, _| true); // Notify instantiated nodes about resource they were created from. let mut stack = vec![root]; while let Some(node_handle) = stack.pop() { let node = &mut dest_graph[node_handle]; node.resource = Some(model.clone()); // Reset resource instance root flag, this is needed because a node after instantiation cannot // be a root anymore. node.is_resource_instance_root = false; // Reset inheritable properties, so property inheritance system will take properties // from parent objects on resolve stage. node.as_reflect_mut(&mut |node| mark_inheritable_properties_non_modified(node)); // Continue on children. stack.extend_from_slice(node.children()); } // Fill original handles to instances. 
for (&old, &new) in old_to_new.inner().iter() { dest_graph[new].original_handle_in_resource = old; } dest_graph.update_hierarchical_data_for_descendants(root); (root, old_to_new) } fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node> { let data = self.data_ref(); let instance_root = Self::instantiate_from( self.clone(), &data, data.scene.graph.get_root(), &mut dest_scene.graph, ) .0; dest_scene.graph[instance_root].is_resource_instance_root = true; std::mem::drop(data); instance_root } fn instantiate_at( &self, scene: &mut Scene, position: Vector3<f32>, orientation: UnitQuaternion<f32>, ) -> Handle<Node> { let root = self.instantiate(scene); scene.graph[root] .local_transform_mut() .set_position(position) .set_rotation(orientation); scene.graph.update_hierarchical_data_for_descendants(root); root } fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation> { let mut retargetted_animations = Vec::new(); let data = self.data_ref(); for src_node_ref in data.scene.graph.linear_iter() { if let Some(src_player) = src_node_ref.query_component_ref::<AnimationPlayer>() { for src_anim in src_player.animations().iter() { let mut anim_copy = src_anim.clone(); // Remap animation track nodes from resource to instance. This is required // because we've made a plain copy and it has tracks with node handles mapped // to nodes of internal scene. for (i, ref_track) in src_anim.tracks().iter().enumerate() { let ref_node = &data.scene.graph[ref_track.target()]; let track = &mut anim_copy.tracks_mut()[i]; // Find instantiated node that corresponds to node in resource match graph.find_by_name(root, ref_node.name()) { Some((instance_node, _)) => { // One-to-one track mapping so there is [i] indexing. track.set_target(instance_node); } None => { track.set_target(Handle::NONE); Log::writeln( MessageKind::Error, format!( "Failed to retarget animation {:?} for node {}", data.path(), ref_node.name() ), ); } } } retargetted_animations.push(anim_copy); } } } retargetted_animations } fn retarget_animations_to_player( &self, root: Handle<Node>, dest_animation_player: Handle<Node>, graph: &mut Graph, ) -> Vec<Handle<Animation>> { let mut animation_handles = Vec::new(); let animations = self.retarget_animations_directly(root, graph); let dest_animation_player = graph[dest_animation_player] .query_component_mut::<AnimationPlayer>() .unwrap(); for animation in animations { animation_handles.push(dest_animation_player.animations_mut().add(animation)); } animation_handles } fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>> { if let Some((animation_player, _)) = graph.find(root, &mut |n| { n.query_component_ref::<AnimationPlayer>().is_some() }) { self.retarget_animations_to_player(root, animation_player, graph) } else { Default::default() } } } impl ResourceData for Model { fn path(&self) -> Cow<Path> { Cow::Borrowed(&self.path) } fn set_path(&mut self, path: PathBuf) { self.path = path; } fn as_any(&self) -> &dyn Any { self } fn as_any_mut(&mut self) -> &mut dyn Any { self } fn type_uuid(&self) -> Uuid { <Self as TypeUuidProvider>::type_uuid() } } impl Default for Model { fn default() -> Self { Self { path: PathBuf::new(), mapping: NodeMapping::UseNames, scene: Scene::new(), } } } /// Defines a way of searching materials when loading a model resource from foreign file format such as FBX. 
/// /// # Motivation /// /// Most 3d model file formats store paths to external resources (textures and other things) as absolute paths, /// which makes it impossible to use with "location-independent" applications like games. To fix that issue, the /// engine provides a few ways of resolving paths to external resources. The engine starts resolving by stripping /// everything but the file name from an external resource's path, then it uses one of the following methods to find /// a texture with the file name. It can look up the folder hierarchy using the [`MaterialSearchOptions::RecursiveUp`] /// method, or even use a global search starting from the working directory of your game /// ([`MaterialSearchOptions::WorkingDirectory`]). #[derive( Clone, Debug, Visit, PartialEq, Eq, Deserialize, Serialize, Reflect, AsRefStr, EnumString, EnumVariantNames, )] pub enum MaterialSearchOptions { /// Search in the specified materials directory. It is suitable for cases when /// your model resource uses shared textures. /// /// # Platform specific /// /// Works on every platform. MaterialsDirectory(PathBuf), /// Recursive-up search. It is suitable for cases when textures are placed /// near your model resource. This is the **default** option. /// /// # Platform specific /// /// Works on every platform. RecursiveUp, /// Global search starting from the working directory. This is the slowest option, with a lot of ambiguity - /// it may load an unexpected file when there are two or more files with the same name /// lying in different directories. /// /// # Platform specific /// /// WebAssembly - **not supported** due to the lack of a file system. WorkingDirectory, /// Try to use paths stored in the model resource directly. This option has limited use; /// it is suitable for loading animations, or any other model which does not have any materials. /// /// # Important notes /// /// RGS (native engine scenes) files should be loaded with this option by default, otherwise /// the engine won't be able to correctly find materials. UsePathDirectly, } impl Default for MaterialSearchOptions { fn default() -> Self { Self::RecursiveUp } } impl MaterialSearchOptions { /// A helper to create the MaterialsDirectory variant. pub fn materials_directory<P: AsRef<Path>>(path: P) -> Self {
/// A set of options that will be applied to a model resource when loading it from an external source. /// /// # Details /// /// The engine has a convenient way of storing import options in `.options` files. For example, you may /// have a `foo.fbx` 3d model; to change its import options, create a new file with an additional `.options` /// extension: `foo.fbx.options`. The content of an options file could be something like this: /// /// ```text /// ( /// material_search_options: RecursiveUp /// ) /// ``` /// /// Check the documentation of the structure's fields for more info about each parameter. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default, Reflect, Eq)] pub struct ModelImportOptions { /// See [`MaterialSearchOptions`] docs for more info. #[serde(default)] pub material_search_options: MaterialSearchOptions, } impl ImportOptions for ModelImportOptions {} /// All possible errors that may occur while trying to load a model from some /// data source. #[derive(Debug)] pub enum ModelLoadError { /// An error occurred while reading a data source. Visit(VisitError), /// Format is not supported. NotSupported(String), /// An error occurred while loading an FBX file. Fbx(FbxError), } impl Display for ModelLoadError { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { ModelLoadError::Visit(v) => { write!(f, "An error occurred while reading a data source {v:?}") } ModelLoadError::NotSupported(v) => { write!(f, "Model format is not supported: {v}") } ModelLoadError::Fbx(v) => v.fmt(f), } } } impl From<FbxError> for ModelLoadError { fn from(fbx: FbxError) -> Self { ModelLoadError::Fbx(fbx) } } impl From<VisitError> for ModelLoadError { fn from(e: VisitError) -> Self { ModelLoadError::Visit(e) } } impl Model { pub(crate) async fn load<P: AsRef<Path>>( path: P, serialization_context: Arc<SerializationContext>, resource_manager: ResourceManager, model_import_options: ModelImportOptions, ) -> Result<Self, ModelLoadError> { let extension = path .as_ref() .extension() .unwrap_or_default() .to_string_lossy() .as_ref() .to_lowercase(); let (scene, mapping) = match extension.as_ref() { "fbx" => { let mut scene = Scene::new(); if let Some(filename) = path.as_ref().file_name() { let root = scene.graph.get_root(); scene.graph[root].set_name(&filename.to_string_lossy()); } fbx::load_to_scene( &mut scene, resource_manager, path.as_ref(), &model_import_options, ) .await?; // Set NodeMapping::UseNames as mapping here because FBX does not have // any persistent unique ids, and we have to use names. (scene, NodeMapping::UseNames) } // Scene can be used directly as model resource. Such scenes can be created in // Fyroxed. "rgs" => ( SceneLoader::from_file( path.as_ref(), serialization_context, resource_manager.clone(), ) .await? .finish() .await, NodeMapping::UseHandles, ), // TODO: Add more formats. _ => { return Err(ModelLoadError::NotSupported(format!( "Unsupported model resource format: {}", extension ))) } }; Ok(Self { path: path.as_ref().to_owned(), scene, mapping, }) } /// Returns a shared reference to the internal scene; there is no way to obtain /// a mutable reference to the inner scene because a resource is an immutable source /// of data. pub fn get_scene(&self) -> &Scene { &self.scene } /// Searches the model for a node with the specified name, starting from the root node. Returns a tuple with a /// handle and a reference to the found node. If nothing is found, it returns [`None`]. 
pub fn find_node_by_name(&self, name: &str) -> Option<(Handle<Node>, &Node)> { self.scene.graph.find_by_name_from_root(name) } pub(crate) fn get_scene_mut(&mut self) -> &mut Scene { &mut self.scene } }
Self::MaterialsDirectory(path.as_ref().to_path_buf()) } }
random_line_split
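For orientation, the `ModelResourceExtension` trait that this entry splits up is used roughly as follows. A hedged sketch, written as if inside the same module so the module's own imports are in scope; the call site and names are assumptions, while `instantiate_at` and its signature come from the code above:

```rust
// Sketch (assumed call site): spawn a prefab instance at the origin with
// a quarter-turn yaw rotation. `model` and `scene` come from game code.
fn spawn_at_origin(model: &ModelResource, scene: &mut Scene) -> Handle<Node> {
    model.instantiate_at(
        scene,
        Vector3::new(0.0, 0.0, 0.0),
        // Quarter-turn around the vertical (Y) axis.
        UnitQuaternion::from_axis_angle(&Vector3::y_axis(), std::f32::consts::FRAC_PI_2),
    )
}
```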
mod.rs
#![warn(missing_docs)] //! Contains all data structures and methods to work with model resources. //! //! Model is an isolated scene that is used to create copies of its data - this //! process is known as `instantiation`. Isolation in this context means that //! such a scene cannot be modified, rendered, etc. It is just a data source. //! //! All instances will have references to the resource they were created from - this //! will help to get the correct vertex and index buffers when loading a save file, //! the loader will just take all needed data from the resource, so we don't need to store //! such data in the save file. This mechanism also works perfectly when you change a //! resource in an external editor (3ds Max, Maya, Blender, etc.) - the engine will assign //! the correct visual data when loading a saved game. //! //! # Supported formats //! //! Currently only the FBX (a common format in the game industry for storing complex 3d models) //! and RGS (native Fyroxed format) formats are supported. use crate::{ animation::Animation, asset::{ manager::ResourceManager, options::ImportOptions, Resource, ResourceData, MODEL_RESOURCE_UUID, }, core::{ algebra::{UnitQuaternion, Vector3}, log::{Log, MessageKind}, pool::Handle, reflect::prelude::*, uuid::Uuid, variable::mark_inheritable_properties_non_modified, visitor::{Visit, VisitError, VisitResult, Visitor}, TypeUuidProvider, }, engine::SerializationContext, resource::fbx::{self, error::FbxError}, scene::{ animation::AnimationPlayer, graph::{map::NodeHandleMap, Graph}, node::Node, Scene, SceneLoader, }, }; use serde::{Deserialize, Serialize}; use std::{ any::Any, borrow::Cow, fmt::{Display, Formatter}, path::{Path, PathBuf}, sync::Arc, }; use strum_macros::{AsRefStr, EnumString, EnumVariantNames}; pub mod loader; #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Reflect)] #[repr(u32)] pub(crate) enum NodeMapping { UseNames = 0, UseHandles = 1, } /// See module docs. #[derive(Debug, Visit, Reflect)] pub struct Model { pub(crate) path: PathBuf, #[visit(skip)] pub(crate) mapping: NodeMapping, #[visit(skip)] scene: Scene, } impl TypeUuidProvider for Model { fn type_uuid() -> Uuid { MODEL_RESOURCE_UUID } } /// Type alias for model resources. pub type ModelResource = Resource<Model>; /// Extension trait for model resources. pub trait ModelResourceExtension: Sized { /// Tries to instantiate a model from the given resource. fn instantiate_from( model: ModelResource, model_data: &Model, handle: Handle<Node>, dest_graph: &mut Graph, ) -> (Handle<Node>, NodeHandleMap); /// Tries to instantiate a model from the given resource. fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node>; /// Instantiates a prefab and places it at the specified position and orientation in global coordinates. fn instantiate_at( &self, scene: &mut Scene, position: Vector3<f32>, orientation: UnitQuaternion<f32>, ) -> Handle<Node>; /// Tries to retarget animations from the given model resource to a node hierarchy starting /// from `root` on a given scene. /// /// Animation retargeting allows you to "transfer" animation from a model to a model /// instance on a scene. Imagine you have a character that should have multiple animations /// like idle, run, shoot, walk, etc., and you want to store each animation in a separate /// file. 
Then, when you create a character on a level, you want to have all possible /// animations assigned to it; this is where this function comes into play: /// you just load a model of your character with a skeleton, but without any animations, /// then you load several "models" which have only a skeleton with some animation (such /// "models" can be considered as "animation" resources). After this you need to /// instantiate the model on your level and retarget all animations you need to that instance /// from other "models". All you have after this is a handle to a model and a bunch of /// handles to specific animations. After this, animations can be blended in any combination /// you need. For example, the idle animation can be blended with the walk animation when your /// character starts walking. /// /// # Notes /// /// Most of the 3d model formats can contain only one animation, so in most cases /// this function will return a vector with only one animation. fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation>; /// Tries to retarget animations from the given model resource to a node hierarchy starting /// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically /// adds retargeted animations to the specified animation player in the hierarchy of the given `root`. /// /// # Panic /// /// Panics if `dest_animation_player` is an invalid handle, or the node does not have an [`AnimationPlayer`] /// component. fn retarget_animations_to_player( &self, root: Handle<Node>, dest_animation_player: Handle<Node>, graph: &mut Graph, ) -> Vec<Handle<Animation>>; /// Tries to retarget animations from the given model resource to a node hierarchy starting /// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically /// adds retargeted animations to the first animation player in the hierarchy of the given `root`. /// /// # Panic /// /// Panics if there's no animation player in the given hierarchy (descendant nodes of `root`). fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>>; } impl ModelResourceExtension for ModelResource { fn instantiate_from( model: ModelResource, model_data: &Model, handle: Handle<Node>, dest_graph: &mut Graph, ) -> (Handle<Node>, NodeHandleMap) { let (root, old_to_new) = model_data .scene .graph .copy_node(handle, dest_graph, &mut |_, _| true); // Notify instantiated nodes about resource they were created from. let mut stack = vec![root]; while let Some(node_handle) = stack.pop() { let node = &mut dest_graph[node_handle]; node.resource = Some(model.clone()); // Reset resource instance root flag, this is needed because a node after instantiation cannot // be a root anymore. node.is_resource_instance_root = false; // Reset inheritable properties, so property inheritance system will take properties // from parent objects on resolve stage. node.as_reflect_mut(&mut |node| mark_inheritable_properties_non_modified(node)); // Continue on children. stack.extend_from_slice(node.children()); } // Fill original handles to instances. 
for (&old, &new) in old_to_new.inner().iter() { dest_graph[new].original_handle_in_resource = old; } dest_graph.update_hierarchical_data_for_descendants(root); (root, old_to_new) } fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node> { let data = self.data_ref(); let instance_root = Self::instantiate_from( self.clone(), &data, data.scene.graph.get_root(), &mut dest_scene.graph, ) .0; dest_scene.graph[instance_root].is_resource_instance_root = true; std::mem::drop(data); instance_root } fn instantiate_at( &self, scene: &mut Scene, position: Vector3<f32>, orientation: UnitQuaternion<f32>, ) -> Handle<Node> { let root = self.instantiate(scene); scene.graph[root] .local_transform_mut() .set_position(position) .set_rotation(orientation); scene.graph.update_hierarchical_data_for_descendants(root); root } fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation> { let mut retargetted_animations = Vec::new(); let data = self.data_ref(); for src_node_ref in data.scene.graph.linear_iter() { if let Some(src_player) = src_node_ref.query_component_ref::<AnimationPlayer>() { for src_anim in src_player.animations().iter() { let mut anim_copy = src_anim.clone(); // Remap animation track nodes from resource to instance. This is required // because we've made a plain copy and it has tracks with node handles mapped // to nodes of internal scene. for (i, ref_track) in src_anim.tracks().iter().enumerate() { let ref_node = &data.scene.graph[ref_track.target()]; let track = &mut anim_copy.tracks_mut()[i]; // Find instantiated node that corresponds to node in resource match graph.find_by_name(root, ref_node.name()) { Some((instance_node, _)) => { // One-to-one track mapping so there is [i] indexing. track.set_target(instance_node); } None => { track.set_target(Handle::NONE); Log::writeln( MessageKind::Error, format!( "Failed to retarget animation {:?} for node {}", data.path(), ref_node.name() ), ); } } } retargetted_animations.push(anim_copy); } } } retargetted_animations } fn retarget_animations_to_player( &self, root: Handle<Node>, dest_animation_player: Handle<Node>, graph: &mut Graph, ) -> Vec<Handle<Animation>> { let mut animation_handles = Vec::new(); let animations = self.retarget_animations_directly(root, graph); let dest_animation_player = graph[dest_animation_player] .query_component_mut::<AnimationPlayer>() .unwrap(); for animation in animations { animation_handles.push(dest_animation_player.animations_mut().add(animation)); } animation_handles } fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>> { if let Some((animation_player, _)) = graph.find(root, &mut |n| { n.query_component_ref::<AnimationPlayer>().is_some() }) { self.retarget_animations_to_player(root, animation_player, graph) } else { Default::default() } } } impl ResourceData for Model { fn path(&self) -> Cow<Path> { Cow::Borrowed(&self.path) } fn set_path(&mut self, path: PathBuf) { self.path = path; } fn as_any(&self) -> &dyn Any { self } fn as_any_mut(&mut self) -> &mut dyn Any { self } fn type_uuid(&self) -> Uuid { <Self as TypeUuidProvider>::type_uuid() } } impl Default for Model { fn default() -> Self { Self { path: PathBuf::new(), mapping: NodeMapping::UseNames, scene: Scene::new(), } } } /// Defines a way of searching materials when loading a model resource from foreign file format such as FBX. 
/// /// # Motivation /// /// Most 3d model file formats store paths to external resources (textures and other things) as absolute paths, /// which makes it impossible to use with "location-independent" applications like games. To fix that issue, the /// engine provides a few ways of resolving paths to external resources. The engine starts resolving by stripping /// everything but the file name from an external resource's path, then it uses one of the following methods to find /// a texture with the file name. It can look up the folder hierarchy using the [`MaterialSearchOptions::RecursiveUp`] /// method, or even use a global search starting from the working directory of your game /// ([`MaterialSearchOptions::WorkingDirectory`]). #[derive( Clone, Debug, Visit, PartialEq, Eq, Deserialize, Serialize, Reflect, AsRefStr, EnumString, EnumVariantNames, )] pub enum MaterialSearchOptions { /// Search in the specified materials directory. It is suitable for cases when /// your model resource uses shared textures. /// /// # Platform specific /// /// Works on every platform. MaterialsDirectory(PathBuf), /// Recursive-up search. It is suitable for cases when textures are placed /// near your model resource. This is the **default** option. /// /// # Platform specific /// /// Works on every platform. RecursiveUp, /// Global search starting from the working directory. This is the slowest option, with a lot of ambiguity - /// it may load an unexpected file when there are two or more files with the same name /// lying in different directories. /// /// # Platform specific /// /// WebAssembly - **not supported** due to the lack of a file system. WorkingDirectory, /// Try to use paths stored in the model resource directly. This option has limited use; /// it is suitable for loading animations, or any other model which does not have any materials. /// /// # Important notes /// /// RGS (native engine scenes) files should be loaded with this option by default, otherwise /// the engine won't be able to correctly find materials. UsePathDirectly, } impl Default for MaterialSearchOptions { fn default() -> Self { Self::RecursiveUp } } impl MaterialSearchOptions { /// A helper to create the MaterialsDirectory variant. pub fn materials_directory<P: AsRef<Path>>(path: P) -> Self { Self::MaterialsDirectory(path.as_ref().to_path_buf()) } } /// A set of options that will be applied to a model resource when loading it from an external source. /// /// # Details /// /// The engine has a convenient way of storing import options in `.options` files. For example, you may /// have a `foo.fbx` 3d model; to change its import options, create a new file with an additional `.options` /// extension: `foo.fbx.options`. The content of an options file could be something like this: /// /// ```text /// ( /// material_search_options: RecursiveUp /// ) /// ``` /// /// Check the documentation of the structure's fields for more info about each parameter. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default, Reflect, Eq)] pub struct ModelImportOptions { /// See [`MaterialSearchOptions`] docs for more info. #[serde(default)] pub material_search_options: MaterialSearchOptions, } impl ImportOptions for ModelImportOptions {} /// All possible errors that may occur while trying to load a model from some /// data source. #[derive(Debug)] pub enum ModelLoadError { /// An error occurred while reading a data source. Visit(VisitError), /// Format is not supported. NotSupported(String), /// An error occurred while loading an FBX file. 
Fbx(FbxError), } impl Display for ModelLoadError { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { ModelLoadError::Visit(v) =>
ModelLoadError::NotSupported(v) => { write!(f, "Model format is not supported: {v}") } ModelLoadError::Fbx(v) => v.fmt(f), } } } impl From<FbxError> for ModelLoadError { fn from(fbx: FbxError) -> Self { ModelLoadError::Fbx(fbx) } } impl From<VisitError> for ModelLoadError { fn from(e: VisitError) -> Self { ModelLoadError::Visit(e) } } impl Model { pub(crate) async fn load<P: AsRef<Path>>( path: P, serialization_context: Arc<SerializationContext>, resource_manager: ResourceManager, model_import_options: ModelImportOptions, ) -> Result<Self, ModelLoadError> { let extension = path .as_ref() .extension() .unwrap_or_default() .to_string_lossy() .as_ref() .to_lowercase(); let (scene, mapping) = match extension.as_ref() { "fbx" => { let mut scene = Scene::new(); if let Some(filename) = path.as_ref().file_name() { let root = scene.graph.get_root(); scene.graph[root].set_name(&filename.to_string_lossy()); } fbx::load_to_scene( &mut scene, resource_manager, path.as_ref(), &model_import_options, ) .await?; // Set NodeMapping::UseNames as mapping here because FBX does not have // any persistent unique ids, and we have to use names. (scene, NodeMapping::UseNames) } // Scene can be used directly as model resource. Such scenes can be created in // Fyroxed. "rgs" => ( SceneLoader::from_file( path.as_ref(), serialization_context, resource_manager.clone(), ) .await? .finish() .await, NodeMapping::UseHandles, ), // TODO: Add more formats. _ => { return Err(ModelLoadError::NotSupported(format!( "Unsupported model resource format: {}", extension ))) } }; Ok(Self { path: path.as_ref().to_owned(), scene, mapping, }) } /// Returns a shared reference to the internal scene; there is no way to obtain /// a mutable reference to the inner scene because a resource is an immutable source /// of data. pub fn get_scene(&self) -> &Scene { &self.scene } /// Searches the model for a node with the specified name, starting from the root node. Returns a tuple with a /// handle and a reference to the found node. If nothing is found, it returns [`None`]. pub fn find_node_by_name(&self, name: &str) -> Option<(Handle<Node>, &Node)> { self.scene.graph.find_by_name_from_root(name) } pub(crate) fn get_scene_mut(&mut self) -> &mut Scene { &mut self.scene } }
{ write!(f, "An error occurred while reading a data source {v:?}") }
conditional_block
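The `.options` sidecar described in this entry is a RON-style file. Assuming the serde representation matches the enum above, a `foo.fbx.options` file that points material search at a shared folder would plausibly look like this (the directory path is illustrative):

```text
(
    material_search_options: MaterialsDirectory("data/textures"),
)
```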
mod.rs
#![warn(missing_docs)] //! Contains all data structures and methods to work with model resources. //! //! Model is an isolated scene that is used to create copies of its data - this //! process is known as `instantiation`. Isolation in this context means that //! such a scene cannot be modified, rendered, etc. It is just a data source. //! //! All instances will have references to the resource they were created from - this //! will help to get the correct vertex and index buffers when loading a save file, //! the loader will just take all needed data from the resource, so we don't need to store //! such data in the save file. This mechanism also works perfectly when you change a //! resource in an external editor (3ds Max, Maya, Blender, etc.) - the engine will assign //! the correct visual data when loading a saved game. //! //! # Supported formats //! //! Currently only the FBX (a common format in the game industry for storing complex 3d models) //! and RGS (native Fyroxed format) formats are supported. use crate::{ animation::Animation, asset::{ manager::ResourceManager, options::ImportOptions, Resource, ResourceData, MODEL_RESOURCE_UUID, }, core::{ algebra::{UnitQuaternion, Vector3}, log::{Log, MessageKind}, pool::Handle, reflect::prelude::*, uuid::Uuid, variable::mark_inheritable_properties_non_modified, visitor::{Visit, VisitError, VisitResult, Visitor}, TypeUuidProvider, }, engine::SerializationContext, resource::fbx::{self, error::FbxError}, scene::{ animation::AnimationPlayer, graph::{map::NodeHandleMap, Graph}, node::Node, Scene, SceneLoader, }, }; use serde::{Deserialize, Serialize}; use std::{ any::Any, borrow::Cow, fmt::{Display, Formatter}, path::{Path, PathBuf}, sync::Arc, }; use strum_macros::{AsRefStr, EnumString, EnumVariantNames}; pub mod loader; #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Reflect)] #[repr(u32)] pub(crate) enum NodeMapping { UseNames = 0, UseHandles = 1, } /// See module docs. #[derive(Debug, Visit, Reflect)] pub struct Model { pub(crate) path: PathBuf, #[visit(skip)] pub(crate) mapping: NodeMapping, #[visit(skip)] scene: Scene, } impl TypeUuidProvider for Model { fn type_uuid() -> Uuid { MODEL_RESOURCE_UUID } } /// Type alias for model resources. pub type ModelResource = Resource<Model>; /// Extension trait for model resources. pub trait ModelResourceExtension: Sized { /// Tries to instantiate a model from the given resource. fn instantiate_from( model: ModelResource, model_data: &Model, handle: Handle<Node>, dest_graph: &mut Graph, ) -> (Handle<Node>, NodeHandleMap); /// Tries to instantiate a model from the given resource. fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node>; /// Instantiates a prefab and places it at the specified position and orientation in global coordinates. fn instantiate_at( &self, scene: &mut Scene, position: Vector3<f32>, orientation: UnitQuaternion<f32>, ) -> Handle<Node>; /// Tries to retarget animations from the given model resource to a node hierarchy starting /// from `root` on a given scene. /// /// Animation retargeting allows you to "transfer" animation from a model to a model /// instance on a scene. Imagine you have a character that should have multiple animations /// like idle, run, shoot, walk, etc., and you want to store each animation in a separate /// file. 
Then, when you create a character on a level, you want to have all possible /// animations assigned to it; this is where this function comes into play: /// you just load a model of your character with a skeleton, but without any animations, /// then you load several "models" which have only a skeleton with some animation (such /// "models" can be considered as "animation" resources). After this you need to /// instantiate the model on your level and retarget all animations you need to that instance /// from other "models". All you have after this is a handle to a model and a bunch of /// handles to specific animations. After this, animations can be blended in any combination /// you need. For example, the idle animation can be blended with the walk animation when your /// character starts walking. /// /// # Notes /// /// Most of the 3d model formats can contain only one animation, so in most cases /// this function will return a vector with only one animation. fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation>; /// Tries to retarget animations from the given model resource to a node hierarchy starting /// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically /// adds retargeted animations to the specified animation player in the hierarchy of the given `root`. /// /// # Panic /// /// Panics if `dest_animation_player` is an invalid handle, or the node does not have an [`AnimationPlayer`] /// component. fn retarget_animations_to_player( &self, root: Handle<Node>, dest_animation_player: Handle<Node>, graph: &mut Graph, ) -> Vec<Handle<Animation>>; /// Tries to retarget animations from the given model resource to a node hierarchy starting /// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically /// adds retargeted animations to the first animation player in the hierarchy of the given `root`. /// /// # Panic /// /// Panics if there's no animation player in the given hierarchy (descendant nodes of `root`). fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>>; } impl ModelResourceExtension for ModelResource { fn instantiate_from( model: ModelResource, model_data: &Model, handle: Handle<Node>, dest_graph: &mut Graph, ) -> (Handle<Node>, NodeHandleMap) { let (root, old_to_new) = model_data .scene .graph .copy_node(handle, dest_graph, &mut |_, _| true); // Notify instantiated nodes about resource they were created from. let mut stack = vec![root]; while let Some(node_handle) = stack.pop() { let node = &mut dest_graph[node_handle]; node.resource = Some(model.clone()); // Reset resource instance root flag, this is needed because a node after instantiation cannot // be a root anymore. node.is_resource_instance_root = false; // Reset inheritable properties, so property inheritance system will take properties // from parent objects on resolve stage. node.as_reflect_mut(&mut |node| mark_inheritable_properties_non_modified(node)); // Continue on children. stack.extend_from_slice(node.children()); } // Fill original handles to instances. 
for (&old, &new) in old_to_new.inner().iter() { dest_graph[new].original_handle_in_resource = old; } dest_graph.update_hierarchical_data_for_descendants(root); (root, old_to_new) } fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node> { let data = self.data_ref(); let instance_root = Self::instantiate_from( self.clone(), &data, data.scene.graph.get_root(), &mut dest_scene.graph, ) .0; dest_scene.graph[instance_root].is_resource_instance_root = true; std::mem::drop(data); instance_root } fn instantiate_at( &self, scene: &mut Scene, position: Vector3<f32>, orientation: UnitQuaternion<f32>, ) -> Handle<Node> { let root = self.instantiate(scene); scene.graph[root] .local_transform_mut() .set_position(position) .set_rotation(orientation); scene.graph.update_hierarchical_data_for_descendants(root); root } fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation> { let mut retargetted_animations = Vec::new(); let data = self.data_ref(); for src_node_ref in data.scene.graph.linear_iter() { if let Some(src_player) = src_node_ref.query_component_ref::<AnimationPlayer>() { for src_anim in src_player.animations().iter() { let mut anim_copy = src_anim.clone(); // Remap animation track nodes from resource to instance. This is required // because we've made a plain copy and it has tracks with node handles mapped // to nodes of internal scene. for (i, ref_track) in src_anim.tracks().iter().enumerate() { let ref_node = &data.scene.graph[ref_track.target()]; let track = &mut anim_copy.tracks_mut()[i]; // Find instantiated node that corresponds to node in resource match graph.find_by_name(root, ref_node.name()) { Some((instance_node, _)) => { // One-to-one track mapping so there is [i] indexing. track.set_target(instance_node); } None => { track.set_target(Handle::NONE); Log::writeln( MessageKind::Error, format!( "Failed to retarget animation {:?} for node {}", data.path(), ref_node.name() ), ); } } } retargetted_animations.push(anim_copy); } } } retargetted_animations } fn retarget_animations_to_player( &self, root: Handle<Node>, dest_animation_player: Handle<Node>, graph: &mut Graph, ) -> Vec<Handle<Animation>> { let mut animation_handles = Vec::new(); let animations = self.retarget_animations_directly(root, graph); let dest_animation_player = graph[dest_animation_player] .query_component_mut::<AnimationPlayer>() .unwrap(); for animation in animations { animation_handles.push(dest_animation_player.animations_mut().add(animation)); } animation_handles } fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>> { if let Some((animation_player, _)) = graph.find(root, &mut |n| { n.query_component_ref::<AnimationPlayer>().is_some() }) { self.retarget_animations_to_player(root, animation_player, graph) } else { Default::default() } } } impl ResourceData for Model { fn path(&self) -> Cow<Path> { Cow::Borrowed(&self.path) } fn set_path(&mut self, path: PathBuf) { self.path = path; } fn as_any(&self) -> &dyn Any { self } fn as_any_mut(&mut self) -> &mut dyn Any { self } fn type_uuid(&self) -> Uuid { <Self as TypeUuidProvider>::type_uuid() } } impl Default for Model { fn default() -> Self { Self { path: PathBuf::new(), mapping: NodeMapping::UseNames, scene: Scene::new(), } } } /// Defines a way of searching materials when loading a model resource from foreign file format such as FBX. 
/// /// # Motivation /// /// Most 3d model file formats store paths to external resources (textures and other things) as absolute paths, /// which makes it impossible to use with "location-independent" applications like games. To fix that issue, the /// engine provides a few ways of resolving paths to external resources. The engine starts resolving by stripping /// everything but the file name from an external resource's path, then it uses one of the following methods to find /// a texture with the file name. It can look up the folder hierarchy using the [`MaterialSearchOptions::RecursiveUp`] /// method, or even use a global search starting from the working directory of your game /// ([`MaterialSearchOptions::WorkingDirectory`]). #[derive( Clone, Debug, Visit, PartialEq, Eq, Deserialize, Serialize, Reflect, AsRefStr, EnumString, EnumVariantNames, )] pub enum MaterialSearchOptions { /// Search in the specified materials directory. It is suitable for cases when /// your model resource uses shared textures. /// /// # Platform specific /// /// Works on every platform. MaterialsDirectory(PathBuf), /// Recursive-up search. It is suitable for cases when textures are placed /// near your model resource. This is the **default** option. /// /// # Platform specific /// /// Works on every platform. RecursiveUp, /// Global search starting from the working directory. This is the slowest option, with a lot of ambiguity - /// it may load an unexpected file when there are two or more files with the same name /// lying in different directories. /// /// # Platform specific /// /// WebAssembly - **not supported** due to the lack of a file system. WorkingDirectory, /// Try to use paths stored in the model resource directly. This option has limited use; /// it is suitable for loading animations, or any other model which does not have any materials. /// /// # Important notes /// /// RGS (native engine scenes) files should be loaded with this option by default, otherwise /// the engine won't be able to correctly find materials. UsePathDirectly, } impl Default for MaterialSearchOptions { fn default() -> Self { Self::RecursiveUp } } impl MaterialSearchOptions { /// A helper to create the MaterialsDirectory variant. pub fn materials_directory<P: AsRef<Path>>(path: P) -> Self { Self::MaterialsDirectory(path.as_ref().to_path_buf()) } } /// A set of options that will be applied to a model resource when loading it from an external source. /// /// # Details /// /// The engine has a convenient way of storing import options in `.options` files. For example, you may /// have a `foo.fbx` 3d model; to change its import options, create a new file with an additional `.options` /// extension: `foo.fbx.options`. The content of an options file could be something like this: /// /// ```text /// ( /// material_search_options: RecursiveUp /// ) /// ``` /// /// Check the documentation of the structure's fields for more info about each parameter. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default, Reflect, Eq)] pub struct ModelImportOptions { /// See [`MaterialSearchOptions`] docs for more info. #[serde(default)] pub material_search_options: MaterialSearchOptions, } impl ImportOptions for ModelImportOptions {} /// All possible errors that may occur while trying to load a model from some /// data source. #[derive(Debug)] pub enum ModelLoadError { /// An error occurred while reading a data source. Visit(VisitError), /// Format is not supported. NotSupported(String), /// An error occurred while loading an FBX file. 
Fbx(FbxError), } impl Display for ModelLoadError { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { ModelLoadError::Visit(v) => { write!(f, "An error occurred while reading a data source {v:?}") } ModelLoadError::NotSupported(v) => { write!(f, "Model format is not supported: {v}") } ModelLoadError::Fbx(v) => v.fmt(f), } } } impl From<FbxError> for ModelLoadError { fn from(fbx: FbxError) -> Self { ModelLoadError::Fbx(fbx) } } impl From<VisitError> for ModelLoadError { fn from(e: VisitError) -> Self
} impl Model { pub(crate) async fn load<P: AsRef<Path>>( path: P, serialization_context: Arc<SerializationContext>, resource_manager: ResourceManager, model_import_options: ModelImportOptions, ) -> Result<Self, ModelLoadError> { let extension = path .as_ref() .extension() .unwrap_or_default() .to_string_lossy() .as_ref() .to_lowercase(); let (scene, mapping) = match extension.as_ref() { "fbx" => { let mut scene = Scene::new(); if let Some(filename) = path.as_ref().file_name() { let root = scene.graph.get_root(); scene.graph[root].set_name(&filename.to_string_lossy()); } fbx::load_to_scene( &mut scene, resource_manager, path.as_ref(), &model_import_options, ) .await?; // Set NodeMapping::UseNames as mapping here because FBX does not have // any persistent unique ids, and we have to use names. (scene, NodeMapping::UseNames) } // Scene can be used directly as model resource. Such scenes can be created in // Fyroxed. "rgs" => ( SceneLoader::from_file( path.as_ref(), serialization_context, resource_manager.clone(), ) .await? .finish() .await, NodeMapping::UseHandles, ), // TODO: Add more formats. _ => { return Err(ModelLoadError::NotSupported(format!( "Unsupported model resource format: {}", extension ))) } }; Ok(Self { path: path.as_ref().to_owned(), scene, mapping, }) } /// Returns a shared reference to the internal scene; there is no way to obtain /// a mutable reference to the inner scene because a resource is an immutable source /// of data. pub fn get_scene(&self) -> &Scene { &self.scene } /// Searches the model for a node with the specified name, starting from the root node. Returns a tuple with a /// handle and a reference to the found node. If nothing is found, it returns [`None`]. pub fn find_node_by_name(&self, name: &str) -> Option<(Handle<Node>, &Node)> { self.scene.graph.find_by_name_from_root(name) } pub(crate) fn get_scene_mut(&mut self) -> &mut Scene { &mut self.scene } }
{ ModelLoadError::Visit(e) }
identifier_body
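The retargeting workflow described by this entry's doc comments reduces to a single call once the character is instantiated. A hedged sketch, written as if inside the same module; the `walk_resource` and `character_root` names are assumptions, while `retarget_animations` comes from the code above:

```rust
// Sketch: transfer animations from an "animation-only" model resource
// onto an already-instantiated character hierarchy.
fn attach_walk_animations(
    walk_resource: &ModelResource,
    character_root: Handle<Node>,
    graph: &mut Graph,
) -> Vec<Handle<Animation>> {
    // Finds the first AnimationPlayer under `character_root` and adds the
    // retargeted animations to it; per the implementation above, an empty
    // vector is returned if no player exists in the hierarchy.
    walk_resource.retarget_animations(character_root, graph)
}
```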
mod.rs
#![warn(missing_docs)] //! Contains all data structures and methods to work with model resources. //! //! Model is an isolated scene that is used to create copies of its data - this //! process is known as `instantiation`. Isolation in this context means that //! such a scene cannot be modified, rendered, etc. It is just a data source. //! //! All instances will have references to the resource they were created from - this //! will help to get the correct vertex and index buffers when loading a save file, //! the loader will just take all needed data from the resource, so we don't need to store //! such data in the save file. This mechanism also works perfectly when you change a //! resource in an external editor (3ds Max, Maya, Blender, etc.) - the engine will assign //! the correct visual data when loading a saved game. //! //! # Supported formats //! //! Currently only the FBX (a common format in the game industry for storing complex 3d models) //! and RGS (native Fyroxed format) formats are supported. use crate::{ animation::Animation, asset::{ manager::ResourceManager, options::ImportOptions, Resource, ResourceData, MODEL_RESOURCE_UUID, }, core::{ algebra::{UnitQuaternion, Vector3}, log::{Log, MessageKind}, pool::Handle, reflect::prelude::*, uuid::Uuid, variable::mark_inheritable_properties_non_modified, visitor::{Visit, VisitError, VisitResult, Visitor}, TypeUuidProvider, }, engine::SerializationContext, resource::fbx::{self, error::FbxError}, scene::{ animation::AnimationPlayer, graph::{map::NodeHandleMap, Graph}, node::Node, Scene, SceneLoader, }, }; use serde::{Deserialize, Serialize}; use std::{ any::Any, borrow::Cow, fmt::{Display, Formatter}, path::{Path, PathBuf}, sync::Arc, }; use strum_macros::{AsRefStr, EnumString, EnumVariantNames}; pub mod loader; #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Reflect)] #[repr(u32)] pub(crate) enum NodeMapping { UseNames = 0, UseHandles = 1, } /// See module docs. #[derive(Debug, Visit, Reflect)] pub struct
{ pub(crate) path: PathBuf, #[visit(skip)] pub(crate) mapping: NodeMapping, #[visit(skip)] scene: Scene, } impl TypeUuidProvider for Model { fn type_uuid() -> Uuid { MODEL_RESOURCE_UUID } } /// Type alias for model resources. pub type ModelResource = Resource<Model>; /// Extension trait for model resources. pub trait ModelResourceExtension: Sized { /// Tries to instantiate a model from the given resource. fn instantiate_from( model: ModelResource, model_data: &Model, handle: Handle<Node>, dest_graph: &mut Graph, ) -> (Handle<Node>, NodeHandleMap); /// Tries to instantiate a model from the given resource. fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node>; /// Instantiates a prefab and places it at the specified position and orientation in global coordinates. fn instantiate_at( &self, scene: &mut Scene, position: Vector3<f32>, orientation: UnitQuaternion<f32>, ) -> Handle<Node>; /// Tries to retarget animations from the given model resource to a node hierarchy starting /// from `root` on a given scene. /// /// Animation retargeting allows you to "transfer" animation from a model to a model /// instance on a scene. Imagine you have a character that should have multiple animations /// like idle, run, shoot, walk, etc., and you want to store each animation in a separate /// file. Then, when you create a character on a level, you want to have all possible /// animations assigned to it; this is where this function comes into play: /// you just load a model of your character with a skeleton, but without any animations, /// then you load several "models" which have only a skeleton with some animation (such /// "models" can be considered as "animation" resources). After this you need to /// instantiate the model on your level and retarget all animations you need to that instance /// from other "models". All you have after this is a handle to a model and a bunch of /// handles to specific animations. After this, animations can be blended in any combination /// you need. For example, the idle animation can be blended with the walk animation when your /// character starts walking. /// /// # Notes /// /// Most of the 3d model formats can contain only one animation, so in most cases /// this function will return a vector with only one animation. fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation>; /// Tries to retarget animations from the given model resource to a node hierarchy starting /// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically /// adds retargeted animations to the specified animation player in the hierarchy of the given `root`. /// /// # Panic /// /// Panics if `dest_animation_player` is an invalid handle, or the node does not have an [`AnimationPlayer`] /// component. fn retarget_animations_to_player( &self, root: Handle<Node>, dest_animation_player: Handle<Node>, graph: &mut Graph, ) -> Vec<Handle<Animation>>; /// Tries to retarget animations from the given model resource to a node hierarchy starting /// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically /// adds retargeted animations to the first animation player in the hierarchy of the given `root`. /// /// # Panic /// /// Panics if there's no animation player in the given hierarchy (descendant nodes of `root`). 
fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>>; } impl ModelResourceExtension for ModelResource { fn instantiate_from( model: ModelResource, model_data: &Model, handle: Handle<Node>, dest_graph: &mut Graph, ) -> (Handle<Node>, NodeHandleMap) { let (root, old_to_new) = model_data .scene .graph .copy_node(handle, dest_graph, &mut |_, _| true); // Notify instantiated nodes about resource they were created from. let mut stack = vec![root]; while let Some(node_handle) = stack.pop() { let node = &mut dest_graph[node_handle]; node.resource = Some(model.clone()); // Reset resource instance root flag, this is needed because a node after instantiation cannot // be a root anymore. node.is_resource_instance_root = false; // Reset inheritable properties, so property inheritance system will take properties // from parent objects on resolve stage. node.as_reflect_mut(&mut |node| mark_inheritable_properties_non_modified(node)); // Continue on children. stack.extend_from_slice(node.children()); } // Fill original handles to instances. for (&old, &new) in old_to_new.inner().iter() { dest_graph[new].original_handle_in_resource = old; } dest_graph.update_hierarchical_data_for_descendants(root); (root, old_to_new) } fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node> { let data = self.data_ref(); let instance_root = Self::instantiate_from( self.clone(), &data, data.scene.graph.get_root(), &mut dest_scene.graph, ) .0; dest_scene.graph[instance_root].is_resource_instance_root = true; std::mem::drop(data); instance_root } fn instantiate_at( &self, scene: &mut Scene, position: Vector3<f32>, orientation: UnitQuaternion<f32>, ) -> Handle<Node> { let root = self.instantiate(scene); scene.graph[root] .local_transform_mut() .set_position(position) .set_rotation(orientation); scene.graph.update_hierarchical_data_for_descendants(root); root } fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation> { let mut retargetted_animations = Vec::new(); let data = self.data_ref(); for src_node_ref in data.scene.graph.linear_iter() { if let Some(src_player) = src_node_ref.query_component_ref::<AnimationPlayer>() { for src_anim in src_player.animations().iter() { let mut anim_copy = src_anim.clone(); // Remap animation track nodes from resource to instance. This is required // because we've made a plain copy and it has tracks with node handles mapped // to nodes of internal scene. for (i, ref_track) in src_anim.tracks().iter().enumerate() { let ref_node = &data.scene.graph[ref_track.target()]; let track = &mut anim_copy.tracks_mut()[i]; // Find instantiated node that corresponds to node in resource match graph.find_by_name(root, ref_node.name()) { Some((instance_node, _)) => { // One-to-one track mapping so there is [i] indexing. 
track.set_target(instance_node); } None => { track.set_target(Handle::NONE); Log::writeln( MessageKind::Error, format!( "Failed to retarget animation {:?} for node {}", data.path(), ref_node.name() ), ); } } } retargetted_animations.push(anim_copy); } } } retargetted_animations } fn retarget_animations_to_player( &self, root: Handle<Node>, dest_animation_player: Handle<Node>, graph: &mut Graph, ) -> Vec<Handle<Animation>> { let mut animation_handles = Vec::new(); let animations = self.retarget_animations_directly(root, graph); let dest_animation_player = graph[dest_animation_player] .query_component_mut::<AnimationPlayer>() .unwrap(); for animation in animations { animation_handles.push(dest_animation_player.animations_mut().add(animation)); } animation_handles } fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>> { if let Some((animation_player, _)) = graph.find(root, &mut |n| { n.query_component_ref::<AnimationPlayer>().is_some() }) { self.retarget_animations_to_player(root, animation_player, graph) } else { Default::default() } } } impl ResourceData for Model { fn path(&self) -> Cow<Path> { Cow::Borrowed(&self.path) } fn set_path(&mut self, path: PathBuf) { self.path = path; } fn as_any(&self) -> &dyn Any { self } fn as_any_mut(&mut self) -> &mut dyn Any { self } fn type_uuid(&self) -> Uuid { <Self as TypeUuidProvider>::type_uuid() } } impl Default for Model { fn default() -> Self { Self { path: PathBuf::new(), mapping: NodeMapping::UseNames, scene: Scene::new(), } } } /// Defines a way of searching materials when loading a model resource from foreign file format such as FBX. /// /// # Motivation /// /// Most 3d model file formats store paths to external resources (textures and other things) as absolute paths, /// which makes it impossible to use with "location-independent" applications like games. To fix that issue, the /// engine provides a few ways of resolving paths to external resources. The engine starts resolving by stripping /// everything but the file name from an external resource's path, then it uses one of the following methods to find /// a texture with the file name. It can look up the folder hierarchy using the [`MaterialSearchOptions::RecursiveUp`] /// method, or even use a global search starting from the working directory of your game /// ([`MaterialSearchOptions::WorkingDirectory`]). #[derive( Clone, Debug, Visit, PartialEq, Eq, Deserialize, Serialize, Reflect, AsRefStr, EnumString, EnumVariantNames, )] pub enum MaterialSearchOptions { /// Search in the specified materials directory. It is suitable for cases when /// your model resource uses shared textures. /// /// # Platform specific /// /// Works on every platform. MaterialsDirectory(PathBuf), /// Recursive-up search. It is suitable for cases when textures are placed /// near your model resource. This is the **default** option. /// /// # Platform specific /// /// Works on every platform. RecursiveUp, /// Global search starting from the working directory. This is the slowest option, with a lot of ambiguity - /// it may load an unexpected file when there are two or more files with the same name /// lying in different directories. /// /// # Platform specific /// /// WebAssembly - **not supported** due to the lack of a file system. WorkingDirectory, /// Try to use paths stored in the model resource directly. This option has limited use; /// it is suitable for loading animations, or any other model which does not have any materials. 
/// /// # Important notes /// /// RGS (native engine scenes) files should be loaded with this option by default, otherwise /// the engine won't be able to correctly find materials. UsePathDirectly, } impl Default for MaterialSearchOptions { fn default() -> Self { Self::RecursiveUp } } impl MaterialSearchOptions { /// A helper to create the MaterialsDirectory variant. pub fn materials_directory<P: AsRef<Path>>(path: P) -> Self { Self::MaterialsDirectory(path.as_ref().to_path_buf()) } } /// A set of options that will be applied to a model resource when loading it from an external source. /// /// # Details /// /// The engine has a convenient way of storing import options in `.options` files. For example, you may /// have a `foo.fbx` 3d model; to change its import options, create a new file with an additional `.options` /// extension: `foo.fbx.options`. The content of an options file could be something like this: /// /// ```text /// ( /// material_search_options: RecursiveUp /// ) /// ``` /// /// Check the documentation of the structure's fields for more info about each parameter. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default, Reflect, Eq)] pub struct ModelImportOptions { /// See [`MaterialSearchOptions`] docs for more info. #[serde(default)] pub material_search_options: MaterialSearchOptions, } impl ImportOptions for ModelImportOptions {} /// All possible errors that may occur while trying to load a model from some /// data source. #[derive(Debug)] pub enum ModelLoadError { /// An error occurred while reading a data source. Visit(VisitError), /// Format is not supported. NotSupported(String), /// An error occurred while loading an FBX file. Fbx(FbxError), } impl Display for ModelLoadError { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { ModelLoadError::Visit(v) => { write!(f, "An error occurred while reading a data source {v:?}") } ModelLoadError::NotSupported(v) => { write!(f, "Model format is not supported: {v}") } ModelLoadError::Fbx(v) => v.fmt(f), } } } impl From<FbxError> for ModelLoadError { fn from(fbx: FbxError) -> Self { ModelLoadError::Fbx(fbx) } } impl From<VisitError> for ModelLoadError { fn from(e: VisitError) -> Self { ModelLoadError::Visit(e) } } impl Model { pub(crate) async fn load<P: AsRef<Path>>( path: P, serialization_context: Arc<SerializationContext>, resource_manager: ResourceManager, model_import_options: ModelImportOptions, ) -> Result<Self, ModelLoadError> { let extension = path .as_ref() .extension() .unwrap_or_default() .to_string_lossy() .as_ref() .to_lowercase(); let (scene, mapping) = match extension.as_ref() { "fbx" => { let mut scene = Scene::new(); if let Some(filename) = path.as_ref().file_name() { let root = scene.graph.get_root(); scene.graph[root].set_name(&filename.to_string_lossy()); } fbx::load_to_scene( &mut scene, resource_manager, path.as_ref(), &model_import_options, ) .await?; // Set NodeMapping::UseNames as mapping here because FBX does not have // any persistent unique ids, and we have to use names. (scene, NodeMapping::UseNames) } // Scene can be used directly as model resource. Such scenes can be created in // Fyroxed. "rgs" => ( SceneLoader::from_file( path.as_ref(), serialization_context, resource_manager.clone(), ) .await? .finish() .await, NodeMapping::UseHandles, ), // TODO: Add more formats. 
            _ => {
                return Err(ModelLoadError::NotSupported(format!(
                    "Unsupported model resource format: {}",
                    extension
                )))
            }
        };

        Ok(Self {
            path: path.as_ref().to_owned(),
            scene,
            mapping,
        })
    }

    /// Returns a shared reference to the internal scene. There is no way to obtain a
    /// mutable reference to the inner scene, because a resource is an immutable source
    /// of data.
    pub fn get_scene(&self) -> &Scene {
        &self.scene
    }

    /// Searches the model for a node with the specified name, starting from the root node.
    /// Returns a tuple with a handle and a reference to the found node. If nothing is found,
    /// it returns [`None`].
    pub fn find_node_by_name(&self, name: &str) -> Option<(Handle<Node>, &Node)> {
        self.scene.graph.find_by_name_from_root(name)
    }

    pub(crate) fn get_scene_mut(&mut self) -> &mut Scene {
        &mut self.scene
    }
}
Model
identifier_name
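The `MaterialSearchOptions` docs in the sample above describe how import options steer texture path resolution. A minimal sketch of setting this up in code rather than via a `.options` file; it only uses the `materials_directory` helper and the public field defined in the sample, and the import path is an assumption based on this being fyrox's `resource::model` module:

```rust
// Sketch only: select a shared materials directory for model import instead
// of the default recursive-up search. Adjust the import path to your crate.
use fyrox::resource::model::{MaterialSearchOptions, ModelImportOptions};

fn shared_texture_options() -> ModelImportOptions {
    ModelImportOptions {
        // All models resolve their textures against one shared directory.
        material_search_options: MaterialSearchOptions::materials_directory("data/textures"),
    }
}
```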
io.rs
//! Persistent storage backend for blocks. use std::collections::VecDeque; use std::fs; use std::io::{self, Read, Seek, Write}; use std::iter; use std::mem; use std::path::Path; use nakamoto_common::bitcoin::consensus::encode::{Decodable, Encodable}; use nakamoto_common::block::store::{Error, Store}; use nakamoto_common::block::Height; /// Append a block to the end of the stream. fn put<H: Sized + Encodable, S: Seek + Write, I: Iterator<Item = H>>( mut stream: S, headers: I, ) -> Result<Height, Error> { let mut pos = stream.seek(io::SeekFrom::End(0))?; let size = std::mem::size_of::<H>(); for header in headers { pos += header.consensus_encode(&mut stream)? as u64; } Ok(pos / size as u64) } /// Get a block from the stream. fn get<H: Decodable, S: Seek + Read>(mut stream: S, ix: u64) -> Result<H, Error> { let size = std::mem::size_of::<H>(); let mut buf = vec![0; size]; // TODO: Use an array when rust has const-generics. stream.seek(io::SeekFrom::Start(ix * size as u64))?; stream.read_exact(&mut buf)?; H::consensus_decode(&mut buf.as_slice()).map_err(Error::from) } /// Reads from a file in an I/O optmized way. #[derive(Debug)] struct FileReader<H> { file: fs::File, queue: VecDeque<H>, index: u64, } impl<H: Decodable> FileReader<H> { const BATCH_SIZE: usize = 16; fn new(file: fs::File) -> Self { Self { file, queue: VecDeque::new(), index: 0, } } fn next(&mut self) -> Result<Option<H>, Error> { let size = std::mem::size_of::<H>(); if self.queue.is_empty() { let mut buf = vec![0; size * Self::BATCH_SIZE]; let from = self.file.seek(io::SeekFrom::Start(self.index))?; match self.file.read_exact(&mut buf) { Ok(()) => {} Err(err) if err.kind() == io::ErrorKind::UnexpectedEof =>
Err(err) => return Err(err.into()), } self.index += buf.len() as u64; let items = buf.len() / size; let mut cursor = io::Cursor::new(buf); let mut item = vec![0; size]; for _ in 0..items { cursor.read_exact(&mut item)?; let item = H::consensus_decode(&mut item.as_slice())?; self.queue.push_back(item); } } Ok(self.queue.pop_front()) } } /// An iterator over block headers in a file. #[derive(Debug)] pub struct Iter<H> { height: Height, file: FileReader<H>, } impl<H: Decodable> Iter<H> { fn new(file: fs::File) -> Self { Self { file: FileReader::new(file), height: 1, } } } impl<H: Decodable> Iterator for Iter<H> { type Item = Result<(Height, H), Error>; fn next(&mut self) -> Option<Self::Item> { let height = self.height; assert!(height > 0); match self.file.next() { // If we hit this branch, it's because we're trying to read passed the end // of the file, which means there are no further headers remaining. Err(Error::Io(err)) if err.kind() == io::ErrorKind::UnexpectedEof => None, // If another kind of error occurs, we want to yield it to the caller, so // that it can be propagated. Err(err) => Some(Err(err)), Ok(Some(header)) => { self.height = height + 1; Some(Ok((height, header))) } Ok(None) => None, } } } /// A `Store` backed by a single file. #[derive(Debug)] pub struct File<H> { file: fs::File, genesis: H, } impl<H> File<H> { /// Open a new file store from the given path and genesis header. pub fn open<P: AsRef<Path>>(path: P, genesis: H) -> io::Result<Self> { fs::OpenOptions::new() .create(true) .read(true) .append(true) .open(path) .map(|file| Self { file, genesis }) } /// Create a new file store at the given path, with the provided genesis header. pub fn create<P: AsRef<Path>>(path: P, genesis: H) -> Result<Self, Error> { let file = fs::OpenOptions::new() .create_new(true) .read(true) .append(true) .open(path)?; Ok(Self { file, genesis }) } } impl<H:'static + Copy + Encodable + Decodable> Store for File<H> { type Header = H; /// Get the genesis block. fn genesis(&self) -> H { self.genesis } /// Append a block to the end of the file. fn put<I: Iterator<Item = Self::Header>>(&mut self, headers: I) -> Result<Height, Error> { self::put(&mut self.file, headers) } /// Get the block at the given height. Returns `io::ErrorKind::UnexpectedEof` if /// the height is not found. fn get(&self, height: Height) -> Result<H, Error> { if let Some(ix) = height.checked_sub(1) { // Clone so this function doesn't have to take a `&mut self`. let mut file = self.file.try_clone()?; get(&mut file, ix) } else { Ok(self.genesis) } } /// Rollback the chain to the given height. Behavior is undefined if the given /// height is not contained in the store. fn rollback(&mut self, height: Height) -> Result<(), Error> { let size = mem::size_of::<H>(); self.file .set_len((height) * size as u64) .map_err(Error::from) } /// Flush changes to disk. fn sync(&mut self) -> Result<(), Error> { self.file.sync_data().map_err(Error::from) } /// Iterate over all headers in the store. fn iter(&self) -> Box<dyn Iterator<Item = Result<(Height, H), Error>>> { // Clone so this function doesn't have to take a `&mut self`. match self.file.try_clone() { Ok(file) => Box::new(iter::once(Ok((0, self.genesis))).chain(Iter::new(file))), Err(err) => Box::new(iter::once(Err(Error::Io(err)))), } } /// Return the number of headers in the store. 
fn len(&self) -> Result<usize, Error> { let meta = self.file.metadata()?; let len = meta.len(); let size = mem::size_of::<H>(); assert!(len <= usize::MAX as u64); if len as usize % size!= 0 { return Err(Error::Corruption); } Ok(len as usize / size + 1) } /// Return the block height of the store. fn height(&self) -> Result<Height, Error> { self.len().map(|n| n as Height - 1) } /// Check the file store integrity. fn check(&self) -> Result<(), Error> { self.len().map(|_| ()) } /// Attempt to heal data corruption. fn heal(&self) -> Result<(), Error> { let meta = self.file.metadata()?; let len = meta.len(); let size = mem::size_of::<H>(); assert!(len <= usize::MAX as u64); let extraneous = len as usize % size; if extraneous!= 0 { self.file.set_len(len - extraneous as u64)?; } Ok(()) } } #[cfg(test)] mod test { use std::{io, iter}; use nakamoto_common::bitcoin::TxMerkleNode; use nakamoto_common::bitcoin_hashes::Hash; use nakamoto_common::block::BlockHash; use super::{Error, File, Height, Store}; use crate::block::BlockHeader; const HEADER_SIZE: usize = 80; fn store(path: &str) -> File<BlockHeader> { let tmp = tempfile::tempdir().unwrap(); let genesis = BlockHeader { version: 1, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 39123818, nonce: 0, }; File::open(tmp.path().join(path), genesis).unwrap() } #[test] fn test_put_get() { let mut store = store("headers.db"); let header = BlockHeader { version: 1, prev_blockhash: store.genesis.block_hash(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 1842918273, nonce: 312143, }; assert_eq!( store.get(0).unwrap(), store.genesis, "when the store is empty, we can `get` the genesis" ); assert!( store.get(1).is_err(), "when the store is empty, we can't get height `1`" ); let height = store.put(iter::once(header)).unwrap(); store.sync().unwrap(); assert_eq!(height, 1); assert_eq!(store.get(height).unwrap(), header); } #[test] fn test_put_get_batch() { let mut store = store("headers.db"); assert_eq!(store.len().unwrap(), 1); let count = 32; let header = BlockHeader { version: 1, prev_blockhash: store.genesis().block_hash(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 1842918273, nonce: 0, }; let iter = (0..count).map(|i| BlockHeader { nonce: i,..header }); let headers = iter.clone().collect::<Vec<_>>(); // Put all headers into the store and check that we can retrieve them. { let height = store.put(iter).unwrap(); assert_eq!(height, headers.len() as Height); assert_eq!(store.len().unwrap(), headers.len() + 1); // Account for genesis. for (i, h) in headers.iter().enumerate() { assert_eq!(&store.get(i as Height + 1).unwrap(), h); } assert!(&store.get(32 + 1).is_err()); } // Rollback and overwrite the history. { let h = headers.len() as Height / 2; // Some point `h` in the past. assert!(&store.get(h + 1).is_ok()); assert_eq!(store.get(h + 1).unwrap(), headers[h as usize]); store.rollback(h).unwrap(); assert!( &store.get(h + 1).is_err(), "after the rollback, we can't access blocks passed `h`" ); assert_eq!(store.len().unwrap(), h as usize + 1); // We can now overwrite the block at position `h + 1`. let header = BlockHeader { nonce: 49219374, ..header }; let height = store.put(iter::once(header)).unwrap(); assert!(header!= headers[height as usize]); assert_eq!(height, h + 1); assert_eq!(store.get(height).unwrap(), header); // Blocks up to and including `h` are unaffected by the rollback. 
assert_eq!(store.get(0).unwrap(), store.genesis); assert_eq!(store.get(1).unwrap(), headers[0]); assert_eq!(store.get(h).unwrap(), headers[h as usize - 1]); } } #[test] fn test_iter() { let mut store = store("headers.db"); let count = 32; let header = BlockHeader { version: 1, prev_blockhash: store.genesis().block_hash(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 1842918273, nonce: 0, }; let iter = (0..count).map(|i| BlockHeader { nonce: i,..header }); let headers = iter.clone().collect::<Vec<_>>(); store.put(iter).unwrap(); let mut iter = store.iter(); assert_eq!(iter.next().unwrap().unwrap(), (0, store.genesis)); for (i, result) in iter.enumerate() { let (height, header) = result.unwrap(); assert_eq!(i as u64 + 1, height); assert_eq!(header, headers[height as usize - 1]); } } #[test] fn test_corrupt_file() { let mut store = store("headers.db"); store.check().expect("checking always works"); store.heal().expect("healing when there is no corruption"); let headers = &[ BlockHeader { version: 1, prev_blockhash: store.genesis().block_hash(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 1842918273, nonce: 312143, }, BlockHeader { version: 1, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x1ffffff, time: 1842918920, nonce: 913716378, }, ]; store.put(headers.iter().cloned()).unwrap(); store.check().unwrap(); assert_eq!(store.len().unwrap(), 3); let size = std::mem::size_of::<BlockHeader>(); assert_eq!(size, HEADER_SIZE); // Intentionally corrupt the file, by truncating it by 32 bytes. store .file .set_len(headers.len() as u64 * size as u64 - 32) .unwrap(); assert_eq!( store.get(1).unwrap(), headers[0], "the first header is intact" ); matches! { store .get(2) .expect_err("the second header has been corrupted"), Error::Io(err) if err.kind() == io::ErrorKind::UnexpectedEof }; store.len().expect_err("data is corrupted"); store.check().expect_err("data is corrupted"); store.heal().unwrap(); store.check().unwrap(); assert_eq!( store.len().unwrap(), 2, "the last (corrupted) header was removed" ); } }
{ self.file.seek(io::SeekFrom::Start(from))?; let n = self.file.read_to_end(&mut buf)?; buf.truncate(n); }
conditional_block
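The `put`/`get` helpers above rely on every header occupying the same number of bytes, so heights map to byte offsets by plain multiplication. A self-contained, std-only illustration of that arithmetic with toy `u64` records standing in for consensus-encoded headers (not the nakamoto types):

```rust
use std::io::{Cursor, Read, Seek, SeekFrom, Write};

fn main() -> std::io::Result<()> {
    let size = std::mem::size_of::<u64>(); // every record has the same size
    let mut stream = Cursor::new(Vec::new());

    // put: append records at the end; the resulting height is bytes / size.
    stream.seek(SeekFrom::End(0))?;
    for header in [10u64, 20, 30] {
        stream.write_all(&header.to_le_bytes())?;
    }
    assert_eq!(stream.get_ref().len() / size, 3); // height after the batch

    // get: the record at index `ix` starts at byte offset `ix * size`.
    let ix = 1u64;
    stream.seek(SeekFrom::Start(ix * size as u64))?;
    let mut buf = [0u8; 8];
    stream.read_exact(&mut buf)?;
    assert_eq!(u64::from_le_bytes(buf), 20);
    Ok(())
}
```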
io.rs
//! Persistent storage backend for blocks. use std::collections::VecDeque; use std::fs; use std::io::{self, Read, Seek, Write}; use std::iter; use std::mem; use std::path::Path; use nakamoto_common::bitcoin::consensus::encode::{Decodable, Encodable}; use nakamoto_common::block::store::{Error, Store}; use nakamoto_common::block::Height; /// Append a block to the end of the stream. fn put<H: Sized + Encodable, S: Seek + Write, I: Iterator<Item = H>>( mut stream: S, headers: I, ) -> Result<Height, Error> { let mut pos = stream.seek(io::SeekFrom::End(0))?; let size = std::mem::size_of::<H>(); for header in headers { pos += header.consensus_encode(&mut stream)? as u64; } Ok(pos / size as u64) } /// Get a block from the stream. fn get<H: Decodable, S: Seek + Read>(mut stream: S, ix: u64) -> Result<H, Error> { let size = std::mem::size_of::<H>(); let mut buf = vec![0; size]; // TODO: Use an array when rust has const-generics. stream.seek(io::SeekFrom::Start(ix * size as u64))?; stream.read_exact(&mut buf)?; H::consensus_decode(&mut buf.as_slice()).map_err(Error::from) } /// Reads from a file in an I/O optmized way. #[derive(Debug)] struct FileReader<H> { file: fs::File, queue: VecDeque<H>, index: u64, } impl<H: Decodable> FileReader<H> { const BATCH_SIZE: usize = 16; fn new(file: fs::File) -> Self { Self { file, queue: VecDeque::new(), index: 0, } } fn next(&mut self) -> Result<Option<H>, Error> { let size = std::mem::size_of::<H>(); if self.queue.is_empty() { let mut buf = vec![0; size * Self::BATCH_SIZE]; let from = self.file.seek(io::SeekFrom::Start(self.index))?; match self.file.read_exact(&mut buf) { Ok(()) => {} Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => { self.file.seek(io::SeekFrom::Start(from))?; let n = self.file.read_to_end(&mut buf)?; buf.truncate(n); } Err(err) => return Err(err.into()), } self.index += buf.len() as u64; let items = buf.len() / size; let mut cursor = io::Cursor::new(buf); let mut item = vec![0; size];
} } Ok(self.queue.pop_front()) } } /// An iterator over block headers in a file. #[derive(Debug)] pub struct Iter<H> { height: Height, file: FileReader<H>, } impl<H: Decodable> Iter<H> { fn new(file: fs::File) -> Self { Self { file: FileReader::new(file), height: 1, } } } impl<H: Decodable> Iterator for Iter<H> { type Item = Result<(Height, H), Error>; fn next(&mut self) -> Option<Self::Item> { let height = self.height; assert!(height > 0); match self.file.next() { // If we hit this branch, it's because we're trying to read passed the end // of the file, which means there are no further headers remaining. Err(Error::Io(err)) if err.kind() == io::ErrorKind::UnexpectedEof => None, // If another kind of error occurs, we want to yield it to the caller, so // that it can be propagated. Err(err) => Some(Err(err)), Ok(Some(header)) => { self.height = height + 1; Some(Ok((height, header))) } Ok(None) => None, } } } /// A `Store` backed by a single file. #[derive(Debug)] pub struct File<H> { file: fs::File, genesis: H, } impl<H> File<H> { /// Open a new file store from the given path and genesis header. pub fn open<P: AsRef<Path>>(path: P, genesis: H) -> io::Result<Self> { fs::OpenOptions::new() .create(true) .read(true) .append(true) .open(path) .map(|file| Self { file, genesis }) } /// Create a new file store at the given path, with the provided genesis header. pub fn create<P: AsRef<Path>>(path: P, genesis: H) -> Result<Self, Error> { let file = fs::OpenOptions::new() .create_new(true) .read(true) .append(true) .open(path)?; Ok(Self { file, genesis }) } } impl<H:'static + Copy + Encodable + Decodable> Store for File<H> { type Header = H; /// Get the genesis block. fn genesis(&self) -> H { self.genesis } /// Append a block to the end of the file. fn put<I: Iterator<Item = Self::Header>>(&mut self, headers: I) -> Result<Height, Error> { self::put(&mut self.file, headers) } /// Get the block at the given height. Returns `io::ErrorKind::UnexpectedEof` if /// the height is not found. fn get(&self, height: Height) -> Result<H, Error> { if let Some(ix) = height.checked_sub(1) { // Clone so this function doesn't have to take a `&mut self`. let mut file = self.file.try_clone()?; get(&mut file, ix) } else { Ok(self.genesis) } } /// Rollback the chain to the given height. Behavior is undefined if the given /// height is not contained in the store. fn rollback(&mut self, height: Height) -> Result<(), Error> { let size = mem::size_of::<H>(); self.file .set_len((height) * size as u64) .map_err(Error::from) } /// Flush changes to disk. fn sync(&mut self) -> Result<(), Error> { self.file.sync_data().map_err(Error::from) } /// Iterate over all headers in the store. fn iter(&self) -> Box<dyn Iterator<Item = Result<(Height, H), Error>>> { // Clone so this function doesn't have to take a `&mut self`. match self.file.try_clone() { Ok(file) => Box::new(iter::once(Ok((0, self.genesis))).chain(Iter::new(file))), Err(err) => Box::new(iter::once(Err(Error::Io(err)))), } } /// Return the number of headers in the store. fn len(&self) -> Result<usize, Error> { let meta = self.file.metadata()?; let len = meta.len(); let size = mem::size_of::<H>(); assert!(len <= usize::MAX as u64); if len as usize % size!= 0 { return Err(Error::Corruption); } Ok(len as usize / size + 1) } /// Return the block height of the store. fn height(&self) -> Result<Height, Error> { self.len().map(|n| n as Height - 1) } /// Check the file store integrity. 
fn check(&self) -> Result<(), Error> { self.len().map(|_| ()) } /// Attempt to heal data corruption. fn heal(&self) -> Result<(), Error> { let meta = self.file.metadata()?; let len = meta.len(); let size = mem::size_of::<H>(); assert!(len <= usize::MAX as u64); let extraneous = len as usize % size; if extraneous!= 0 { self.file.set_len(len - extraneous as u64)?; } Ok(()) } } #[cfg(test)] mod test { use std::{io, iter}; use nakamoto_common::bitcoin::TxMerkleNode; use nakamoto_common::bitcoin_hashes::Hash; use nakamoto_common::block::BlockHash; use super::{Error, File, Height, Store}; use crate::block::BlockHeader; const HEADER_SIZE: usize = 80; fn store(path: &str) -> File<BlockHeader> { let tmp = tempfile::tempdir().unwrap(); let genesis = BlockHeader { version: 1, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 39123818, nonce: 0, }; File::open(tmp.path().join(path), genesis).unwrap() } #[test] fn test_put_get() { let mut store = store("headers.db"); let header = BlockHeader { version: 1, prev_blockhash: store.genesis.block_hash(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 1842918273, nonce: 312143, }; assert_eq!( store.get(0).unwrap(), store.genesis, "when the store is empty, we can `get` the genesis" ); assert!( store.get(1).is_err(), "when the store is empty, we can't get height `1`" ); let height = store.put(iter::once(header)).unwrap(); store.sync().unwrap(); assert_eq!(height, 1); assert_eq!(store.get(height).unwrap(), header); } #[test] fn test_put_get_batch() { let mut store = store("headers.db"); assert_eq!(store.len().unwrap(), 1); let count = 32; let header = BlockHeader { version: 1, prev_blockhash: store.genesis().block_hash(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 1842918273, nonce: 0, }; let iter = (0..count).map(|i| BlockHeader { nonce: i,..header }); let headers = iter.clone().collect::<Vec<_>>(); // Put all headers into the store and check that we can retrieve them. { let height = store.put(iter).unwrap(); assert_eq!(height, headers.len() as Height); assert_eq!(store.len().unwrap(), headers.len() + 1); // Account for genesis. for (i, h) in headers.iter().enumerate() { assert_eq!(&store.get(i as Height + 1).unwrap(), h); } assert!(&store.get(32 + 1).is_err()); } // Rollback and overwrite the history. { let h = headers.len() as Height / 2; // Some point `h` in the past. assert!(&store.get(h + 1).is_ok()); assert_eq!(store.get(h + 1).unwrap(), headers[h as usize]); store.rollback(h).unwrap(); assert!( &store.get(h + 1).is_err(), "after the rollback, we can't access blocks passed `h`" ); assert_eq!(store.len().unwrap(), h as usize + 1); // We can now overwrite the block at position `h + 1`. let header = BlockHeader { nonce: 49219374, ..header }; let height = store.put(iter::once(header)).unwrap(); assert!(header!= headers[height as usize]); assert_eq!(height, h + 1); assert_eq!(store.get(height).unwrap(), header); // Blocks up to and including `h` are unaffected by the rollback. 
assert_eq!(store.get(0).unwrap(), store.genesis); assert_eq!(store.get(1).unwrap(), headers[0]); assert_eq!(store.get(h).unwrap(), headers[h as usize - 1]); } } #[test] fn test_iter() { let mut store = store("headers.db"); let count = 32; let header = BlockHeader { version: 1, prev_blockhash: store.genesis().block_hash(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 1842918273, nonce: 0, }; let iter = (0..count).map(|i| BlockHeader { nonce: i,..header }); let headers = iter.clone().collect::<Vec<_>>(); store.put(iter).unwrap(); let mut iter = store.iter(); assert_eq!(iter.next().unwrap().unwrap(), (0, store.genesis)); for (i, result) in iter.enumerate() { let (height, header) = result.unwrap(); assert_eq!(i as u64 + 1, height); assert_eq!(header, headers[height as usize - 1]); } } #[test] fn test_corrupt_file() { let mut store = store("headers.db"); store.check().expect("checking always works"); store.heal().expect("healing when there is no corruption"); let headers = &[ BlockHeader { version: 1, prev_blockhash: store.genesis().block_hash(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 1842918273, nonce: 312143, }, BlockHeader { version: 1, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x1ffffff, time: 1842918920, nonce: 913716378, }, ]; store.put(headers.iter().cloned()).unwrap(); store.check().unwrap(); assert_eq!(store.len().unwrap(), 3); let size = std::mem::size_of::<BlockHeader>(); assert_eq!(size, HEADER_SIZE); // Intentionally corrupt the file, by truncating it by 32 bytes. store .file .set_len(headers.len() as u64 * size as u64 - 32) .unwrap(); assert_eq!( store.get(1).unwrap(), headers[0], "the first header is intact" ); matches! { store .get(2) .expect_err("the second header has been corrupted"), Error::Io(err) if err.kind() == io::ErrorKind::UnexpectedEof }; store.len().expect_err("data is corrupted"); store.check().expect_err("data is corrupted"); store.heal().unwrap(); store.check().unwrap(); assert_eq!( store.len().unwrap(), 2, "the last (corrupted) header was removed" ); } }
for _ in 0..items { cursor.read_exact(&mut item)?; let item = H::consensus_decode(&mut item.as_slice())?; self.queue.push_back(item);
random_line_split
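The `FileReader` above amortizes I/O by reading up to `BATCH_SIZE` records at once and decoding them out of an in-memory buffer. A toy version of that refill step with fixed-size `u32` records (illustration of the pattern, not the real decoder):

```rust
use std::collections::VecDeque;
use std::io::{Cursor, Read};

// Decode as many whole records as the buffer holds; trailing partial bytes
// (from hitting EOF mid-batch) are ignored by the integer division.
fn refill(buf: &[u8], queue: &mut VecDeque<u32>) -> std::io::Result<()> {
    let size = std::mem::size_of::<u32>();
    let items = buf.len() / size;
    let mut cursor = Cursor::new(buf);
    let mut item = [0u8; 4];
    for _ in 0..items {
        cursor.read_exact(&mut item)?;
        queue.push_back(u32::from_le_bytes(item));
    }
    Ok(())
}

fn main() {
    let bytes: Vec<u8> = [1u32, 2, 3].iter().flat_map(|v| v.to_le_bytes()).collect();
    let mut queue = VecDeque::new();
    refill(&bytes, &mut queue).unwrap();
    assert_eq!(queue.pop_front(), Some(1));
    assert_eq!(queue.len(), 2);
}
```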
io.rs
//! Persistent storage backend for blocks. use std::collections::VecDeque; use std::fs; use std::io::{self, Read, Seek, Write}; use std::iter; use std::mem; use std::path::Path; use nakamoto_common::bitcoin::consensus::encode::{Decodable, Encodable}; use nakamoto_common::block::store::{Error, Store}; use nakamoto_common::block::Height; /// Append a block to the end of the stream. fn put<H: Sized + Encodable, S: Seek + Write, I: Iterator<Item = H>>( mut stream: S, headers: I, ) -> Result<Height, Error> { let mut pos = stream.seek(io::SeekFrom::End(0))?; let size = std::mem::size_of::<H>(); for header in headers { pos += header.consensus_encode(&mut stream)? as u64; } Ok(pos / size as u64) } /// Get a block from the stream. fn get<H: Decodable, S: Seek + Read>(mut stream: S, ix: u64) -> Result<H, Error> { let size = std::mem::size_of::<H>(); let mut buf = vec![0; size]; // TODO: Use an array when rust has const-generics. stream.seek(io::SeekFrom::Start(ix * size as u64))?; stream.read_exact(&mut buf)?; H::consensus_decode(&mut buf.as_slice()).map_err(Error::from) } /// Reads from a file in an I/O optmized way. #[derive(Debug)] struct FileReader<H> { file: fs::File, queue: VecDeque<H>, index: u64, } impl<H: Decodable> FileReader<H> { const BATCH_SIZE: usize = 16; fn new(file: fs::File) -> Self { Self { file, queue: VecDeque::new(), index: 0, } } fn next(&mut self) -> Result<Option<H>, Error> { let size = std::mem::size_of::<H>(); if self.queue.is_empty() { let mut buf = vec![0; size * Self::BATCH_SIZE]; let from = self.file.seek(io::SeekFrom::Start(self.index))?; match self.file.read_exact(&mut buf) { Ok(()) => {} Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => { self.file.seek(io::SeekFrom::Start(from))?; let n = self.file.read_to_end(&mut buf)?; buf.truncate(n); } Err(err) => return Err(err.into()), } self.index += buf.len() as u64; let items = buf.len() / size; let mut cursor = io::Cursor::new(buf); let mut item = vec![0; size]; for _ in 0..items { cursor.read_exact(&mut item)?; let item = H::consensus_decode(&mut item.as_slice())?; self.queue.push_back(item); } } Ok(self.queue.pop_front()) } } /// An iterator over block headers in a file. #[derive(Debug)] pub struct Iter<H> { height: Height, file: FileReader<H>, } impl<H: Decodable> Iter<H> { fn new(file: fs::File) -> Self { Self { file: FileReader::new(file), height: 1, } } } impl<H: Decodable> Iterator for Iter<H> { type Item = Result<(Height, H), Error>; fn
(&mut self) -> Option<Self::Item> { let height = self.height; assert!(height > 0); match self.file.next() { // If we hit this branch, it's because we're trying to read passed the end // of the file, which means there are no further headers remaining. Err(Error::Io(err)) if err.kind() == io::ErrorKind::UnexpectedEof => None, // If another kind of error occurs, we want to yield it to the caller, so // that it can be propagated. Err(err) => Some(Err(err)), Ok(Some(header)) => { self.height = height + 1; Some(Ok((height, header))) } Ok(None) => None, } } } /// A `Store` backed by a single file. #[derive(Debug)] pub struct File<H> { file: fs::File, genesis: H, } impl<H> File<H> { /// Open a new file store from the given path and genesis header. pub fn open<P: AsRef<Path>>(path: P, genesis: H) -> io::Result<Self> { fs::OpenOptions::new() .create(true) .read(true) .append(true) .open(path) .map(|file| Self { file, genesis }) } /// Create a new file store at the given path, with the provided genesis header. pub fn create<P: AsRef<Path>>(path: P, genesis: H) -> Result<Self, Error> { let file = fs::OpenOptions::new() .create_new(true) .read(true) .append(true) .open(path)?; Ok(Self { file, genesis }) } } impl<H:'static + Copy + Encodable + Decodable> Store for File<H> { type Header = H; /// Get the genesis block. fn genesis(&self) -> H { self.genesis } /// Append a block to the end of the file. fn put<I: Iterator<Item = Self::Header>>(&mut self, headers: I) -> Result<Height, Error> { self::put(&mut self.file, headers) } /// Get the block at the given height. Returns `io::ErrorKind::UnexpectedEof` if /// the height is not found. fn get(&self, height: Height) -> Result<H, Error> { if let Some(ix) = height.checked_sub(1) { // Clone so this function doesn't have to take a `&mut self`. let mut file = self.file.try_clone()?; get(&mut file, ix) } else { Ok(self.genesis) } } /// Rollback the chain to the given height. Behavior is undefined if the given /// height is not contained in the store. fn rollback(&mut self, height: Height) -> Result<(), Error> { let size = mem::size_of::<H>(); self.file .set_len((height) * size as u64) .map_err(Error::from) } /// Flush changes to disk. fn sync(&mut self) -> Result<(), Error> { self.file.sync_data().map_err(Error::from) } /// Iterate over all headers in the store. fn iter(&self) -> Box<dyn Iterator<Item = Result<(Height, H), Error>>> { // Clone so this function doesn't have to take a `&mut self`. match self.file.try_clone() { Ok(file) => Box::new(iter::once(Ok((0, self.genesis))).chain(Iter::new(file))), Err(err) => Box::new(iter::once(Err(Error::Io(err)))), } } /// Return the number of headers in the store. fn len(&self) -> Result<usize, Error> { let meta = self.file.metadata()?; let len = meta.len(); let size = mem::size_of::<H>(); assert!(len <= usize::MAX as u64); if len as usize % size!= 0 { return Err(Error::Corruption); } Ok(len as usize / size + 1) } /// Return the block height of the store. fn height(&self) -> Result<Height, Error> { self.len().map(|n| n as Height - 1) } /// Check the file store integrity. fn check(&self) -> Result<(), Error> { self.len().map(|_| ()) } /// Attempt to heal data corruption. 
fn heal(&self) -> Result<(), Error> { let meta = self.file.metadata()?; let len = meta.len(); let size = mem::size_of::<H>(); assert!(len <= usize::MAX as u64); let extraneous = len as usize % size; if extraneous!= 0 { self.file.set_len(len - extraneous as u64)?; } Ok(()) } } #[cfg(test)] mod test { use std::{io, iter}; use nakamoto_common::bitcoin::TxMerkleNode; use nakamoto_common::bitcoin_hashes::Hash; use nakamoto_common::block::BlockHash; use super::{Error, File, Height, Store}; use crate::block::BlockHeader; const HEADER_SIZE: usize = 80; fn store(path: &str) -> File<BlockHeader> { let tmp = tempfile::tempdir().unwrap(); let genesis = BlockHeader { version: 1, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 39123818, nonce: 0, }; File::open(tmp.path().join(path), genesis).unwrap() } #[test] fn test_put_get() { let mut store = store("headers.db"); let header = BlockHeader { version: 1, prev_blockhash: store.genesis.block_hash(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 1842918273, nonce: 312143, }; assert_eq!( store.get(0).unwrap(), store.genesis, "when the store is empty, we can `get` the genesis" ); assert!( store.get(1).is_err(), "when the store is empty, we can't get height `1`" ); let height = store.put(iter::once(header)).unwrap(); store.sync().unwrap(); assert_eq!(height, 1); assert_eq!(store.get(height).unwrap(), header); } #[test] fn test_put_get_batch() { let mut store = store("headers.db"); assert_eq!(store.len().unwrap(), 1); let count = 32; let header = BlockHeader { version: 1, prev_blockhash: store.genesis().block_hash(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 1842918273, nonce: 0, }; let iter = (0..count).map(|i| BlockHeader { nonce: i,..header }); let headers = iter.clone().collect::<Vec<_>>(); // Put all headers into the store and check that we can retrieve them. { let height = store.put(iter).unwrap(); assert_eq!(height, headers.len() as Height); assert_eq!(store.len().unwrap(), headers.len() + 1); // Account for genesis. for (i, h) in headers.iter().enumerate() { assert_eq!(&store.get(i as Height + 1).unwrap(), h); } assert!(&store.get(32 + 1).is_err()); } // Rollback and overwrite the history. { let h = headers.len() as Height / 2; // Some point `h` in the past. assert!(&store.get(h + 1).is_ok()); assert_eq!(store.get(h + 1).unwrap(), headers[h as usize]); store.rollback(h).unwrap(); assert!( &store.get(h + 1).is_err(), "after the rollback, we can't access blocks passed `h`" ); assert_eq!(store.len().unwrap(), h as usize + 1); // We can now overwrite the block at position `h + 1`. let header = BlockHeader { nonce: 49219374, ..header }; let height = store.put(iter::once(header)).unwrap(); assert!(header!= headers[height as usize]); assert_eq!(height, h + 1); assert_eq!(store.get(height).unwrap(), header); // Blocks up to and including `h` are unaffected by the rollback. 
assert_eq!(store.get(0).unwrap(), store.genesis); assert_eq!(store.get(1).unwrap(), headers[0]); assert_eq!(store.get(h).unwrap(), headers[h as usize - 1]); } } #[test] fn test_iter() { let mut store = store("headers.db"); let count = 32; let header = BlockHeader { version: 1, prev_blockhash: store.genesis().block_hash(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 1842918273, nonce: 0, }; let iter = (0..count).map(|i| BlockHeader { nonce: i,..header }); let headers = iter.clone().collect::<Vec<_>>(); store.put(iter).unwrap(); let mut iter = store.iter(); assert_eq!(iter.next().unwrap().unwrap(), (0, store.genesis)); for (i, result) in iter.enumerate() { let (height, header) = result.unwrap(); assert_eq!(i as u64 + 1, height); assert_eq!(header, headers[height as usize - 1]); } } #[test] fn test_corrupt_file() { let mut store = store("headers.db"); store.check().expect("checking always works"); store.heal().expect("healing when there is no corruption"); let headers = &[ BlockHeader { version: 1, prev_blockhash: store.genesis().block_hash(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x2ffffff, time: 1842918273, nonce: 312143, }, BlockHeader { version: 1, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), bits: 0x1ffffff, time: 1842918920, nonce: 913716378, }, ]; store.put(headers.iter().cloned()).unwrap(); store.check().unwrap(); assert_eq!(store.len().unwrap(), 3); let size = std::mem::size_of::<BlockHeader>(); assert_eq!(size, HEADER_SIZE); // Intentionally corrupt the file, by truncating it by 32 bytes. store .file .set_len(headers.len() as u64 * size as u64 - 32) .unwrap(); assert_eq!( store.get(1).unwrap(), headers[0], "the first header is intact" ); matches! { store .get(2) .expect_err("the second header has been corrupted"), Error::Io(err) if err.kind() == io::ErrorKind::UnexpectedEof }; store.len().expect_err("data is corrupted"); store.check().expect_err("data is corrupted"); store.heal().unwrap(); store.check().unwrap(); assert_eq!( store.len().unwrap(), 2, "the last (corrupted) header was removed" ); } }
next
identifier_name
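`rollback` and `heal` in the store above both shrink the file to a whole number of fixed-size records via `set_len`: `heal` drops a partially written trailing record, while `rollback` keeps only the first `height` records (the genesis header is never stored in the file, so height N means exactly N records). The length arithmetic, as a sketch:

```rust
// heal: drop any partially written trailing record.
fn healed_len(file_len: u64, record_size: u64) -> u64 {
    file_len - file_len % record_size
}

// rollback: keep records 1..=height; genesis lives outside the file.
fn rollback_len(height: u64, record_size: u64) -> u64 {
    height * record_size
}

fn main() {
    // Two intact 80-byte headers plus 48 bytes of a corrupt third one.
    assert_eq!(healed_len(2 * 80 + 48, 80), 160);
    assert_eq!(rollback_len(16, 80), 1280);
}
```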
yuva_info.rs
use super::image_info; use crate::{prelude::*, EncodedOrigin, ISize, Matrix}; use skia_bindings::{self as sb, SkYUVAInfo, SkYUVAInfo_Subsampling}; use std::{fmt, ptr}; /// Specifies the structure of planes for a YUV image with optional alpha. The actual planar data /// is not part of this structure and depending on usage is in external textures or pixmaps. pub type YUVAInfo = Handle<SkYUVAInfo>; unsafe_send_sync!(YUVAInfo); impl NativeDrop for SkYUVAInfo { fn drop(&mut self) { unsafe { sb::C_SkYUVAInfo_destruct(self) } } } /// Specifies how YUV (and optionally A) are divided among planes. Planes are separated by /// underscores in the enum value names. Within each plane the pixmap/texture channels are /// mapped to the YUVA channels in the order specified, e.g. for kY_UV Y is in channel 0 of plane /// 0, U is in channel 0 of plane 1, and V is in channel 1 of plane 1. Channel ordering /// within a pixmap/texture given the channels it contains: /// A: 0:A /// Luminance/Gray: 0:Gray /// Luminance/Gray + Alpha: 0:Gray, 1:A /// RG 0:R, 1:G /// RGB 0:R, 1:G, 2:B /// RGBA 0:R, 1:G, 2:B, 3:A pub use sb::SkYUVAInfo_PlaneConfig as PlaneConfig; variant_name!(PlaneConfig::YUV); /// UV subsampling is also specified in the enum value names using J:a:b notation (e.g. 4:2:0 is /// 1/2 horizontal and 1/2 vertical resolution for U and V). If alpha is present it is not sub- /// sampled. Note that Subsampling values other than k444 are only valid with [PlaneConfig] values /// that have U and V in different planes than Y (and A, if present). #[repr(i32)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum Subsampling { Unknown = SkYUVAInfo_Subsampling::kUnknown as _, S444 = SkYUVAInfo_Subsampling::k444 as _, S422 = SkYUVAInfo_Subsampling::k422 as _, S420 = SkYUVAInfo_Subsampling::k420 as _, S440 = SkYUVAInfo_Subsampling::k440 as _, S411 = SkYUVAInfo_Subsampling::k411 as _, S410 = SkYUVAInfo_Subsampling::k410 as _, } native_transmutable!(SkYUVAInfo_Subsampling, Subsampling, subsampling_layout); /// Describes how subsampled chroma values are sited relative to luma values. /// /// Currently only centered siting is supported but will expand to support additional sitings. pub use sb::SkYUVAInfo_Siting as Siting; variant_name!(Siting::Centered); /// Ratio of Y/A values to U/V values in x and y. pub fn subsampling_factors(subsampling: Subsampling) -> (i32, i32) { let mut factors: [i32; 2] = Default::default(); unsafe { sb::C_SkYUVAInfo_SubsamplingFactors(subsampling.into_native(), &mut factors[0]) }; #[allow(clippy::tuple_array_conversions)] (factors[0], factors[1]) } /// `SubsamplingFactors(Subsampling)` if `plane_index` refers to a U/V plane and otherwise `(1, 1)` /// if inputs are valid. Invalid inputs consist of incompatible [PlaneConfig] [Subsampling] /// `plane_index` combinations. `(0, 0)` is returned for invalid inputs. pub fn plane_subsampling_factors( plane: PlaneConfig, subsampling: Subsampling, plane_index: usize, ) -> (i32, i32) { let mut factors: [i32; 2] = Default::default(); unsafe { sb::C_SkYUVAInfo_PlaneSubsamplingFactors( plane, subsampling.into_native(), plane_index.try_into().unwrap(), &mut factors[0], ) }; #[allow(clippy::tuple_array_conversions)] (factors[0], factors[1]) } /// Given image dimensions, a planer configuration, subsampling, and origin, determine the expected /// size of each plane. Returns the expected planes. The input image dimensions are as displayed /// (after the planes have been transformed to the intended display orientation). 
The plane /// dimensions are output as the planes are stored in memory (may be rotated from image dimensions). pub fn plane_dimensions( image_dimensions: impl Into<ISize>, config: PlaneConfig, subsampling: Subsampling, origin: EncodedOrigin, ) -> Vec<ISize> { let mut plane_dimensions = [ISize::default(); YUVAInfo::MAX_PLANES]; let size: usize = unsafe { SkYUVAInfo::PlaneDimensions( image_dimensions.into().into_native(), config, subsampling.into_native(), origin.into_native(), plane_dimensions.native_mut().as_mut_ptr(), ) } .try_into() .unwrap(); plane_dimensions[0..size].to_vec() } /// Number of planes for a given [PlaneConfig]. pub fn num_planes(config: PlaneConfig) -> usize { unsafe { sb::C_SkYUVAInfo_NumPlanes(config) } .try_into() .unwrap() } /// Number of Y, U, V, A channels in the ith plane for a given [PlaneConfig] (or [None] if i is /// invalid). pub fn num_channels_in_plane(config: PlaneConfig, i: usize) -> Option<usize> { (i < num_planes(config)).if_true_then_some(|| { unsafe { sb::C_SkYUVAInfo_NumChannelsInPlane(config, i.try_into().unwrap()) } .try_into() .unwrap() }) } /// Does the [PlaneConfig] have alpha values? pub fn has_alpha(config: PlaneConfig) -> bool { unsafe { sb::SkYUVAInfo_HasAlpha(config) } } impl Default for YUVAInfo { fn default() -> Self { Self::construct(|yi| unsafe { sb::C_SkYUVAInfo_Construct(yi) }) } } impl NativePartialEq for YUVAInfo { fn eq(&self, rhs: &Self) -> bool { unsafe { sb::C_SkYUVAInfo_equals(self.native(), rhs.native()) } } } impl fmt::Debug for YUVAInfo { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("YUVAInfo") .field("dimensions", &self.dimensions()) .field("plane_config", &self.plane_config()) .field("subsampling", &self.subsampling()) .field("yuv_color_space", &self.yuv_color_space()) .field("origin", &self.origin()) .field("siting_xy", &self.siting_xy()) .finish() } } impl YUVAInfo { pub const MAX_PLANES: usize = sb::SkYUVAInfo_kMaxPlanes as _; /// `dimensions` should specify the size of the full resolution image (after planes have been /// oriented to how the image is displayed as indicated by `origin`). pub fn new( dimensions: impl Into<ISize>, config: PlaneConfig, subsampling: Subsampling, color_space: image_info::YUVColorSpace, origin: impl Into<Option<EncodedOrigin>>, siting_xy: impl Into<Option<(Siting, Siting)>>, ) -> Option<Self> { let origin = origin.into().unwrap_or(EncodedOrigin::TopLeft); let (siting_x, siting_y) = siting_xy .into() .unwrap_or((Siting::Centered, Siting::Centered)); let n = unsafe { SkYUVAInfo::new( dimensions.into().into_native(), config, subsampling.into_native(), color_space, origin.into_native(), siting_x, siting_y, ) }; Self::native_is_valid(&n).if_true_then_some(|| Self::from_native_c(n)) } pub fn plane_config(&self) -> PlaneConfig { self.native().fPlaneConfig } pub fn subsampling(&self) -> Subsampling { Subsampling::from_native_c(self.native().fSubsampling) } pub fn
(&self, plane_index: usize) -> (i32, i32) { plane_subsampling_factors(self.plane_config(), self.subsampling(), plane_index) } /// Dimensions of the full resolution image (after planes have been oriented to how the image /// is displayed as indicated by fOrigin). pub fn dimensions(&self) -> ISize { ISize::from_native_c(self.native().fDimensions) } pub fn width(&self) -> i32 { self.dimensions().width } pub fn height(&self) -> i32 { self.dimensions().height } pub fn yuv_color_space(&self) -> image_info::YUVColorSpace { self.native().fYUVColorSpace } pub fn siting_xy(&self) -> (Siting, Siting) { let n = self.native(); (n.fSitingX, n.fSitingY) } pub fn origin(&self) -> EncodedOrigin { EncodedOrigin::from_native_c(self.native().fOrigin) } pub fn origin_matrix(&self) -> Matrix { self.origin().to_matrix((self.width(), self.height())) } pub fn has_alpha(&self) -> bool { has_alpha(self.plane_config()) } /// Returns the dimensions for each plane. Dimensions are as stored in memory, before /// transformation to image display space as indicated by [origin(&self)]. pub fn plane_dimensions(&self) -> Vec<ISize> { self::plane_dimensions( self.dimensions(), self.plane_config(), self.subsampling(), self.origin(), ) } /// Given a per-plane row bytes, determine size to allocate for all planes. Optionally retrieves /// the per-plane byte sizes in planeSizes if not `None`. If total size overflows will return /// `SIZE_MAX` and set all planeSizes to `SIZE_MAX`. pub fn compute_total_bytes( &self, row_bytes: &[usize; Self::MAX_PLANES], plane_sizes: Option<&mut [usize; Self::MAX_PLANES]>, ) -> usize { unsafe { self.native().computeTotalBytes( row_bytes.as_ptr(), plane_sizes .map(|v| v.as_mut_ptr()) .unwrap_or(ptr::null_mut()), ) } } pub fn num_planes(&self) -> usize { num_planes(self.plane_config()) } pub fn num_channels_in_plane(&self, i: usize) -> Option<usize> { num_channels_in_plane(self.plane_config(), i) } /// Returns a [YUVAInfo] that is identical to this one but with the passed [Subsampling]. If the /// passed [Subsampling] is not [Subsampling::S444] and this info's [PlaneConfig] is not /// compatible with chroma subsampling (because Y is in the same plane as UV) then the result /// will be `None`. pub fn with_subsampling(&self, subsampling: Subsampling) -> Option<Self> { Self::try_construct(|info| unsafe { sb::C_SkYUVAInfo_makeSubsampling(self.native(), subsampling.into_native(), info); Self::native_is_valid(&*info) }) } /// Returns a [YUVAInfo] that is identical to this one but with the passed dimensions. If the /// passed dimensions is empty then the result will be `None`. pub fn with_dimensions(&self, dimensions: impl Into<ISize>) -> Option<Self> { Self::try_construct(|info| unsafe { sb::C_SkYUVAInfo_makeDimensions(self.native(), dimensions.into().native(), info); Self::native_is_valid(&*info) }) } pub(crate) fn native_is_valid(info: &SkYUVAInfo) -> bool { info.fPlaneConfig!= PlaneConfig::Unknown } }
plane_subsampling_factors
identifier_name
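The `Subsampling` docs above use J:a:b notation: 4:2:0 halves chroma resolution on both axes, 4:2:2 only horizontally, and so on. A small illustration of the factor arithmetic; the round-up convention for odd dimensions is an assumption here, not taken from Skia:

```rust
// (horizontal, vertical) Y-to-chroma ratios for a few J:a:b schemes.
const S444: (i32, i32) = (1, 1);
const S422: (i32, i32) = (2, 1);
const S420: (i32, i32) = (2, 2);

// Divide rounding up, so odd image sizes still cover every luma sample.
fn chroma_plane_dims(width: i32, height: i32, (fx, fy): (i32, i32)) -> (i32, i32) {
    ((width + fx - 1) / fx, (height + fy - 1) / fy)
}

fn main() {
    assert_eq!(chroma_plane_dims(640, 480, S420), (320, 240));
    assert_eq!(chroma_plane_dims(640, 480, S422), (320, 480));
    assert_eq!(chroma_plane_dims(641, 481, S420), (321, 241));
    assert_eq!(chroma_plane_dims(640, 480, S444), (640, 480));
}
```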
yuva_info.rs
use super::image_info; use crate::{prelude::*, EncodedOrigin, ISize, Matrix}; use skia_bindings::{self as sb, SkYUVAInfo, SkYUVAInfo_Subsampling}; use std::{fmt, ptr}; /// Specifies the structure of planes for a YUV image with optional alpha. The actual planar data /// is not part of this structure and depending on usage is in external textures or pixmaps. pub type YUVAInfo = Handle<SkYUVAInfo>; unsafe_send_sync!(YUVAInfo); impl NativeDrop for SkYUVAInfo { fn drop(&mut self) { unsafe { sb::C_SkYUVAInfo_destruct(self) } } } /// Specifies how YUV (and optionally A) are divided among planes. Planes are separated by /// underscores in the enum value names. Within each plane the pixmap/texture channels are /// mapped to the YUVA channels in the order specified, e.g. for kY_UV Y is in channel 0 of plane /// 0, U is in channel 0 of plane 1, and V is in channel 1 of plane 1. Channel ordering /// within a pixmap/texture given the channels it contains: /// A: 0:A /// Luminance/Gray: 0:Gray /// Luminance/Gray + Alpha: 0:Gray, 1:A /// RG 0:R, 1:G /// RGB 0:R, 1:G, 2:B /// RGBA 0:R, 1:G, 2:B, 3:A pub use sb::SkYUVAInfo_PlaneConfig as PlaneConfig; variant_name!(PlaneConfig::YUV); /// UV subsampling is also specified in the enum value names using J:a:b notation (e.g. 4:2:0 is /// 1/2 horizontal and 1/2 vertical resolution for U and V). If alpha is present it is not sub- /// sampled. Note that Subsampling values other than k444 are only valid with [PlaneConfig] values /// that have U and V in different planes than Y (and A, if present). #[repr(i32)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum Subsampling { Unknown = SkYUVAInfo_Subsampling::kUnknown as _, S444 = SkYUVAInfo_Subsampling::k444 as _, S422 = SkYUVAInfo_Subsampling::k422 as _, S420 = SkYUVAInfo_Subsampling::k420 as _, S440 = SkYUVAInfo_Subsampling::k440 as _, S411 = SkYUVAInfo_Subsampling::k411 as _, S410 = SkYUVAInfo_Subsampling::k410 as _, } native_transmutable!(SkYUVAInfo_Subsampling, Subsampling, subsampling_layout); /// Describes how subsampled chroma values are sited relative to luma values. /// /// Currently only centered siting is supported but will expand to support additional sitings. pub use sb::SkYUVAInfo_Siting as Siting; variant_name!(Siting::Centered); /// Ratio of Y/A values to U/V values in x and y. pub fn subsampling_factors(subsampling: Subsampling) -> (i32, i32) { let mut factors: [i32; 2] = Default::default(); unsafe { sb::C_SkYUVAInfo_SubsamplingFactors(subsampling.into_native(), &mut factors[0]) }; #[allow(clippy::tuple_array_conversions)] (factors[0], factors[1]) } /// `SubsamplingFactors(Subsampling)` if `plane_index` refers to a U/V plane and otherwise `(1, 1)` /// if inputs are valid. Invalid inputs consist of incompatible [PlaneConfig] [Subsampling] /// `plane_index` combinations. `(0, 0)` is returned for invalid inputs. pub fn plane_subsampling_factors( plane: PlaneConfig, subsampling: Subsampling, plane_index: usize, ) -> (i32, i32) { let mut factors: [i32; 2] = Default::default(); unsafe { sb::C_SkYUVAInfo_PlaneSubsamplingFactors( plane, subsampling.into_native(), plane_index.try_into().unwrap(), &mut factors[0], ) }; #[allow(clippy::tuple_array_conversions)] (factors[0], factors[1]) } /// Given image dimensions, a planer configuration, subsampling, and origin, determine the expected /// size of each plane. Returns the expected planes. The input image dimensions are as displayed /// (after the planes have been transformed to the intended display orientation). 
The plane /// dimensions are output as the planes are stored in memory (may be rotated from image dimensions). pub fn plane_dimensions( image_dimensions: impl Into<ISize>, config: PlaneConfig, subsampling: Subsampling, origin: EncodedOrigin, ) -> Vec<ISize> { let mut plane_dimensions = [ISize::default(); YUVAInfo::MAX_PLANES]; let size: usize = unsafe { SkYUVAInfo::PlaneDimensions( image_dimensions.into().into_native(), config, subsampling.into_native(), origin.into_native(), plane_dimensions.native_mut().as_mut_ptr(), ) } .try_into() .unwrap(); plane_dimensions[0..size].to_vec() } /// Number of planes for a given [PlaneConfig]. pub fn num_planes(config: PlaneConfig) -> usize
/// Number of Y, U, V, A channels in the ith plane for a given [PlaneConfig] (or [None] if i is /// invalid). pub fn num_channels_in_plane(config: PlaneConfig, i: usize) -> Option<usize> { (i < num_planes(config)).if_true_then_some(|| { unsafe { sb::C_SkYUVAInfo_NumChannelsInPlane(config, i.try_into().unwrap()) } .try_into() .unwrap() }) } /// Does the [PlaneConfig] have alpha values? pub fn has_alpha(config: PlaneConfig) -> bool { unsafe { sb::SkYUVAInfo_HasAlpha(config) } } impl Default for YUVAInfo { fn default() -> Self { Self::construct(|yi| unsafe { sb::C_SkYUVAInfo_Construct(yi) }) } } impl NativePartialEq for YUVAInfo { fn eq(&self, rhs: &Self) -> bool { unsafe { sb::C_SkYUVAInfo_equals(self.native(), rhs.native()) } } } impl fmt::Debug for YUVAInfo { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("YUVAInfo") .field("dimensions", &self.dimensions()) .field("plane_config", &self.plane_config()) .field("subsampling", &self.subsampling()) .field("yuv_color_space", &self.yuv_color_space()) .field("origin", &self.origin()) .field("siting_xy", &self.siting_xy()) .finish() } } impl YUVAInfo { pub const MAX_PLANES: usize = sb::SkYUVAInfo_kMaxPlanes as _; /// `dimensions` should specify the size of the full resolution image (after planes have been /// oriented to how the image is displayed as indicated by `origin`). pub fn new( dimensions: impl Into<ISize>, config: PlaneConfig, subsampling: Subsampling, color_space: image_info::YUVColorSpace, origin: impl Into<Option<EncodedOrigin>>, siting_xy: impl Into<Option<(Siting, Siting)>>, ) -> Option<Self> { let origin = origin.into().unwrap_or(EncodedOrigin::TopLeft); let (siting_x, siting_y) = siting_xy .into() .unwrap_or((Siting::Centered, Siting::Centered)); let n = unsafe { SkYUVAInfo::new( dimensions.into().into_native(), config, subsampling.into_native(), color_space, origin.into_native(), siting_x, siting_y, ) }; Self::native_is_valid(&n).if_true_then_some(|| Self::from_native_c(n)) } pub fn plane_config(&self) -> PlaneConfig { self.native().fPlaneConfig } pub fn subsampling(&self) -> Subsampling { Subsampling::from_native_c(self.native().fSubsampling) } pub fn plane_subsampling_factors(&self, plane_index: usize) -> (i32, i32) { plane_subsampling_factors(self.plane_config(), self.subsampling(), plane_index) } /// Dimensions of the full resolution image (after planes have been oriented to how the image /// is displayed as indicated by fOrigin). pub fn dimensions(&self) -> ISize { ISize::from_native_c(self.native().fDimensions) } pub fn width(&self) -> i32 { self.dimensions().width } pub fn height(&self) -> i32 { self.dimensions().height } pub fn yuv_color_space(&self) -> image_info::YUVColorSpace { self.native().fYUVColorSpace } pub fn siting_xy(&self) -> (Siting, Siting) { let n = self.native(); (n.fSitingX, n.fSitingY) } pub fn origin(&self) -> EncodedOrigin { EncodedOrigin::from_native_c(self.native().fOrigin) } pub fn origin_matrix(&self) -> Matrix { self.origin().to_matrix((self.width(), self.height())) } pub fn has_alpha(&self) -> bool { has_alpha(self.plane_config()) } /// Returns the dimensions for each plane. Dimensions are as stored in memory, before /// transformation to image display space as indicated by [origin(&self)]. pub fn plane_dimensions(&self) -> Vec<ISize> { self::plane_dimensions( self.dimensions(), self.plane_config(), self.subsampling(), self.origin(), ) } /// Given a per-plane row bytes, determine size to allocate for all planes. 
Optionally retrieves /// the per-plane byte sizes in planeSizes if not `None`. If total size overflows will return /// `SIZE_MAX` and set all planeSizes to `SIZE_MAX`. pub fn compute_total_bytes( &self, row_bytes: &[usize; Self::MAX_PLANES], plane_sizes: Option<&mut [usize; Self::MAX_PLANES]>, ) -> usize { unsafe { self.native().computeTotalBytes( row_bytes.as_ptr(), plane_sizes .map(|v| v.as_mut_ptr()) .unwrap_or(ptr::null_mut()), ) } } pub fn num_planes(&self) -> usize { num_planes(self.plane_config()) } pub fn num_channels_in_plane(&self, i: usize) -> Option<usize> { num_channels_in_plane(self.plane_config(), i) } /// Returns a [YUVAInfo] that is identical to this one but with the passed [Subsampling]. If the /// passed [Subsampling] is not [Subsampling::S444] and this info's [PlaneConfig] is not /// compatible with chroma subsampling (because Y is in the same plane as UV) then the result /// will be `None`. pub fn with_subsampling(&self, subsampling: Subsampling) -> Option<Self> { Self::try_construct(|info| unsafe { sb::C_SkYUVAInfo_makeSubsampling(self.native(), subsampling.into_native(), info); Self::native_is_valid(&*info) }) } /// Returns a [YUVAInfo] that is identical to this one but with the passed dimensions. If the /// passed dimensions is empty then the result will be `None`. pub fn with_dimensions(&self, dimensions: impl Into<ISize>) -> Option<Self> { Self::try_construct(|info| unsafe { sb::C_SkYUVAInfo_makeDimensions(self.native(), dimensions.into().native(), info); Self::native_is_valid(&*info) }) } pub(crate) fn native_is_valid(info: &SkYUVAInfo) -> bool { info.fPlaneConfig!= PlaneConfig::Unknown } }
{ unsafe { sb::C_SkYUVAInfo_NumPlanes(config) } .try_into() .unwrap() }
identifier_body
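Per the `compute_total_bytes` docs in the sample above, the total allocation is one `row_bytes * rows` term per plane, saturating to `SIZE_MAX` on overflow. A sketch of that logic (illustrative, not the Skia implementation):

```rust
fn total_bytes(row_bytes: &[usize], plane_rows: &[usize]) -> usize {
    let mut total = 0usize;
    for (&rb, &rows) in row_bytes.iter().zip(plane_rows) {
        // Saturate to usize::MAX on overflow, mirroring the documented
        // SIZE_MAX behavior.
        let plane = match rb.checked_mul(rows) {
            Some(n) => n,
            None => return usize::MAX,
        };
        total = match total.checked_add(plane) {
            Some(n) => n,
            None => return usize::MAX,
        };
    }
    total
}

fn main() {
    // Y_UV 4:2:0 at 640x480: a full-res Y plane plus a half-height UV plane.
    assert_eq!(total_bytes(&[640, 640], &[480, 240]), 640 * 480 + 640 * 240);
}
```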
yuva_info.rs
use super::image_info; use crate::{prelude::*, EncodedOrigin, ISize, Matrix}; use skia_bindings::{self as sb, SkYUVAInfo, SkYUVAInfo_Subsampling}; use std::{fmt, ptr}; /// Specifies the structure of planes for a YUV image with optional alpha. The actual planar data /// is not part of this structure and depending on usage is in external textures or pixmaps. pub type YUVAInfo = Handle<SkYUVAInfo>; unsafe_send_sync!(YUVAInfo); impl NativeDrop for SkYUVAInfo { fn drop(&mut self) { unsafe { sb::C_SkYUVAInfo_destruct(self) } } } /// Specifies how YUV (and optionally A) are divided among planes. Planes are separated by /// underscores in the enum value names. Within each plane the pixmap/texture channels are /// mapped to the YUVA channels in the order specified, e.g. for kY_UV Y is in channel 0 of plane /// 0, U is in channel 0 of plane 1, and V is in channel 1 of plane 1. Channel ordering /// within a pixmap/texture given the channels it contains: /// A: 0:A /// Luminance/Gray: 0:Gray /// Luminance/Gray + Alpha: 0:Gray, 1:A /// RG 0:R, 1:G /// RGB 0:R, 1:G, 2:B /// RGBA 0:R, 1:G, 2:B, 3:A pub use sb::SkYUVAInfo_PlaneConfig as PlaneConfig; variant_name!(PlaneConfig::YUV); /// UV subsampling is also specified in the enum value names using J:a:b notation (e.g. 4:2:0 is /// 1/2 horizontal and 1/2 vertical resolution for U and V). If alpha is present it is not sub- /// sampled. Note that Subsampling values other than k444 are only valid with [PlaneConfig] values /// that have U and V in different planes than Y (and A, if present). #[repr(i32)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum Subsampling { Unknown = SkYUVAInfo_Subsampling::kUnknown as _, S444 = SkYUVAInfo_Subsampling::k444 as _, S422 = SkYUVAInfo_Subsampling::k422 as _, S420 = SkYUVAInfo_Subsampling::k420 as _, S440 = SkYUVAInfo_Subsampling::k440 as _, S411 = SkYUVAInfo_Subsampling::k411 as _, S410 = SkYUVAInfo_Subsampling::k410 as _, } native_transmutable!(SkYUVAInfo_Subsampling, Subsampling, subsampling_layout); /// Describes how subsampled chroma values are sited relative to luma values. /// /// Currently only centered siting is supported but will expand to support additional sitings. pub use sb::SkYUVAInfo_Siting as Siting; variant_name!(Siting::Centered); /// Ratio of Y/A values to U/V values in x and y. pub fn subsampling_factors(subsampling: Subsampling) -> (i32, i32) { let mut factors: [i32; 2] = Default::default(); unsafe { sb::C_SkYUVAInfo_SubsamplingFactors(subsampling.into_native(), &mut factors[0]) }; #[allow(clippy::tuple_array_conversions)] (factors[0], factors[1]) } /// `SubsamplingFactors(Subsampling)` if `plane_index` refers to a U/V plane and otherwise `(1, 1)` /// if inputs are valid. Invalid inputs consist of incompatible [PlaneConfig] [Subsampling] /// `plane_index` combinations. `(0, 0)` is returned for invalid inputs. pub fn plane_subsampling_factors( plane: PlaneConfig, subsampling: Subsampling, plane_index: usize, ) -> (i32, i32) { let mut factors: [i32; 2] = Default::default(); unsafe { sb::C_SkYUVAInfo_PlaneSubsamplingFactors( plane, subsampling.into_native(), plane_index.try_into().unwrap(), &mut factors[0], ) }; #[allow(clippy::tuple_array_conversions)] (factors[0], factors[1]) } /// Given image dimensions, a planer configuration, subsampling, and origin, determine the expected /// size of each plane. Returns the expected planes. The input image dimensions are as displayed /// (after the planes have been transformed to the intended display orientation). 
The plane /// dimensions are output as the planes are stored in memory (may be rotated from image dimensions). pub fn plane_dimensions( image_dimensions: impl Into<ISize>, config: PlaneConfig, subsampling: Subsampling, origin: EncodedOrigin, ) -> Vec<ISize> { let mut plane_dimensions = [ISize::default(); YUVAInfo::MAX_PLANES]; let size: usize = unsafe { SkYUVAInfo::PlaneDimensions( image_dimensions.into().into_native(), config, subsampling.into_native(), origin.into_native(), plane_dimensions.native_mut().as_mut_ptr(), ) } .try_into() .unwrap(); plane_dimensions[0..size].to_vec() } /// Number of planes for a given [PlaneConfig]. pub fn num_planes(config: PlaneConfig) -> usize { unsafe { sb::C_SkYUVAInfo_NumPlanes(config) } .try_into() .unwrap() } /// Number of Y, U, V, A channels in the ith plane for a given [PlaneConfig] (or [None] if i is /// invalid). pub fn num_channels_in_plane(config: PlaneConfig, i: usize) -> Option<usize> { (i < num_planes(config)).if_true_then_some(|| { unsafe { sb::C_SkYUVAInfo_NumChannelsInPlane(config, i.try_into().unwrap()) } .try_into() .unwrap() }) } /// Does the [PlaneConfig] have alpha values? pub fn has_alpha(config: PlaneConfig) -> bool { unsafe { sb::SkYUVAInfo_HasAlpha(config) } } impl Default for YUVAInfo { fn default() -> Self { Self::construct(|yi| unsafe { sb::C_SkYUVAInfo_Construct(yi) }) } } impl NativePartialEq for YUVAInfo { fn eq(&self, rhs: &Self) -> bool { unsafe { sb::C_SkYUVAInfo_equals(self.native(), rhs.native()) } } } impl fmt::Debug for YUVAInfo { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("YUVAInfo") .field("dimensions", &self.dimensions()) .field("plane_config", &self.plane_config()) .field("subsampling", &self.subsampling()) .field("yuv_color_space", &self.yuv_color_space()) .field("origin", &self.origin()) .field("siting_xy", &self.siting_xy()) .finish() } } impl YUVAInfo { pub const MAX_PLANES: usize = sb::SkYUVAInfo_kMaxPlanes as _; /// `dimensions` should specify the size of the full resolution image (after planes have been /// oriented to how the image is displayed as indicated by `origin`). pub fn new( dimensions: impl Into<ISize>, config: PlaneConfig, subsampling: Subsampling, color_space: image_info::YUVColorSpace, origin: impl Into<Option<EncodedOrigin>>, siting_xy: impl Into<Option<(Siting, Siting)>>, ) -> Option<Self> { let origin = origin.into().unwrap_or(EncodedOrigin::TopLeft); let (siting_x, siting_y) = siting_xy .into() .unwrap_or((Siting::Centered, Siting::Centered)); let n = unsafe { SkYUVAInfo::new( dimensions.into().into_native(), config, subsampling.into_native(), color_space, origin.into_native(), siting_x, siting_y, ) }; Self::native_is_valid(&n).if_true_then_some(|| Self::from_native_c(n)) } pub fn plane_config(&self) -> PlaneConfig { self.native().fPlaneConfig } pub fn subsampling(&self) -> Subsampling { Subsampling::from_native_c(self.native().fSubsampling) } pub fn plane_subsampling_factors(&self, plane_index: usize) -> (i32, i32) { plane_subsampling_factors(self.plane_config(), self.subsampling(), plane_index) } /// Dimensions of the full resolution image (after planes have been oriented to how the image /// is displayed as indicated by fOrigin). 
pub fn dimensions(&self) -> ISize { ISize::from_native_c(self.native().fDimensions) } pub fn width(&self) -> i32 { self.dimensions().width } pub fn height(&self) -> i32 { self.dimensions().height } pub fn yuv_color_space(&self) -> image_info::YUVColorSpace { self.native().fYUVColorSpace } pub fn siting_xy(&self) -> (Siting, Siting) { let n = self.native(); (n.fSitingX, n.fSitingY) } pub fn origin(&self) -> EncodedOrigin { EncodedOrigin::from_native_c(self.native().fOrigin) } pub fn origin_matrix(&self) -> Matrix { self.origin().to_matrix((self.width(), self.height())) }
pub fn has_alpha(&self) -> bool {
    has_alpha(self.plane_config())
}

/// Returns the dimensions for each plane. Dimensions are as stored in memory, before
/// transformation to image display space as indicated by [origin(&self)].
pub fn plane_dimensions(&self) -> Vec<ISize> {
    self::plane_dimensions(
        self.dimensions(),
        self.plane_config(),
        self.subsampling(),
        self.origin(),
    )
}

/// Given per-plane row bytes, determines the size to allocate for all planes. Optionally
/// retrieves the per-plane byte sizes in `plane_sizes` if not `None`. If the total size
/// overflows, `SIZE_MAX` is returned and all plane sizes are set to `SIZE_MAX`.
pub fn compute_total_bytes(
    &self,
    row_bytes: &[usize; Self::MAX_PLANES],
    plane_sizes: Option<&mut [usize; Self::MAX_PLANES]>,
) -> usize {
    unsafe {
        self.native().computeTotalBytes(
            row_bytes.as_ptr(),
            plane_sizes
                .map(|v| v.as_mut_ptr())
                .unwrap_or(ptr::null_mut()),
        )
    }
}

pub fn num_planes(&self) -> usize {
    num_planes(self.plane_config())
}

pub fn num_channels_in_plane(&self, i: usize) -> Option<usize> {
    num_channels_in_plane(self.plane_config(), i)
}

/// Returns a [YUVAInfo] that is identical to this one but with the passed [Subsampling]. If the
/// passed [Subsampling] is not [Subsampling::S444] and this info's [PlaneConfig] is not
/// compatible with chroma subsampling (because Y is in the same plane as UV) then the result
/// will be `None`.
pub fn with_subsampling(&self, subsampling: Subsampling) -> Option<Self> {
    Self::try_construct(|info| unsafe {
        sb::C_SkYUVAInfo_makeSubsampling(self.native(), subsampling.into_native(), info);
        Self::native_is_valid(&*info)
    })
}

/// Returns a [YUVAInfo] that is identical to this one but with the passed dimensions. If the
/// passed dimensions are empty then the result will be `None`.
pub fn with_dimensions(&self, dimensions: impl Into<ISize>) -> Option<Self> {
    Self::try_construct(|info| unsafe {
        sb::C_SkYUVAInfo_makeDimensions(self.native(), dimensions.into().native(), info);
        Self::native_is_valid(&*info)
    })
}

pub(crate) fn native_is_valid(info: &SkYUVAInfo) -> bool {
    info.fPlaneConfig != PlaneConfig::Unknown
}
}
random_line_split
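The skia-safe `YUVAInfo` row above delegates all of its plane math to native Skia calls. As a rough, std-only sketch of the arithmetic behind `subsampling_factors` and chroma plane sizing (the factor table and the ceiling-division rounding are assumptions for illustration, not the bindings' actual implementation):

// Illustrative, std-only sketch of chroma-subsampling arithmetic.
// Not the skia-safe code: the factor table and rounding are assumptions.
#[allow(dead_code)]
#[derive(Copy, Clone)]
enum Subsampling { S444, S422, S420, S440, S411, S410 }

/// Ratio of Y/A samples to U/V samples in x and y (mirrors `subsampling_factors`).
fn subsampling_factors(s: Subsampling) -> (i32, i32) {
    match s {
        Subsampling::S444 => (1, 1),
        Subsampling::S422 => (2, 1),
        Subsampling::S420 => (2, 2),
        Subsampling::S440 => (1, 2),
        Subsampling::S411 => (4, 1),
        Subsampling::S410 => (4, 2),
    }
}

/// Expected U/V plane size for a full-resolution image, rounding up so that
/// odd image dimensions still cover every luma sample.
fn chroma_plane_dimensions(width: i32, height: i32, s: Subsampling) -> (i32, i32) {
    let (fx, fy) = subsampling_factors(s);
    ((width + fx - 1) / fx, (height + fy - 1) / fy)
}

fn main() {
    // 4:4:4 leaves chroma at full resolution.
    assert_eq!(chroma_plane_dimensions(1920, 1080, Subsampling::S444), (1920, 1080));
    // 4:2:0 halves both axes: a 1920x1080 image gets 960x540 chroma planes.
    assert_eq!(chroma_plane_dimensions(1920, 1080, Subsampling::S420), (960, 540));
    // Odd sizes round up: 5x5 at 4:2:0 still needs 3x3 chroma samples.
    assert_eq!(chroma_plane_dimensions(5, 5, Subsampling::S420), (3, 3));
    // 4:1:1 quarters the horizontal axis only.
    assert_eq!(chroma_plane_dimensions(1920, 1080, Subsampling::S411), (480, 1080));
    println!("subsampling arithmetic checks passed");
}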
weapon.rs
use std::{ path::Path, sync::{Arc, Mutex}, }; use crate::{ projectile::{ Projectile, ProjectileKind, }, actor::Actor, HandleFromSelf, GameTime, level::CleanUp, }; use rg3d::{ physics::{RayCastOptions, Physics}, sound::{ source::Source, buffer::BufferKind, context::Context, }, engine::resource_manager::ResourceManager, resource::{ model::Model, }, scene::{ SceneInterfaceMut, node::Node, Scene, graph::Graph, light::{ LightKind, LightBuilder, PointLight, }, base::{BaseBuilder, AsBase}, }, core::{ pool::{ Pool, PoolIterator, PoolIteratorMut, Handle, }, color::Color, visitor::{ Visit, VisitResult, Visitor, }, math::{vec3::Vec3, ray::Ray}, }, }; #[derive(Copy, Clone, PartialEq, Eq)] pub enum WeaponKind { M4, Ak47, PlasmaRifle, } impl WeaponKind { pub fn id(&self) -> u32 { match self { WeaponKind::M4 => 0, WeaponKind::Ak47 => 1, WeaponKind::PlasmaRifle => 2 } } pub fn new(id: u32) -> Result<Self, String> { match id { 0 => Ok(WeaponKind::M4), 1 => Ok(WeaponKind::Ak47), 2 => Ok(WeaponKind::PlasmaRifle), _ => return Err(format!("unknown weapon kind {}", id)) } } } pub struct Weapon { self_handle: Handle<Weapon>, kind: WeaponKind, model: Handle<Node>, laser_dot: Handle<Node>, shot_point: Handle<Node>, offset: Vec3, dest_offset: Vec3, last_shot_time: f64, shot_position: Vec3, owner: Handle<Actor>, ammo: u32, definition: &'static WeaponDefinition, } pub struct WeaponDefinition { model: &'static str, shot_sound: &'static str, ammo: u32, } impl HandleFromSelf<Weapon> for Weapon { fn self_handle(&self) -> Handle<Weapon> { self.self_handle } } impl Default for Weapon { fn default() -> Self { Self { self_handle: Default::default(), kind: WeaponKind::M4, laser_dot: Handle::NONE, model: Handle::NONE, offset: Vec3::ZERO, shot_point: Handle::NONE, dest_offset: Vec3::ZERO, last_shot_time: 0.0, shot_position: Vec3::ZERO, owner: Handle::NONE, ammo: 250, definition: Self::get_definition(WeaponKind::M4), } } } impl Visit for Weapon { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; let mut kind_id = self.kind.id(); kind_id.visit("KindId", visitor)?; if visitor.is_reading() { self.kind = WeaponKind::new(kind_id)? } self.definition = Self::get_definition(self.kind); self.self_handle.visit("SelfHandle", visitor)?; self.model.visit("Model", visitor)?; self.laser_dot.visit("LaserDot", visitor)?; self.offset.visit("Offset", visitor)?; self.dest_offset.visit("DestOffset", visitor)?; self.last_shot_time.visit("LastShotTime", visitor)?; self.owner.visit("Owner", visitor)?; self.ammo.visit("Ammo", visitor)?; visitor.leave_region() } } impl Weapon { pub fn get_definition(kind: WeaponKind) -> &'static WeaponDefinition { match kind { WeaponKind::M4 => { static DEFINITION: WeaponDefinition = WeaponDefinition { model: "data/models/m4.FBX", shot_sound: "data/sounds/m4_shot.wav", ammo: 115, }; &DEFINITION } WeaponKind::Ak47 => { static DEFINITION: WeaponDefinition = WeaponDefinition { model: "data/models/ak47.FBX", shot_sound: "data/sounds/m4_shot.wav", ammo: 100, }; &DEFINITION } WeaponKind::PlasmaRifle => { static DEFINITION: WeaponDefinition = WeaponDefinition { model: "data/models/plasma_rifle.FBX", shot_sound: "data/sounds/plasma_shot.wav", ammo: 40, }; &DEFINITION } } } pub fn new(kind: WeaponKind, resource_manager: &mut ResourceManager, scene: &mut Scene) -> Weapon { let definition = Self::get_definition(kind); let model = Model::instantiate( resource_manager.request_model(Path::new(definition.model)).unwrap(), scene).root; let SceneInterfaceMut { graph,.. 
} = scene.interface_mut(); let laser_dot = graph.add_node(Node::Light( LightBuilder::new(LightKind::Point(PointLight::new(0.5)), BaseBuilder::new()) .with_color(Color::opaque(255, 0, 0)) .cast_shadows(false) .build())); let shot_point = graph.find_by_name(model, "Weapon:ShotPoint"); if shot_point.is_none() { println!("Shot point not found!"); } Weapon { kind, laser_dot, model, shot_point, definition, ammo: definition.ammo, ..Default::default() } } pub fn set_visibility(&self, visibility: bool, graph: &mut Graph) { graph.get_mut(self.model).base_mut().set_visibility(visibility); graph.get_mut(self.laser_dot).base_mut().set_visibility(visibility); } pub fn get_model(&self) -> Handle<Node> { self.model } pub fn update(&mut self, scene: &mut Scene) { let SceneInterfaceMut { graph, physics,.. } = scene.interface_mut(); self.offset.follow(&self.dest_offset, 0.2); self.update_laser_sight(graph, physics); let node = graph.get_mut(self.model); node.base_mut().get_local_transform_mut().set_position(self.offset); self.shot_position = node.base().get_global_position(); } fn get_shot_position(&self, graph: &Graph) -> Vec3 { if self.shot_point.is_some() { graph.get(self.shot_point).base().get_global_position() } else { // Fallback graph.get(self.model).base().get_global_position() } } pub fn get_kind(&self) -> WeaponKind { self.kind } pub fn add_ammo(&mut self, amount: u32) { self.ammo += amount; } fn update_laser_sight(&self, graph: &mut Graph, physics: &Physics) { let mut laser_dot_position = Vec3::ZERO; let model = graph.get(self.model); let begin = model.base().get_global_position(); let end = begin + model.base().get_look_vector().scale(100.0); if let Some(ray) = Ray::from_two_points(&begin, &end) { let mut result = Vec::new(); if physics.ray_cast(&ray, RayCastOptions::default(), &mut result) { let offset = result[0].normal.normalized().unwrap_or_default().scale(0.2); laser_dot_position = result[0].position + offset; } } graph.get_mut(self.laser_dot).base_mut().get_local_transform_mut().set_position(laser_dot_position); } fn play_shot_sound(&self, resource_manager: &mut ResourceManager, sound_context: Arc<Mutex<Context>>) { let mut sound_context = sound_context.lock().unwrap(); let shot_buffer = resource_manager.request_sound_buffer( Path::new(self.definition.shot_sound), BufferKind::Normal).unwrap(); let mut shot_sound = Source::new_spatial(shot_buffer).unwrap(); shot_sound.set_play_once(true); shot_sound.play(); shot_sound.as_spatial_mut().set_position(&self.shot_position); sound_context.add_source(shot_sound); } pub fn get_ammo(&self) -> u32 { self.ammo } pub fn get_owner(&self) -> Handle<Actor> { self.owner } pub fn set_owner(&mut self, owner: Handle<Actor>) { self.owner = owner; } pub fn try_shoot(&mut self, scene: &mut Scene, resource_manager: &mut ResourceManager, sound_context: Arc<Mutex<Context>>, time: GameTime, weapon_velocity: Vec3) -> Option<Projectile> { if self.ammo!= 0 && time.elapsed - self.last_shot_time >= 0.1 { self.ammo -= 1; self.offset = Vec3::new(0.0, 0.0, -0.05); self.last_shot_time = time.elapsed; self.play_shot_sound(resource_manager, sound_context); let (dir, pos) = { let graph = scene.interface().graph; (graph.get(self.model).base().get_look_vector(), self.get_shot_position(graph)) }; match self.kind { WeaponKind::M4 | WeaponKind::Ak47 => { Some(Projectile::new(ProjectileKind::Bullet, resource_manager, scene, dir, pos, self.self_handle, weapon_velocity)) } WeaponKind::PlasmaRifle => { Some(Projectile::new(ProjectileKind::Plasma, resource_manager, scene, dir, pos, 
self.self_handle, weapon_velocity)) } } } else { None } } } impl CleanUp for Weapon { fn clean_up(&mut self, scene: &mut Scene) { let SceneInterfaceMut { graph,.. } = scene.interface_mut(); graph.remove_node(self.model); graph.remove_node(self.laser_dot); } } pub struct WeaponContainer { pool: Pool<Weapon> } impl WeaponContainer { pub fn new() -> Self { Self { pool: Pool::new() } } pub fn add(&mut self, weapon: Weapon) -> Handle<Weapon> { let handle = self.pool.spawn(weapon); self.pool.borrow_mut(handle).self_handle = handle; handle } pub fn
(&self) -> PoolIterator<Weapon> { self.pool.iter() } pub fn iter_mut(&mut self) -> PoolIteratorMut<Weapon> { self.pool.iter_mut() } pub fn get(&self, handle: Handle<Weapon>) -> &Weapon { self.pool.borrow(handle) } pub fn get_mut(&mut self, handle: Handle<Weapon>) -> &mut Weapon { self.pool.borrow_mut(handle) } pub fn update(&mut self, scene: &mut Scene) { for weapon in self.pool.iter_mut() { weapon.update(scene) } } } impl Visit for WeaponContainer { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.pool.visit("Pool", visitor)?; visitor.leave_region() } }
iter
identifier_name
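The `weapon.rs` rows center on rg3d's pool/handle idiom: `WeaponContainer::add` spawns the weapon, then writes the freshly minted handle back into it so the weapon can identify itself later (for example as a projectile owner). A minimal std-only sketch of that back-reference pattern; this toy pool omits the generation counters and free list a real rg3d `Pool` has:

// Simplified pool/handle sketch: indices only, no generations or reuse.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
struct Handle(usize);

#[derive(Default)]
struct Weapon {
    self_handle: Handle,
    ammo: u32,
}

#[derive(Default)]
struct Pool {
    items: Vec<Weapon>,
}

impl Pool {
    fn spawn(&mut self, w: Weapon) -> Handle {
        self.items.push(w);
        Handle(self.items.len() - 1)
    }
    fn borrow_mut(&mut self, h: Handle) -> &mut Weapon {
        &mut self.items[h.0]
    }
    fn iter(&self) -> impl Iterator<Item = &Weapon> {
        self.items.iter()
    }
}

fn add(pool: &mut Pool, weapon: Weapon) -> Handle {
    let handle = pool.spawn(weapon);
    // The back-reference lets an item name itself without holding a
    // pointer into the pool, which would fight the borrow checker.
    pool.borrow_mut(handle).self_handle = handle;
    handle
}

fn main() {
    let mut pool = Pool::default();
    let h = add(&mut pool, Weapon { ammo: 100, ..Default::default() });
    assert_eq!(pool.borrow_mut(h).self_handle, h);
    assert_eq!(pool.borrow_mut(h).ammo, 100);
    assert_eq!(pool.iter().count(), 1);
}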
weapon.rs
use std::{ path::Path, sync::{Arc, Mutex}, }; use crate::{ projectile::{ Projectile, ProjectileKind, }, actor::Actor, HandleFromSelf, GameTime, level::CleanUp, }; use rg3d::{ physics::{RayCastOptions, Physics}, sound::{ source::Source, buffer::BufferKind, context::Context, }, engine::resource_manager::ResourceManager, resource::{ model::Model, }, scene::{ SceneInterfaceMut, node::Node, Scene, graph::Graph, light::{ LightKind, LightBuilder, PointLight, }, base::{BaseBuilder, AsBase}, }, core::{ pool::{ Pool, PoolIterator, PoolIteratorMut, Handle, }, color::Color, visitor::{ Visit, VisitResult, Visitor, }, math::{vec3::Vec3, ray::Ray}, }, }; #[derive(Copy, Clone, PartialEq, Eq)] pub enum WeaponKind { M4, Ak47, PlasmaRifle, } impl WeaponKind { pub fn id(&self) -> u32 { match self { WeaponKind::M4 => 0, WeaponKind::Ak47 => 1, WeaponKind::PlasmaRifle => 2 } } pub fn new(id: u32) -> Result<Self, String> { match id { 0 => Ok(WeaponKind::M4), 1 => Ok(WeaponKind::Ak47), 2 => Ok(WeaponKind::PlasmaRifle), _ => return Err(format!("unknown weapon kind {}", id)) } } } pub struct Weapon { self_handle: Handle<Weapon>, kind: WeaponKind, model: Handle<Node>, laser_dot: Handle<Node>, shot_point: Handle<Node>, offset: Vec3, dest_offset: Vec3, last_shot_time: f64, shot_position: Vec3, owner: Handle<Actor>, ammo: u32, definition: &'static WeaponDefinition, } pub struct WeaponDefinition { model: &'static str, shot_sound: &'static str, ammo: u32, } impl HandleFromSelf<Weapon> for Weapon { fn self_handle(&self) -> Handle<Weapon> { self.self_handle } } impl Default for Weapon { fn default() -> Self { Self { self_handle: Default::default(), kind: WeaponKind::M4, laser_dot: Handle::NONE, model: Handle::NONE, offset: Vec3::ZERO, shot_point: Handle::NONE, dest_offset: Vec3::ZERO, last_shot_time: 0.0, shot_position: Vec3::ZERO, owner: Handle::NONE, ammo: 250, definition: Self::get_definition(WeaponKind::M4), } } } impl Visit for Weapon { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; let mut kind_id = self.kind.id(); kind_id.visit("KindId", visitor)?; if visitor.is_reading() { self.kind = WeaponKind::new(kind_id)? } self.definition = Self::get_definition(self.kind); self.self_handle.visit("SelfHandle", visitor)?; self.model.visit("Model", visitor)?; self.laser_dot.visit("LaserDot", visitor)?; self.offset.visit("Offset", visitor)?; self.dest_offset.visit("DestOffset", visitor)?; self.last_shot_time.visit("LastShotTime", visitor)?; self.owner.visit("Owner", visitor)?; self.ammo.visit("Ammo", visitor)?; visitor.leave_region() } } impl Weapon { pub fn get_definition(kind: WeaponKind) -> &'static WeaponDefinition { match kind { WeaponKind::M4 => { static DEFINITION: WeaponDefinition = WeaponDefinition { model: "data/models/m4.FBX", shot_sound: "data/sounds/m4_shot.wav", ammo: 115, }; &DEFINITION } WeaponKind::Ak47 => { static DEFINITION: WeaponDefinition = WeaponDefinition { model: "data/models/ak47.FBX", shot_sound: "data/sounds/m4_shot.wav", ammo: 100, }; &DEFINITION } WeaponKind::PlasmaRifle => { static DEFINITION: WeaponDefinition = WeaponDefinition { model: "data/models/plasma_rifle.FBX", shot_sound: "data/sounds/plasma_shot.wav", ammo: 40, }; &DEFINITION } } } pub fn new(kind: WeaponKind, resource_manager: &mut ResourceManager, scene: &mut Scene) -> Weapon { let definition = Self::get_definition(kind); let model = Model::instantiate( resource_manager.request_model(Path::new(definition.model)).unwrap(), scene).root; let SceneInterfaceMut { graph,.. 
} = scene.interface_mut(); let laser_dot = graph.add_node(Node::Light( LightBuilder::new(LightKind::Point(PointLight::new(0.5)), BaseBuilder::new()) .with_color(Color::opaque(255, 0, 0)) .cast_shadows(false) .build())); let shot_point = graph.find_by_name(model, "Weapon:ShotPoint"); if shot_point.is_none() { println!("Shot point not found!"); } Weapon { kind, laser_dot, model, shot_point, definition, ammo: definition.ammo, ..Default::default() } } pub fn set_visibility(&self, visibility: bool, graph: &mut Graph) { graph.get_mut(self.model).base_mut().set_visibility(visibility); graph.get_mut(self.laser_dot).base_mut().set_visibility(visibility); } pub fn get_model(&self) -> Handle<Node> { self.model } pub fn update(&mut self, scene: &mut Scene) { let SceneInterfaceMut { graph, physics,.. } = scene.interface_mut(); self.offset.follow(&self.dest_offset, 0.2); self.update_laser_sight(graph, physics); let node = graph.get_mut(self.model); node.base_mut().get_local_transform_mut().set_position(self.offset); self.shot_position = node.base().get_global_position(); } fn get_shot_position(&self, graph: &Graph) -> Vec3 { if self.shot_point.is_some() { graph.get(self.shot_point).base().get_global_position() } else { // Fallback graph.get(self.model).base().get_global_position() } } pub fn get_kind(&self) -> WeaponKind { self.kind } pub fn add_ammo(&mut self, amount: u32) { self.ammo += amount; } fn update_laser_sight(&self, graph: &mut Graph, physics: &Physics) { let mut laser_dot_position = Vec3::ZERO; let model = graph.get(self.model); let begin = model.base().get_global_position(); let end = begin + model.base().get_look_vector().scale(100.0); if let Some(ray) = Ray::from_two_points(&begin, &end) { let mut result = Vec::new(); if physics.ray_cast(&ray, RayCastOptions::default(), &mut result) { let offset = result[0].normal.normalized().unwrap_or_default().scale(0.2); laser_dot_position = result[0].position + offset; } } graph.get_mut(self.laser_dot).base_mut().get_local_transform_mut().set_position(laser_dot_position); } fn play_shot_sound(&self, resource_manager: &mut ResourceManager, sound_context: Arc<Mutex<Context>>) { let mut sound_context = sound_context.lock().unwrap(); let shot_buffer = resource_manager.request_sound_buffer( Path::new(self.definition.shot_sound), BufferKind::Normal).unwrap(); let mut shot_sound = Source::new_spatial(shot_buffer).unwrap(); shot_sound.set_play_once(true); shot_sound.play(); shot_sound.as_spatial_mut().set_position(&self.shot_position); sound_context.add_source(shot_sound); } pub fn get_ammo(&self) -> u32 { self.ammo } pub fn get_owner(&self) -> Handle<Actor> { self.owner } pub fn set_owner(&mut self, owner: Handle<Actor>) { self.owner = owner; } pub fn try_shoot(&mut self, scene: &mut Scene, resource_manager: &mut ResourceManager, sound_context: Arc<Mutex<Context>>, time: GameTime, weapon_velocity: Vec3) -> Option<Projectile>
Some(Projectile::new(ProjectileKind::Plasma, resource_manager, scene, dir, pos,
                     self.self_handle, weapon_velocity))
                }
            }
        } else {
            None
        }
    }
}

impl CleanUp for Weapon {
    fn clean_up(&mut self, scene: &mut Scene) {
        let SceneInterfaceMut { graph, .. } = scene.interface_mut();
        graph.remove_node(self.model);
        graph.remove_node(self.laser_dot);
    }
}

pub struct WeaponContainer {
    pool: Pool<Weapon>
}

impl WeaponContainer {
    pub fn new() -> Self {
        Self {
            pool: Pool::new()
        }
    }

    pub fn add(&mut self, weapon: Weapon) -> Handle<Weapon> {
        let handle = self.pool.spawn(weapon);
        self.pool.borrow_mut(handle).self_handle = handle;
        handle
    }

    pub fn iter(&self) -> PoolIterator<Weapon> {
        self.pool.iter()
    }

    pub fn iter_mut(&mut self) -> PoolIteratorMut<Weapon> {
        self.pool.iter_mut()
    }

    pub fn get(&self, handle: Handle<Weapon>) -> &Weapon {
        self.pool.borrow(handle)
    }

    pub fn get_mut(&mut self, handle: Handle<Weapon>) -> &mut Weapon {
        self.pool.borrow_mut(handle)
    }

    pub fn update(&mut self, scene: &mut Scene) {
        for weapon in self.pool.iter_mut() {
            weapon.update(scene)
        }
    }
}

impl Visit for WeaponContainer {
    fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult {
        visitor.enter_region(name)?;
        self.pool.visit("Pool", visitor)?;
        visitor.leave_region()
    }
}
{ if self.ammo != 0 && time.elapsed - self.last_shot_time >= 0.1 { self.ammo -= 1; self.offset = Vec3::new(0.0, 0.0, -0.05); self.last_shot_time = time.elapsed; self.play_shot_sound(resource_manager, sound_context); let (dir, pos) = { let graph = scene.interface().graph; (graph.get(self.model).base().get_look_vector(), self.get_shot_position(graph)) }; match self.kind { WeaponKind::M4 | WeaponKind::Ak47 => { Some(Projectile::new(ProjectileKind::Bullet, resource_manager, scene, dir, pos, self.self_handle, weapon_velocity)) } WeaponKind::PlasmaRifle => {
identifier_body
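The `identifier_body` target above is `try_shoot`, whose core is an ammo-and-cooldown gate. A self-contained sketch of just that gate, with a unit struct standing in for the projectile and the 0.1 s cooldown taken from the code above:

// Fire-rate gate: shoot only if ammo remains and >= 0.1 s has elapsed.
struct Gun {
    ammo: u32,
    last_shot_time: f64,
}

struct Shot; // stand-in for the Projectile built in the real code

impl Gun {
    fn try_shoot(&mut self, elapsed: f64) -> Option<Shot> {
        if self.ammo != 0 && elapsed - self.last_shot_time >= 0.1 {
            self.ammo -= 1;
            self.last_shot_time = elapsed;
            Some(Shot)
        } else {
            None
        }
    }
}

fn main() {
    let mut gun = Gun { ammo: 2, last_shot_time: 0.0 };
    assert!(gun.try_shoot(0.2).is_some());  // cooldown elapsed, ammo left
    assert!(gun.try_shoot(0.25).is_none()); // only 0.05 s since last shot
    assert!(gun.try_shoot(0.4).is_some());  // fires again
    assert!(gun.try_shoot(1.0).is_none());  // out of ammo
}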
weapon.rs
use std::{ path::Path, sync::{Arc, Mutex}, }; use crate::{ projectile::{ Projectile, ProjectileKind, }, actor::Actor, HandleFromSelf, GameTime, level::CleanUp, }; use rg3d::{ physics::{RayCastOptions, Physics}, sound::{ source::Source, buffer::BufferKind, context::Context, }, engine::resource_manager::ResourceManager, resource::{ model::Model, }, scene::{ SceneInterfaceMut, node::Node, Scene, graph::Graph, light::{ LightKind, LightBuilder, PointLight, }, base::{BaseBuilder, AsBase}, }, core::{ pool::{ Pool, PoolIterator, PoolIteratorMut, Handle, }, color::Color, visitor::{ Visit, VisitResult, Visitor, }, math::{vec3::Vec3, ray::Ray}, }, }; #[derive(Copy, Clone, PartialEq, Eq)] pub enum WeaponKind { M4, Ak47, PlasmaRifle, } impl WeaponKind { pub fn id(&self) -> u32 { match self { WeaponKind::M4 => 0, WeaponKind::Ak47 => 1, WeaponKind::PlasmaRifle => 2 } } pub fn new(id: u32) -> Result<Self, String> { match id { 0 => Ok(WeaponKind::M4), 1 => Ok(WeaponKind::Ak47), 2 => Ok(WeaponKind::PlasmaRifle), _ => return Err(format!("unknown weapon kind {}", id)) } } } pub struct Weapon { self_handle: Handle<Weapon>, kind: WeaponKind, model: Handle<Node>, laser_dot: Handle<Node>, shot_point: Handle<Node>, offset: Vec3, dest_offset: Vec3, last_shot_time: f64, shot_position: Vec3, owner: Handle<Actor>, ammo: u32, definition: &'static WeaponDefinition, } pub struct WeaponDefinition { model: &'static str, shot_sound: &'static str, ammo: u32, } impl HandleFromSelf<Weapon> for Weapon { fn self_handle(&self) -> Handle<Weapon> { self.self_handle } } impl Default for Weapon { fn default() -> Self { Self { self_handle: Default::default(), kind: WeaponKind::M4, laser_dot: Handle::NONE, model: Handle::NONE, offset: Vec3::ZERO, shot_point: Handle::NONE, dest_offset: Vec3::ZERO, last_shot_time: 0.0, shot_position: Vec3::ZERO, owner: Handle::NONE, ammo: 250, definition: Self::get_definition(WeaponKind::M4), } } } impl Visit for Weapon { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; let mut kind_id = self.kind.id(); kind_id.visit("KindId", visitor)?; if visitor.is_reading() { self.kind = WeaponKind::new(kind_id)? } self.definition = Self::get_definition(self.kind); self.self_handle.visit("SelfHandle", visitor)?; self.model.visit("Model", visitor)?; self.laser_dot.visit("LaserDot", visitor)?; self.offset.visit("Offset", visitor)?; self.dest_offset.visit("DestOffset", visitor)?; self.last_shot_time.visit("LastShotTime", visitor)?; self.owner.visit("Owner", visitor)?; self.ammo.visit("Ammo", visitor)?; visitor.leave_region() } } impl Weapon { pub fn get_definition(kind: WeaponKind) -> &'static WeaponDefinition { match kind { WeaponKind::M4 => { static DEFINITION: WeaponDefinition = WeaponDefinition { model: "data/models/m4.FBX", shot_sound: "data/sounds/m4_shot.wav", ammo: 115, }; &DEFINITION } WeaponKind::Ak47 => { static DEFINITION: WeaponDefinition = WeaponDefinition { model: "data/models/ak47.FBX", shot_sound: "data/sounds/m4_shot.wav", ammo: 100, }; &DEFINITION } WeaponKind::PlasmaRifle => { static DEFINITION: WeaponDefinition = WeaponDefinition { model: "data/models/plasma_rifle.FBX", shot_sound: "data/sounds/plasma_shot.wav", ammo: 40, }; &DEFINITION } } } pub fn new(kind: WeaponKind, resource_manager: &mut ResourceManager, scene: &mut Scene) -> Weapon { let definition = Self::get_definition(kind); let model = Model::instantiate( resource_manager.request_model(Path::new(definition.model)).unwrap(), scene).root; let SceneInterfaceMut { graph,.. 
} = scene.interface_mut(); let laser_dot = graph.add_node(Node::Light( LightBuilder::new(LightKind::Point(PointLight::new(0.5)), BaseBuilder::new()) .with_color(Color::opaque(255, 0, 0)) .cast_shadows(false) .build())); let shot_point = graph.find_by_name(model, "Weapon:ShotPoint"); if shot_point.is_none() { println!("Shot point not found!"); } Weapon { kind, laser_dot, model, shot_point, definition, ammo: definition.ammo, ..Default::default() } } pub fn set_visibility(&self, visibility: bool, graph: &mut Graph) { graph.get_mut(self.model).base_mut().set_visibility(visibility); graph.get_mut(self.laser_dot).base_mut().set_visibility(visibility); } pub fn get_model(&self) -> Handle<Node> { self.model } pub fn update(&mut self, scene: &mut Scene) { let SceneInterfaceMut { graph, physics,.. } = scene.interface_mut(); self.offset.follow(&self.dest_offset, 0.2); self.update_laser_sight(graph, physics); let node = graph.get_mut(self.model); node.base_mut().get_local_transform_mut().set_position(self.offset); self.shot_position = node.base().get_global_position(); } fn get_shot_position(&self, graph: &Graph) -> Vec3 { if self.shot_point.is_some() { graph.get(self.shot_point).base().get_global_position() } else { // Fallback graph.get(self.model).base().get_global_position() } } pub fn get_kind(&self) -> WeaponKind { self.kind } pub fn add_ammo(&mut self, amount: u32) { self.ammo += amount; } fn update_laser_sight(&self, graph: &mut Graph, physics: &Physics) { let mut laser_dot_position = Vec3::ZERO; let model = graph.get(self.model); let begin = model.base().get_global_position(); let end = begin + model.base().get_look_vector().scale(100.0); if let Some(ray) = Ray::from_two_points(&begin, &end) { let mut result = Vec::new(); if physics.ray_cast(&ray, RayCastOptions::default(), &mut result) { let offset = result[0].normal.normalized().unwrap_or_default().scale(0.2); laser_dot_position = result[0].position + offset; } } graph.get_mut(self.laser_dot).base_mut().get_local_transform_mut().set_position(laser_dot_position); } fn play_shot_sound(&self, resource_manager: &mut ResourceManager, sound_context: Arc<Mutex<Context>>) { let mut sound_context = sound_context.lock().unwrap(); let shot_buffer = resource_manager.request_sound_buffer( Path::new(self.definition.shot_sound), BufferKind::Normal).unwrap(); let mut shot_sound = Source::new_spatial(shot_buffer).unwrap(); shot_sound.set_play_once(true); shot_sound.play(); shot_sound.as_spatial_mut().set_position(&self.shot_position); sound_context.add_source(shot_sound); } pub fn get_ammo(&self) -> u32 { self.ammo } pub fn get_owner(&self) -> Handle<Actor> { self.owner } pub fn set_owner(&mut self, owner: Handle<Actor>) { self.owner = owner; } pub fn try_shoot(&mut self, scene: &mut Scene, resource_manager: &mut ResourceManager, sound_context: Arc<Mutex<Context>>, time: GameTime, weapon_velocity: Vec3) -> Option<Projectile> { if self.ammo!= 0 && time.elapsed - self.last_shot_time >= 0.1 { self.ammo -= 1; self.offset = Vec3::new(0.0, 0.0, -0.05); self.last_shot_time = time.elapsed; self.play_shot_sound(resource_manager, sound_context); let (dir, pos) = { let graph = scene.interface().graph; (graph.get(self.model).base().get_look_vector(), self.get_shot_position(graph)) }; match self.kind { WeaponKind::M4 | WeaponKind::Ak47 => { Some(Projectile::new(ProjectileKind::Bullet, resource_manager, scene, dir, pos, self.self_handle, weapon_velocity)) } WeaponKind::PlasmaRifle => { Some(Projectile::new(ProjectileKind::Plasma, resource_manager, scene, dir, pos, 
self.self_handle, weapon_velocity)) } } } else { None } } } impl CleanUp for Weapon { fn clean_up(&mut self, scene: &mut Scene) { let SceneInterfaceMut { graph,.. } = scene.interface_mut(); graph.remove_node(self.model); graph.remove_node(self.laser_dot); } } pub struct WeaponContainer { pool: Pool<Weapon> } impl WeaponContainer { pub fn new() -> Self { Self { pool: Pool::new() } } pub fn add(&mut self, weapon: Weapon) -> Handle<Weapon> { let handle = self.pool.spawn(weapon); self.pool.borrow_mut(handle).self_handle = handle; handle } pub fn iter(&self) -> PoolIterator<Weapon> { self.pool.iter() } pub fn iter_mut(&mut self) -> PoolIteratorMut<Weapon> { self.pool.iter_mut() } pub fn get(&self, handle: Handle<Weapon>) -> &Weapon { self.pool.borrow(handle) } pub fn get_mut(&mut self, handle: Handle<Weapon>) -> &mut Weapon { self.pool.borrow_mut(handle) } pub fn update(&mut self, scene: &mut Scene) { for weapon in self.pool.iter_mut() {
} impl Visit for WeaponContainer { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.pool.visit("Pool", visitor)?; visitor.leave_region() } }
weapon.update(scene) } }
random_line_split
weapon.rs
use std::{ path::Path, sync::{Arc, Mutex}, }; use crate::{ projectile::{ Projectile, ProjectileKind, }, actor::Actor, HandleFromSelf, GameTime, level::CleanUp, }; use rg3d::{ physics::{RayCastOptions, Physics}, sound::{ source::Source, buffer::BufferKind, context::Context, }, engine::resource_manager::ResourceManager, resource::{ model::Model, }, scene::{ SceneInterfaceMut, node::Node, Scene, graph::Graph, light::{ LightKind, LightBuilder, PointLight, }, base::{BaseBuilder, AsBase}, }, core::{ pool::{ Pool, PoolIterator, PoolIteratorMut, Handle, }, color::Color, visitor::{ Visit, VisitResult, Visitor, }, math::{vec3::Vec3, ray::Ray}, }, }; #[derive(Copy, Clone, PartialEq, Eq)] pub enum WeaponKind { M4, Ak47, PlasmaRifle, } impl WeaponKind { pub fn id(&self) -> u32 { match self { WeaponKind::M4 => 0, WeaponKind::Ak47 => 1, WeaponKind::PlasmaRifle => 2 } } pub fn new(id: u32) -> Result<Self, String> { match id { 0 => Ok(WeaponKind::M4), 1 => Ok(WeaponKind::Ak47), 2 => Ok(WeaponKind::PlasmaRifle), _ => return Err(format!("unknown weapon kind {}", id)) } } } pub struct Weapon { self_handle: Handle<Weapon>, kind: WeaponKind, model: Handle<Node>, laser_dot: Handle<Node>, shot_point: Handle<Node>, offset: Vec3, dest_offset: Vec3, last_shot_time: f64, shot_position: Vec3, owner: Handle<Actor>, ammo: u32, definition: &'static WeaponDefinition, } pub struct WeaponDefinition { model: &'static str, shot_sound: &'static str, ammo: u32, } impl HandleFromSelf<Weapon> for Weapon { fn self_handle(&self) -> Handle<Weapon> { self.self_handle } } impl Default for Weapon { fn default() -> Self { Self { self_handle: Default::default(), kind: WeaponKind::M4, laser_dot: Handle::NONE, model: Handle::NONE, offset: Vec3::ZERO, shot_point: Handle::NONE, dest_offset: Vec3::ZERO, last_shot_time: 0.0, shot_position: Vec3::ZERO, owner: Handle::NONE, ammo: 250, definition: Self::get_definition(WeaponKind::M4), } } } impl Visit for Weapon { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; let mut kind_id = self.kind.id(); kind_id.visit("KindId", visitor)?; if visitor.is_reading() { self.kind = WeaponKind::new(kind_id)? } self.definition = Self::get_definition(self.kind); self.self_handle.visit("SelfHandle", visitor)?; self.model.visit("Model", visitor)?; self.laser_dot.visit("LaserDot", visitor)?; self.offset.visit("Offset", visitor)?; self.dest_offset.visit("DestOffset", visitor)?; self.last_shot_time.visit("LastShotTime", visitor)?; self.owner.visit("Owner", visitor)?; self.ammo.visit("Ammo", visitor)?; visitor.leave_region() } } impl Weapon { pub fn get_definition(kind: WeaponKind) -> &'static WeaponDefinition { match kind { WeaponKind::M4 => { static DEFINITION: WeaponDefinition = WeaponDefinition { model: "data/models/m4.FBX", shot_sound: "data/sounds/m4_shot.wav", ammo: 115, }; &DEFINITION } WeaponKind::Ak47 => { static DEFINITION: WeaponDefinition = WeaponDefinition { model: "data/models/ak47.FBX", shot_sound: "data/sounds/m4_shot.wav", ammo: 100, }; &DEFINITION } WeaponKind::PlasmaRifle => { static DEFINITION: WeaponDefinition = WeaponDefinition { model: "data/models/plasma_rifle.FBX", shot_sound: "data/sounds/plasma_shot.wav", ammo: 40, }; &DEFINITION } } } pub fn new(kind: WeaponKind, resource_manager: &mut ResourceManager, scene: &mut Scene) -> Weapon { let definition = Self::get_definition(kind); let model = Model::instantiate( resource_manager.request_model(Path::new(definition.model)).unwrap(), scene).root; let SceneInterfaceMut { graph,.. 
} = scene.interface_mut(); let laser_dot = graph.add_node(Node::Light( LightBuilder::new(LightKind::Point(PointLight::new(0.5)), BaseBuilder::new()) .with_color(Color::opaque(255, 0, 0)) .cast_shadows(false) .build())); let shot_point = graph.find_by_name(model, "Weapon:ShotPoint"); if shot_point.is_none() { println!("Shot point not found!"); } Weapon { kind, laser_dot, model, shot_point, definition, ammo: definition.ammo, ..Default::default() } } pub fn set_visibility(&self, visibility: bool, graph: &mut Graph) { graph.get_mut(self.model).base_mut().set_visibility(visibility); graph.get_mut(self.laser_dot).base_mut().set_visibility(visibility); } pub fn get_model(&self) -> Handle<Node> { self.model } pub fn update(&mut self, scene: &mut Scene) { let SceneInterfaceMut { graph, physics,.. } = scene.interface_mut(); self.offset.follow(&self.dest_offset, 0.2); self.update_laser_sight(graph, physics); let node = graph.get_mut(self.model); node.base_mut().get_local_transform_mut().set_position(self.offset); self.shot_position = node.base().get_global_position(); } fn get_shot_position(&self, graph: &Graph) -> Vec3 { if self.shot_point.is_some() { graph.get(self.shot_point).base().get_global_position() } else { // Fallback graph.get(self.model).base().get_global_position() } } pub fn get_kind(&self) -> WeaponKind { self.kind } pub fn add_ammo(&mut self, amount: u32) { self.ammo += amount; } fn update_laser_sight(&self, graph: &mut Graph, physics: &Physics) { let mut laser_dot_position = Vec3::ZERO; let model = graph.get(self.model); let begin = model.base().get_global_position(); let end = begin + model.base().get_look_vector().scale(100.0); if let Some(ray) = Ray::from_two_points(&begin, &end) { let mut result = Vec::new(); if physics.ray_cast(&ray, RayCastOptions::default(), &mut result) { let offset = result[0].normal.normalized().unwrap_or_default().scale(0.2); laser_dot_position = result[0].position + offset; } } graph.get_mut(self.laser_dot).base_mut().get_local_transform_mut().set_position(laser_dot_position); } fn play_shot_sound(&self, resource_manager: &mut ResourceManager, sound_context: Arc<Mutex<Context>>) { let mut sound_context = sound_context.lock().unwrap(); let shot_buffer = resource_manager.request_sound_buffer( Path::new(self.definition.shot_sound), BufferKind::Normal).unwrap(); let mut shot_sound = Source::new_spatial(shot_buffer).unwrap(); shot_sound.set_play_once(true); shot_sound.play(); shot_sound.as_spatial_mut().set_position(&self.shot_position); sound_context.add_source(shot_sound); } pub fn get_ammo(&self) -> u32 { self.ammo } pub fn get_owner(&self) -> Handle<Actor> { self.owner } pub fn set_owner(&mut self, owner: Handle<Actor>) { self.owner = owner; } pub fn try_shoot(&mut self, scene: &mut Scene, resource_manager: &mut ResourceManager, sound_context: Arc<Mutex<Context>>, time: GameTime, weapon_velocity: Vec3) -> Option<Projectile> { if self.ammo!= 0 && time.elapsed - self.last_shot_time >= 0.1 { self.ammo -= 1; self.offset = Vec3::new(0.0, 0.0, -0.05); self.last_shot_time = time.elapsed; self.play_shot_sound(resource_manager, sound_context); let (dir, pos) = { let graph = scene.interface().graph; (graph.get(self.model).base().get_look_vector(), self.get_shot_position(graph)) }; match self.kind { WeaponKind::M4 | WeaponKind::Ak47 =>
WeaponKind::PlasmaRifle => { Some(Projectile::new(ProjectileKind::Plasma, resource_manager, scene, dir, pos, self.self_handle, weapon_velocity)) } } } else { None } } } impl CleanUp for Weapon { fn clean_up(&mut self, scene: &mut Scene) { let SceneInterfaceMut { graph,.. } = scene.interface_mut(); graph.remove_node(self.model); graph.remove_node(self.laser_dot); } } pub struct WeaponContainer { pool: Pool<Weapon> } impl WeaponContainer { pub fn new() -> Self { Self { pool: Pool::new() } } pub fn add(&mut self, weapon: Weapon) -> Handle<Weapon> { let handle = self.pool.spawn(weapon); self.pool.borrow_mut(handle).self_handle = handle; handle } pub fn iter(&self) -> PoolIterator<Weapon> { self.pool.iter() } pub fn iter_mut(&mut self) -> PoolIteratorMut<Weapon> { self.pool.iter_mut() } pub fn get(&self, handle: Handle<Weapon>) -> &Weapon { self.pool.borrow(handle) } pub fn get_mut(&mut self, handle: Handle<Weapon>) -> &mut Weapon { self.pool.borrow_mut(handle) } pub fn update(&mut self, scene: &mut Scene) { for weapon in self.pool.iter_mut() { weapon.update(scene) } } } impl Visit for WeaponContainer { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.pool.visit("Pool", visitor)?; visitor.leave_region() } }
{ Some(Projectile::new(ProjectileKind::Bullet, resource_manager, scene, dir, pos, self.self_handle, weapon_velocity)) }
conditional_block
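`WeaponKind::id` and `WeaponKind::new` form a manual discriminant round-trip that the `Visit` serialization relies on. A std-only sketch of the invariant worth checking, using the same variants and ids as the file above:

// Round-trip contract: every id decodes back to its variant; unknown ids fail.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum WeaponKind { M4, Ak47, PlasmaRifle }

impl WeaponKind {
    fn id(self) -> u32 {
        match self {
            WeaponKind::M4 => 0,
            WeaponKind::Ak47 => 1,
            WeaponKind::PlasmaRifle => 2,
        }
    }
    fn new(id: u32) -> Result<Self, String> {
        match id {
            0 => Ok(WeaponKind::M4),
            1 => Ok(WeaponKind::Ak47),
            2 => Ok(WeaponKind::PlasmaRifle),
            _ => Err(format!("unknown weapon kind {}", id)),
        }
    }
}

fn main() {
    for kind in [WeaponKind::M4, WeaponKind::Ak47, WeaponKind::PlasmaRifle] {
        assert_eq!(WeaponKind::new(kind.id()), Ok(kind));
    }
    // Decoding must reject ids that no variant produces, rather than
    // silently defaulting, since saved games depend on this mapping.
    assert!(WeaponKind::new(99).is_err());
}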
package.rs
//! An interpreter for the rust-installer package format. Responsible //! for installing from a directory or tarball to an installation //! prefix, represented by a `Components` instance. use crate::dist::component::components::*; use crate::dist::component::transaction::*; use crate::dist::temp; use crate::errors::*; use crate::utils::notifications::Notification; use crate::utils::utils; use std::collections::HashSet; use std::fmt; use std::io::Read; use std::path::{Path, PathBuf}; /// The current metadata revision used by rust-installer pub const INSTALLER_VERSION: &str = "3"; pub const VERSION_FILE: &str = "rust-installer-version"; pub trait Package: fmt::Debug { fn contains(&self, component: &str, short_name: Option<&str>) -> bool; fn install<'a>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'a>, ) -> Result<Transaction<'a>>; fn components(&self) -> Vec<String>; } #[derive(Debug)] pub struct DirectoryPackage { path: PathBuf, components: HashSet<String>, copy: bool, } impl DirectoryPackage { pub fn new(path: PathBuf, copy: bool) -> Result<Self> { validate_installer_version(&path)?; let content = utils::read_file("package components", &path.join("components"))?; let components = content .lines() .map(std::borrow::ToOwned::to_owned) .collect(); Ok(DirectoryPackage { path, components, copy, }) } } fn validate_installer_version(path: &Path) -> Result<()> { let file = utils::read_file("installer version", &path.join(VERSION_FILE))?; let v = file.trim(); if v == INSTALLER_VERSION { Ok(()) } else { Err(ErrorKind::BadInstallerVersion(v.to_owned()).into()) } } impl Package for DirectoryPackage { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.components.contains(component) || if let Some(n) = short_name { self.components.contains(n) } else { false } } fn install<'a>( &self, target: &Components, name: &str, short_name: Option<&str>, tx: Transaction<'a>, ) -> Result<Transaction<'a>> { let actual_name = if self.components.contains(name) { name } else if let Some(n) = short_name { n } else { name }; let root = self.path.join(actual_name); let manifest = utils::read_file("package manifest", &root.join("manifest.in"))?; let mut builder = target.add(name, tx); for l in manifest.lines() { let part = ComponentPart::decode(l) .ok_or_else(|| ErrorKind::CorruptComponent(name.to_owned()))?; let path = part.1; let src_path = root.join(&path); match &*part.0 { "file" => { if self.copy { builder.copy_file(path.clone(), &src_path)? } else { builder.move_file(path.clone(), &src_path)? } } "dir" => { if self.copy { builder.copy_dir(path.clone(), &src_path)? } else { builder.move_dir(path.clone(), &src_path)? } } _ => return Err(ErrorKind::CorruptComponent(name.to_owned()).into()), } set_file_perms(&target.prefix().path().join(path), &src_path)?; } let tx = builder.finish()?; Ok(tx) } fn components(&self) -> Vec<String> { self.components.iter().cloned().collect() } } // On Unix we need to set up the file permissions correctly so // binaries are executable and directories readable. This shouldn't be // necessary: the source files *should* have the right permissions, // but due to rust-lang/rust#25479 they don't. 
#[cfg(unix)] fn set_file_perms(dest_path: &Path, src_path: &Path) -> Result<()> { use std::fs::{self, Metadata}; use std::os::unix::fs::PermissionsExt; use walkdir::WalkDir; // Compute whether this entry needs the X bit fn needs_x(meta: &Metadata) -> bool { meta.is_dir() || // Directories need it meta.permissions().mode() & 0o700 == 0o700 // If it is rwx for the user, it gets the X bit } // By convention, anything in the bin/ directory of the package is a binary let is_bin = if let Some(p) = src_path.parent() { p.ends_with("bin") } else { false }; let is_dir = utils::is_directory(dest_path); if is_dir { // Walk the directory setting everything for entry in WalkDir::new(dest_path) { let entry = entry.chain_err(|| ErrorKind::ComponentDirPermissionsFailed)?; let meta = entry .metadata() .chain_err(|| ErrorKind::ComponentDirPermissionsFailed)?; let mut perm = meta.permissions(); perm.set_mode(if needs_x(&meta) { 0o755 } else { 0o644 }); fs::set_permissions(entry.path(), perm) .chain_err(|| ErrorKind::ComponentFilePermissionsFailed)?; } } else { let meta = fs::metadata(dest_path).chain_err(|| ErrorKind::ComponentFilePermissionsFailed)?; let mut perm = meta.permissions(); perm.set_mode(if is_bin || needs_x(&meta) { 0o755 } else { 0o644 }); fs::set_permissions(dest_path, perm) .chain_err(|| ErrorKind::ComponentFilePermissionsFailed)?; } Ok(()) } #[cfg(windows)] fn set_file_perms(_dest_path: &Path, _src_path: &Path) -> Result<()> { Ok(()) } #[derive(Debug)] pub struct TarPackage<'a>(DirectoryPackage, temp::Dir<'a>); impl<'a> TarPackage<'a> { pub fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let temp_dir = temp_cfg.new_directory()?; let mut archive = tar::Archive::new(stream); // The rust-installer packages unpack to a directory called // $pkgname-$version-$target. Skip that directory when // unpacking. unpack_without_first_dir(&mut archive, &*temp_dir, notify_handler)?; Ok(TarPackage( DirectoryPackage::new(temp_dir.to_owned(), false)?, temp_dir, )) } } #[cfg(windows)] mod unpacker { use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use threadpool; use crate::utils::notifications::Notification; pub struct Unpacker<'a> { n_files: Arc<AtomicUsize>, pool: threadpool::ThreadPool, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, } impl<'a> Unpacker<'a> { pub fn new(notify_handler: Option<&'a dyn Fn(Notification<'_>)>) -> Self
pub fn handle(&mut self, unpacked: tar::Unpacked) { if let tar::Unpacked::File(f) = unpacked { self.n_files.fetch_add(1, Ordering::Relaxed); let n_files = self.n_files.clone(); self.pool.execute(move || { drop(f); n_files.fetch_sub(1, Ordering::Relaxed); }); } } } impl<'a> Drop for Unpacker<'a> { fn drop(&mut self) { // Some explanation is in order. Even though the tar we are reading from (if // any) will have had its FileWithProgress download tracking // completed before we hit drop, that is not true if we are unwinding due to a // failure, where the logical ownership of the progress bar is // ambiguous, and as the tracker itself is abstracted out behind // notifications etc we cannot just query for that. So: we assume no // more reads of the underlying tar will take place: either the // error unwinding will stop reads, or we completed; either way, we // notify finished to the tracker to force a reset to zero; we set // the units to files, show our progress, and set our units back // afterwards. The largest archives today - rust docs - have ~20k // items, and the download tracker's progress is confounded with // actual handling of data today, we synthesis a data buffer and // pretend to have bytes to deliver. self.notify_handler .map(|handler| handler(Notification::DownloadFinished)); self.notify_handler .map(|handler| handler(Notification::DownloadPushUnits("handles"))); let mut prev_files = self.n_files.load(Ordering::Relaxed); self.notify_handler.map(|handler| { handler(Notification::DownloadContentLengthReceived( prev_files as u64, )) }); if prev_files > 50 { println!("Closing {} deferred file handles", prev_files); } let buf: Vec<u8> = vec![0; prev_files]; assert!(32767 > prev_files); let mut current_files = prev_files; while current_files!= 0 { use std::thread::sleep; sleep(std::time::Duration::from_millis(100)); prev_files = current_files; current_files = self.n_files.load(Ordering::Relaxed); let step_count = prev_files - current_files; self.notify_handler.map(|handler| { handler(Notification::DownloadDataReceived(&buf[0..step_count])) }); } self.pool.join(); self.notify_handler .map(|handler| handler(Notification::DownloadFinished)); self.notify_handler .map(|handler| handler(Notification::DownloadPopUnits)); } } } #[cfg(not(windows))] mod unpacker { use crate::utils::notifications::Notification; pub struct Unpacker {} impl Unpacker { pub fn new<'a>(_notify_handler: Option<&'a dyn Fn(Notification<'_>)>) -> Unpacker { Unpacker {} } pub fn handle(&mut self, _unpacked: tar::Unpacked) {} } } fn unpack_without_first_dir<'a, R: Read>( archive: &mut tar::Archive<R>, path: &Path, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<()> { let mut unpacker = unpacker::Unpacker::new(notify_handler); let entries = archive .entries() .chain_err(|| ErrorKind::ExtractingPackage)?; let mut checked_parents: HashSet<PathBuf> = HashSet::new(); for entry in entries { let mut entry = entry.chain_err(|| ErrorKind::ExtractingPackage)?; let relpath = { let path = entry.path(); let path = path.chain_err(|| ErrorKind::ExtractingPackage)?; path.into_owned() }; let mut components = relpath.components(); // Throw away the first path component components.next(); let full_path = path.join(&components.as_path()); // Create the full path to the entry if it does not exist already if let Some(parent) = full_path.parent() { if!checked_parents.contains(parent) { checked_parents.insert(parent.to_owned()); // It would be nice to optimise this stat out, but the tar could be like so: // a/deep/file.txt // 
a/file.txt // which would require tracking the segments rather than a simple hash. // Until profile shows that one stat per dir is a problem (vs one stat per file) // leave till later. if!parent.exists() { std::fs::create_dir_all(&parent).chain_err(|| ErrorKind::ExtractingPackage)? } } } entry.set_preserve_mtime(false); entry .unpack(&full_path) .map(|unpacked| unpacker.handle(unpacked)) .chain_err(|| ErrorKind::ExtractingPackage)?; } Ok(()) } impl<'a> Package for TarPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub struct TarGzPackage<'a>(TarPackage<'a>); impl<'a> TarGzPackage<'a> { pub fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = flate2::read::GzDecoder::new(stream); Ok(TarGzPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarGzPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub struct TarXzPackage<'a>(TarPackage<'a>); impl<'a> TarXzPackage<'a> { pub fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = xz2::read::XzDecoder::new(stream); Ok(TarXzPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarXzPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } }
{ // Defaults to hardware thread count threads; this is suitable for // our needs as IO bound operations tend to show up as write latencies // rather than close latencies, so we don't need to look at // more threads to get more IO dispatched at this stage in the process. let pool = threadpool::Builder::new() .thread_name("CloseHandle".into()) .build(); Unpacker { n_files: Arc::new(AtomicUsize::new(0)), pool: pool, notify_handler: notify_handler, } }
identifier_body
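The Windows `Unpacker` above defers expensive file-handle closes to a thread pool and tracks how many are still outstanding with an `AtomicUsize`, polling it down to zero in `Drop`. A std-only sketch of the same pattern; plain `thread::spawn` stands in for the threadpool crate, and a zero-sized marker type stands in for the real file handle:

// Deferred-close sketch: push drops onto worker threads, count them down.
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

struct SlowToClose; // stand-in for a handle whose drop is expensive

fn main() {
    let n_open = Arc::new(AtomicUsize::new(0));
    let mut workers = Vec::new();

    for _ in 0..4 {
        let handle = SlowToClose;
        n_open.fetch_add(1, Ordering::Relaxed);
        let n_open = n_open.clone();
        // Defer the expensive drop to another thread so unpacking can continue.
        workers.push(thread::spawn(move || {
            drop(handle);
            n_open.fetch_sub(1, Ordering::Relaxed);
        }));
    }

    // Poll until every deferred close has completed; the real Drop impl
    // does the same with a 100 ms sleep while reporting progress.
    while n_open.load(Ordering::Relaxed) != 0 {
        thread::sleep(Duration::from_millis(10));
    }
    for w in workers {
        w.join().unwrap();
    }
    println!("all deferred closes finished");
}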
package.rs
//! An interpreter for the rust-installer package format. Responsible //! for installing from a directory or tarball to an installation //! prefix, represented by a `Components` instance. use crate::dist::component::components::*; use crate::dist::component::transaction::*; use crate::dist::temp; use crate::errors::*; use crate::utils::notifications::Notification; use crate::utils::utils; use std::collections::HashSet; use std::fmt; use std::io::Read; use std::path::{Path, PathBuf}; /// The current metadata revision used by rust-installer pub const INSTALLER_VERSION: &str = "3"; pub const VERSION_FILE: &str = "rust-installer-version"; pub trait Package: fmt::Debug { fn contains(&self, component: &str, short_name: Option<&str>) -> bool; fn install<'a>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'a>, ) -> Result<Transaction<'a>>; fn components(&self) -> Vec<String>; } #[derive(Debug)] pub struct DirectoryPackage { path: PathBuf, components: HashSet<String>, copy: bool, } impl DirectoryPackage { pub fn new(path: PathBuf, copy: bool) -> Result<Self> { validate_installer_version(&path)?; let content = utils::read_file("package components", &path.join("components"))?; let components = content .lines() .map(std::borrow::ToOwned::to_owned) .collect(); Ok(DirectoryPackage { path, components, copy, }) } } fn validate_installer_version(path: &Path) -> Result<()> { let file = utils::read_file("installer version", &path.join(VERSION_FILE))?; let v = file.trim(); if v == INSTALLER_VERSION { Ok(()) } else { Err(ErrorKind::BadInstallerVersion(v.to_owned()).into()) } } impl Package for DirectoryPackage { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.components.contains(component) || if let Some(n) = short_name { self.components.contains(n) } else { false } } fn install<'a>( &self, target: &Components, name: &str, short_name: Option<&str>, tx: Transaction<'a>, ) -> Result<Transaction<'a>> { let actual_name = if self.components.contains(name) { name } else if let Some(n) = short_name { n } else { name }; let root = self.path.join(actual_name); let manifest = utils::read_file("package manifest", &root.join("manifest.in"))?; let mut builder = target.add(name, tx); for l in manifest.lines() { let part = ComponentPart::decode(l) .ok_or_else(|| ErrorKind::CorruptComponent(name.to_owned()))?; let path = part.1; let src_path = root.join(&path); match &*part.0 { "file" => { if self.copy { builder.copy_file(path.clone(), &src_path)? } else { builder.move_file(path.clone(), &src_path)? } } "dir" => { if self.copy { builder.copy_dir(path.clone(), &src_path)? } else { builder.move_dir(path.clone(), &src_path)? } } _ => return Err(ErrorKind::CorruptComponent(name.to_owned()).into()), } set_file_perms(&target.prefix().path().join(path), &src_path)?; } let tx = builder.finish()?; Ok(tx) } fn components(&self) -> Vec<String> { self.components.iter().cloned().collect() } } // On Unix we need to set up the file permissions correctly so // binaries are executable and directories readable. This shouldn't be // necessary: the source files *should* have the right permissions, // but due to rust-lang/rust#25479 they don't. 
#[cfg(unix)] fn set_file_perms(dest_path: &Path, src_path: &Path) -> Result<()> { use std::fs::{self, Metadata}; use std::os::unix::fs::PermissionsExt; use walkdir::WalkDir; // Compute whether this entry needs the X bit fn needs_x(meta: &Metadata) -> bool { meta.is_dir() || // Directories need it meta.permissions().mode() & 0o700 == 0o700 // If it is rwx for the user, it gets the X bit } // By convention, anything in the bin/ directory of the package is a binary let is_bin = if let Some(p) = src_path.parent() { p.ends_with("bin") } else { false }; let is_dir = utils::is_directory(dest_path); if is_dir { // Walk the directory setting everything for entry in WalkDir::new(dest_path) { let entry = entry.chain_err(|| ErrorKind::ComponentDirPermissionsFailed)?; let meta = entry .metadata() .chain_err(|| ErrorKind::ComponentDirPermissionsFailed)?; let mut perm = meta.permissions(); perm.set_mode(if needs_x(&meta) { 0o755 } else { 0o644 }); fs::set_permissions(entry.path(), perm) .chain_err(|| ErrorKind::ComponentFilePermissionsFailed)?; } } else { let meta = fs::metadata(dest_path).chain_err(|| ErrorKind::ComponentFilePermissionsFailed)?; let mut perm = meta.permissions(); perm.set_mode(if is_bin || needs_x(&meta) { 0o755 } else { 0o644 }); fs::set_permissions(dest_path, perm) .chain_err(|| ErrorKind::ComponentFilePermissionsFailed)?; } Ok(()) } #[cfg(windows)] fn set_file_perms(_dest_path: &Path, _src_path: &Path) -> Result<()> { Ok(()) } #[derive(Debug)] pub struct TarPackage<'a>(DirectoryPackage, temp::Dir<'a>); impl<'a> TarPackage<'a> { pub fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let temp_dir = temp_cfg.new_directory()?; let mut archive = tar::Archive::new(stream); // The rust-installer packages unpack to a directory called // $pkgname-$version-$target. Skip that directory when // unpacking. unpack_without_first_dir(&mut archive, &*temp_dir, notify_handler)?; Ok(TarPackage( DirectoryPackage::new(temp_dir.to_owned(), false)?, temp_dir, )) } } #[cfg(windows)] mod unpacker { use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use threadpool; use crate::utils::notifications::Notification; pub struct
<'a> { n_files: Arc<AtomicUsize>, pool: threadpool::ThreadPool, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, } impl<'a> Unpacker<'a> { pub fn new(notify_handler: Option<&'a dyn Fn(Notification<'_>)>) -> Self { // Defaults to hardware thread count threads; this is suitable for // our needs as IO bound operations tend to show up as write latencies // rather than close latencies, so we don't need to look at // more threads to get more IO dispatched at this stage in the process. let pool = threadpool::Builder::new() .thread_name("CloseHandle".into()) .build(); Unpacker { n_files: Arc::new(AtomicUsize::new(0)), pool: pool, notify_handler: notify_handler, } } pub fn handle(&mut self, unpacked: tar::Unpacked) { if let tar::Unpacked::File(f) = unpacked { self.n_files.fetch_add(1, Ordering::Relaxed); let n_files = self.n_files.clone(); self.pool.execute(move || { drop(f); n_files.fetch_sub(1, Ordering::Relaxed); }); } } } impl<'a> Drop for Unpacker<'a> { fn drop(&mut self) { // Some explanation is in order. Even though the tar we are reading from (if // any) will have had its FileWithProgress download tracking // completed before we hit drop, that is not true if we are unwinding due to a // failure, where the logical ownership of the progress bar is // ambiguous, and as the tracker itself is abstracted out behind // notifications etc we cannot just query for that. So: we assume no // more reads of the underlying tar will take place: either the // error unwinding will stop reads, or we completed; either way, we // notify finished to the tracker to force a reset to zero; we set // the units to files, show our progress, and set our units back // afterwards. The largest archives today - rust docs - have ~20k // items, and the download tracker's progress is confounded with // actual handling of data today, we synthesis a data buffer and // pretend to have bytes to deliver. 
self.notify_handler
    .map(|handler| handler(Notification::DownloadFinished));
self.notify_handler
    .map(|handler| handler(Notification::DownloadPushUnits("handles")));
let mut prev_files = self.n_files.load(Ordering::Relaxed);
self.notify_handler.map(|handler| {
    handler(Notification::DownloadContentLengthReceived(
        prev_files as u64,
    ))
});
if prev_files > 50 {
    println!("Closing {} deferred file handles", prev_files);
}
let buf: Vec<u8> = vec![0; prev_files];
assert!(32767 > prev_files);
let mut current_files = prev_files;
while current_files != 0 {
    use std::thread::sleep;
    sleep(std::time::Duration::from_millis(100));
    prev_files = current_files;
    current_files = self.n_files.load(Ordering::Relaxed);
    let step_count = prev_files - current_files;
    self.notify_handler.map(|handler| {
        handler(Notification::DownloadDataReceived(&buf[0..step_count]))
    });
}
self.pool.join();
self.notify_handler
    .map(|handler| handler(Notification::DownloadFinished));
self.notify_handler
    .map(|handler| handler(Notification::DownloadPopUnits));
}
}
}

#[cfg(not(windows))]
mod unpacker {
    use crate::utils::notifications::Notification;

    pub struct Unpacker {}
    impl Unpacker {
        pub fn new<'a>(_notify_handler: Option<&'a dyn Fn(Notification<'_>)>) -> Unpacker {
            Unpacker {}
        }
        pub fn handle(&mut self, _unpacked: tar::Unpacked) {}
    }
}

fn unpack_without_first_dir<'a, R: Read>(
    archive: &mut tar::Archive<R>,
    path: &Path,
    notify_handler: Option<&'a dyn Fn(Notification<'_>)>,
) -> Result<()> {
    let mut unpacker = unpacker::Unpacker::new(notify_handler);
    let entries = archive
        .entries()
        .chain_err(|| ErrorKind::ExtractingPackage)?;
    let mut checked_parents: HashSet<PathBuf> = HashSet::new();

    for entry in entries {
        let mut entry = entry.chain_err(|| ErrorKind::ExtractingPackage)?;
        let relpath = {
            let path = entry.path();
            let path = path.chain_err(|| ErrorKind::ExtractingPackage)?;
            path.into_owned()
        };
        let mut components = relpath.components();
        // Throw away the first path component
        components.next();
        let full_path = path.join(&components.as_path());

        // Create the full path to the entry if it does not exist already
        if let Some(parent) = full_path.parent() {
            if !checked_parents.contains(parent) {
                checked_parents.insert(parent.to_owned());
                // It would be nice to optimise this stat out, but the tar could be like so:
                // a/deep/file.txt
                // a/file.txt
                // which would require tracking the segments rather than a simple hash.
                // Until profile shows that one stat per dir is a problem (vs one stat per file)
                // leave till later.
                if !parent.exists() {
                    std::fs::create_dir_all(&parent).chain_err(|| ErrorKind::ExtractingPackage)?
} } } entry.set_preserve_mtime(false); entry .unpack(&full_path) .map(|unpacked| unpacker.handle(unpacked)) .chain_err(|| ErrorKind::ExtractingPackage)?; } Ok(()) } impl<'a> Package for TarPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub struct TarGzPackage<'a>(TarPackage<'a>); impl<'a> TarGzPackage<'a> { pub fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = flate2::read::GzDecoder::new(stream); Ok(TarGzPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarGzPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub struct TarXzPackage<'a>(TarPackage<'a>); impl<'a> TarXzPackage<'a> { pub fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = xz2::read::XzDecoder::new(stream); Ok(TarXzPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarXzPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } }
Unpacker
identifier_name
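The deferred-close pattern in the `Unpacker` record above can be isolated into a small standalone program. This is a minimal sketch, assuming the `threadpool` crate as a dependency; plain `std::fs::File` handles in the OS temp directory stand in for `tar::Unpacked::File`, and the file names are illustrative:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

fn main() {
    // Worker pool whose only job is to drop (close) file handles off the
    // hot path, mirroring Unpacker above.
    let pool = threadpool::Builder::new()
        .thread_name("CloseHandle".into())
        .build();
    let n_files = Arc::new(AtomicUsize::new(0));

    for i in 0..8 {
        let path = std::env::temp_dir().join(format!("deferred-close-{}", i));
        let f = std::fs::File::create(path).unwrap();
        n_files.fetch_add(1, Ordering::Relaxed);
        let n_files = Arc::clone(&n_files);
        pool.execute(move || {
            drop(f); // the close() syscall happens on a pool thread
            n_files.fetch_sub(1, Ordering::Relaxed);
        });
    }

    // Joining the pool guarantees every deferred close has completed.
    pool.join();
    assert_eq!(n_files.load(Ordering::Relaxed), 0);
}
```

The atomic counter is the only shared state: the producer increments before handing a handle off, and each pool task decrements after the drop, so the count of still-open handles is always observable.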
package.rs
//! An interpreter for the rust-installer package format. Responsible //! for installing from a directory or tarball to an installation //! prefix, represented by a `Components` instance. use crate::dist::component::components::*; use crate::dist::component::transaction::*; use crate::dist::temp; use crate::errors::*; use crate::utils::notifications::Notification; use crate::utils::utils; use std::collections::HashSet; use std::fmt; use std::io::Read; use std::path::{Path, PathBuf}; /// The current metadata revision used by rust-installer pub const INSTALLER_VERSION: &str = "3"; pub const VERSION_FILE: &str = "rust-installer-version"; pub trait Package: fmt::Debug { fn contains(&self, component: &str, short_name: Option<&str>) -> bool; fn install<'a>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'a>, ) -> Result<Transaction<'a>>; fn components(&self) -> Vec<String>; } #[derive(Debug)] pub struct DirectoryPackage { path: PathBuf, components: HashSet<String>, copy: bool, } impl DirectoryPackage { pub fn new(path: PathBuf, copy: bool) -> Result<Self> { validate_installer_version(&path)?; let content = utils::read_file("package components", &path.join("components"))?; let components = content .lines() .map(std::borrow::ToOwned::to_owned) .collect(); Ok(DirectoryPackage { path, components, copy, }) } } fn validate_installer_version(path: &Path) -> Result<()> { let file = utils::read_file("installer version", &path.join(VERSION_FILE))?; let v = file.trim(); if v == INSTALLER_VERSION { Ok(()) } else { Err(ErrorKind::BadInstallerVersion(v.to_owned()).into()) } } impl Package for DirectoryPackage { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.components.contains(component) || if let Some(n) = short_name { self.components.contains(n) } else { false } } fn install<'a>( &self, target: &Components, name: &str, short_name: Option<&str>, tx: Transaction<'a>, ) -> Result<Transaction<'a>> { let actual_name = if self.components.contains(name) { name } else if let Some(n) = short_name { n } else { name }; let root = self.path.join(actual_name); let manifest = utils::read_file("package manifest", &root.join("manifest.in"))?; let mut builder = target.add(name, tx); for l in manifest.lines() { let part = ComponentPart::decode(l) .ok_or_else(|| ErrorKind::CorruptComponent(name.to_owned()))?; let path = part.1; let src_path = root.join(&path); match &*part.0 { "file" => { if self.copy { builder.copy_file(path.clone(), &src_path)? } else { builder.move_file(path.clone(), &src_path)? } } "dir" => { if self.copy { builder.copy_dir(path.clone(), &src_path)? } else { builder.move_dir(path.clone(), &src_path)? } } _ => return Err(ErrorKind::CorruptComponent(name.to_owned()).into()), } set_file_perms(&target.prefix().path().join(path), &src_path)?; } let tx = builder.finish()?; Ok(tx) } fn components(&self) -> Vec<String> { self.components.iter().cloned().collect() } } // On Unix we need to set up the file permissions correctly so // binaries are executable and directories readable. This shouldn't be // necessary: the source files *should* have the right permissions, // but due to rust-lang/rust#25479 they don't. 
#[cfg(unix)] fn set_file_perms(dest_path: &Path, src_path: &Path) -> Result<()> { use std::fs::{self, Metadata}; use std::os::unix::fs::PermissionsExt; use walkdir::WalkDir; // Compute whether this entry needs the X bit fn needs_x(meta: &Metadata) -> bool { meta.is_dir() || // Directories need it meta.permissions().mode() & 0o700 == 0o700 // If it is rwx for the user, it gets the X bit } // By convention, anything in the bin/ directory of the package is a binary let is_bin = if let Some(p) = src_path.parent() { p.ends_with("bin") } else { false }; let is_dir = utils::is_directory(dest_path); if is_dir { // Walk the directory setting everything for entry in WalkDir::new(dest_path) { let entry = entry.chain_err(|| ErrorKind::ComponentDirPermissionsFailed)?; let meta = entry .metadata() .chain_err(|| ErrorKind::ComponentDirPermissionsFailed)?; let mut perm = meta.permissions(); perm.set_mode(if needs_x(&meta) { 0o755 } else { 0o644 }); fs::set_permissions(entry.path(), perm) .chain_err(|| ErrorKind::ComponentFilePermissionsFailed)?; } } else { let meta = fs::metadata(dest_path).chain_err(|| ErrorKind::ComponentFilePermissionsFailed)?; let mut perm = meta.permissions(); perm.set_mode(if is_bin || needs_x(&meta) { 0o755 } else { 0o644 }); fs::set_permissions(dest_path, perm) .chain_err(|| ErrorKind::ComponentFilePermissionsFailed)?; } Ok(()) } #[cfg(windows)] fn set_file_perms(_dest_path: &Path, _src_path: &Path) -> Result<()> { Ok(()) } #[derive(Debug)] pub struct TarPackage<'a>(DirectoryPackage, temp::Dir<'a>); impl<'a> TarPackage<'a> { pub fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let temp_dir = temp_cfg.new_directory()?; let mut archive = tar::Archive::new(stream); // The rust-installer packages unpack to a directory called // $pkgname-$version-$target. Skip that directory when // unpacking. unpack_without_first_dir(&mut archive, &*temp_dir, notify_handler)?; Ok(TarPackage( DirectoryPackage::new(temp_dir.to_owned(), false)?, temp_dir, )) } } #[cfg(windows)] mod unpacker { use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use threadpool; use crate::utils::notifications::Notification; pub struct Unpacker<'a> { n_files: Arc<AtomicUsize>, pool: threadpool::ThreadPool, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, } impl<'a> Unpacker<'a> { pub fn new(notify_handler: Option<&'a dyn Fn(Notification<'_>)>) -> Self { // Defaults to hardware thread count threads; this is suitable for // our needs as IO bound operations tend to show up as write latencies // rather than close latencies, so we don't need to look at // more threads to get more IO dispatched at this stage in the process. let pool = threadpool::Builder::new() .thread_name("CloseHandle".into()) .build(); Unpacker { n_files: Arc::new(AtomicUsize::new(0)), pool: pool, notify_handler: notify_handler, } } pub fn handle(&mut self, unpacked: tar::Unpacked) { if let tar::Unpacked::File(f) = unpacked { self.n_files.fetch_add(1, Ordering::Relaxed); let n_files = self.n_files.clone(); self.pool.execute(move || { drop(f); n_files.fetch_sub(1, Ordering::Relaxed); }); } } } impl<'a> Drop for Unpacker<'a> { fn drop(&mut self) { // Some explanation is in order. 
Even though the tar we are reading from (if // any) will have had its FileWithProgress download tracking // completed before we hit drop, that is not true if we are unwinding due to a // failure, where the logical ownership of the progress bar is // ambiguous, and as the tracker itself is abstracted out behind // notifications etc we cannot just query for that. So: we assume no // more reads of the underlying tar will take place: either the // error unwinding will stop reads, or we completed; either way, we // notify finished to the tracker to force a reset to zero; we set // the units to files, show our progress, and set our units back // afterwards. The largest archives today - rust docs - have ~20k // items, and the download tracker's progress is confounded with // actual handling of data today, so we synthesize a data buffer and // pretend to have bytes to deliver. self.notify_handler .map(|handler| handler(Notification::DownloadFinished)); self.notify_handler .map(|handler| handler(Notification::DownloadPushUnits("handles")));
handler(Notification::DownloadContentLengthReceived( prev_files as u64, )) }); if prev_files > 50 { println!("Closing {} deferred file handles", prev_files); } let buf: Vec<u8> = vec![0; prev_files]; assert!(32767 > prev_files); let mut current_files = prev_files; while current_files != 0 { use std::thread::sleep; sleep(std::time::Duration::from_millis(100)); prev_files = current_files; current_files = self.n_files.load(Ordering::Relaxed); let step_count = prev_files - current_files; self.notify_handler.map(|handler| { handler(Notification::DownloadDataReceived(&buf[0..step_count])) }); } self.pool.join(); self.notify_handler .map(|handler| handler(Notification::DownloadFinished)); self.notify_handler .map(|handler| handler(Notification::DownloadPopUnits)); } } } #[cfg(not(windows))] mod unpacker { use crate::utils::notifications::Notification; pub struct Unpacker {} impl Unpacker { pub fn new<'a>(_notify_handler: Option<&'a dyn Fn(Notification<'_>)>) -> Unpacker { Unpacker {} } pub fn handle(&mut self, _unpacked: tar::Unpacked) {} } } fn unpack_without_first_dir<'a, R: Read>( archive: &mut tar::Archive<R>, path: &Path, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<()> { let mut unpacker = unpacker::Unpacker::new(notify_handler); let entries = archive .entries() .chain_err(|| ErrorKind::ExtractingPackage)?; let mut checked_parents: HashSet<PathBuf> = HashSet::new(); for entry in entries { let mut entry = entry.chain_err(|| ErrorKind::ExtractingPackage)?; let relpath = { let path = entry.path(); let path = path.chain_err(|| ErrorKind::ExtractingPackage)?; path.into_owned() }; let mut components = relpath.components(); // Throw away the first path component components.next(); let full_path = path.join(&components.as_path()); // Create the full path to the entry if it does not exist already if let Some(parent) = full_path.parent() { if !checked_parents.contains(parent) { checked_parents.insert(parent.to_owned()); // It would be nice to optimise this stat out, but the tar could be like so: // a/deep/file.txt // a/file.txt // which would require tracking the segments rather than a simple hash. // Until a profile shows that one stat per dir is a problem (vs one stat per file) // leave till later. if !parent.exists() { std::fs::create_dir_all(&parent).chain_err(|| ErrorKind::ExtractingPackage)?
} } } entry.set_preserve_mtime(false); entry .unpack(&full_path) .map(|unpacked| unpacker.handle(unpacked)) .chain_err(|| ErrorKind::ExtractingPackage)?; } Ok(()) } impl<'a> Package for TarPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub struct TarGzPackage<'a>(TarPackage<'a>); impl<'a> TarGzPackage<'a> { pub fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = flate2::read::GzDecoder::new(stream); Ok(TarGzPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarGzPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub struct TarXzPackage<'a>(TarPackage<'a>); impl<'a> TarXzPackage<'a> { pub fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = xz2::read::XzDecoder::new(stream); Ok(TarXzPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarXzPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } }
let mut prev_files = self.n_files.load(Ordering::Relaxed); self.notify_handler.map(|handler| {
random_line_split
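The completion in this record is the counter snapshot that feeds the progress loop. The loop itself, polling an atomic counter and reporting each delta as a slice of a synthetic byte buffer, can be sketched in isolation; this is an assumption-laden toy (a spawned thread stands in for the close-handle pool, `println!` stands in for the notification handler):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn main() {
    let total = 20usize;
    let remaining = Arc::new(AtomicUsize::new(total));

    // Stand-in for the close-handle pool: something else drains the counter.
    let worker = {
        let remaining = Arc::clone(&remaining);
        thread::spawn(move || {
            for _ in 0..total {
                thread::sleep(Duration::from_millis(20));
                remaining.fetch_sub(1, Ordering::Relaxed);
            }
        })
    };

    // Poll the counter and report each delta as "bytes" out of a synthetic
    // buffer, the same trick the Drop impl uses to reuse download progress.
    let buf = vec![0u8; total];
    let mut prev = total;
    while prev != 0 {
        thread::sleep(Duration::from_millis(100));
        let current = remaining.load(Ordering::Relaxed);
        let step = prev - current;
        println!("delivered {} synthetic bytes", buf[0..step].len());
        prev = current;
    }
    worker.join().unwrap();
}
```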
package.rs
//! An interpreter for the rust-installer package format. Responsible //! for installing from a directory or tarball to an installation //! prefix, represented by a `Components` instance. use crate::dist::component::components::*; use crate::dist::component::transaction::*; use crate::dist::temp; use crate::errors::*; use crate::utils::notifications::Notification; use crate::utils::utils; use std::collections::HashSet; use std::fmt; use std::io::Read; use std::path::{Path, PathBuf}; /// The current metadata revision used by rust-installer pub const INSTALLER_VERSION: &str = "3"; pub const VERSION_FILE: &str = "rust-installer-version"; pub trait Package: fmt::Debug { fn contains(&self, component: &str, short_name: Option<&str>) -> bool; fn install<'a>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'a>, ) -> Result<Transaction<'a>>; fn components(&self) -> Vec<String>; } #[derive(Debug)] pub struct DirectoryPackage { path: PathBuf, components: HashSet<String>, copy: bool, } impl DirectoryPackage { pub fn new(path: PathBuf, copy: bool) -> Result<Self> { validate_installer_version(&path)?; let content = utils::read_file("package components", &path.join("components"))?; let components = content .lines() .map(std::borrow::ToOwned::to_owned) .collect(); Ok(DirectoryPackage { path, components, copy, }) } } fn validate_installer_version(path: &Path) -> Result<()> { let file = utils::read_file("installer version", &path.join(VERSION_FILE))?; let v = file.trim(); if v == INSTALLER_VERSION { Ok(()) } else { Err(ErrorKind::BadInstallerVersion(v.to_owned()).into()) } } impl Package for DirectoryPackage { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.components.contains(component) || if let Some(n) = short_name { self.components.contains(n) } else { false } } fn install<'a>( &self, target: &Components, name: &str, short_name: Option<&str>, tx: Transaction<'a>, ) -> Result<Transaction<'a>> { let actual_name = if self.components.contains(name) { name } else if let Some(n) = short_name { n } else { name }; let root = self.path.join(actual_name); let manifest = utils::read_file("package manifest", &root.join("manifest.in"))?; let mut builder = target.add(name, tx); for l in manifest.lines() { let part = ComponentPart::decode(l) .ok_or_else(|| ErrorKind::CorruptComponent(name.to_owned()))?; let path = part.1; let src_path = root.join(&path); match &*part.0 { "file" => { if self.copy { builder.copy_file(path.clone(), &src_path)? } else { builder.move_file(path.clone(), &src_path)? } } "dir" => { if self.copy { builder.copy_dir(path.clone(), &src_path)? } else { builder.move_dir(path.clone(), &src_path)? } } _ => return Err(ErrorKind::CorruptComponent(name.to_owned()).into()), } set_file_perms(&target.prefix().path().join(path), &src_path)?; } let tx = builder.finish()?; Ok(tx) } fn components(&self) -> Vec<String> { self.components.iter().cloned().collect() } } // On Unix we need to set up the file permissions correctly so // binaries are executable and directories readable. This shouldn't be // necessary: the source files *should* have the right permissions, // but due to rust-lang/rust#25479 they don't. 
#[cfg(unix)] fn set_file_perms(dest_path: &Path, src_path: &Path) -> Result<()> { use std::fs::{self, Metadata}; use std::os::unix::fs::PermissionsExt; use walkdir::WalkDir; // Compute whether this entry needs the X bit fn needs_x(meta: &Metadata) -> bool { meta.is_dir() || // Directories need it meta.permissions().mode() & 0o700 == 0o700 // If it is rwx for the user, it gets the X bit } // By convention, anything in the bin/ directory of the package is a binary let is_bin = if let Some(p) = src_path.parent() { p.ends_with("bin") } else { false }; let is_dir = utils::is_directory(dest_path); if is_dir { // Walk the directory setting everything for entry in WalkDir::new(dest_path) { let entry = entry.chain_err(|| ErrorKind::ComponentDirPermissionsFailed)?; let meta = entry .metadata() .chain_err(|| ErrorKind::ComponentDirPermissionsFailed)?; let mut perm = meta.permissions(); perm.set_mode(if needs_x(&meta) { 0o755 } else { 0o644 }); fs::set_permissions(entry.path(), perm) .chain_err(|| ErrorKind::ComponentFilePermissionsFailed)?; } } else { let meta = fs::metadata(dest_path).chain_err(|| ErrorKind::ComponentFilePermissionsFailed)?; let mut perm = meta.permissions(); perm.set_mode(if is_bin || needs_x(&meta)
else { 0o644 }); fs::set_permissions(dest_path, perm) .chain_err(|| ErrorKind::ComponentFilePermissionsFailed)?; } Ok(()) } #[cfg(windows)] fn set_file_perms(_dest_path: &Path, _src_path: &Path) -> Result<()> { Ok(()) } #[derive(Debug)] pub struct TarPackage<'a>(DirectoryPackage, temp::Dir<'a>); impl<'a> TarPackage<'a> { pub fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let temp_dir = temp_cfg.new_directory()?; let mut archive = tar::Archive::new(stream); // The rust-installer packages unpack to a directory called // $pkgname-$version-$target. Skip that directory when // unpacking. unpack_without_first_dir(&mut archive, &*temp_dir, notify_handler)?; Ok(TarPackage( DirectoryPackage::new(temp_dir.to_owned(), false)?, temp_dir, )) } } #[cfg(windows)] mod unpacker { use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use threadpool; use crate::utils::notifications::Notification; pub struct Unpacker<'a> { n_files: Arc<AtomicUsize>, pool: threadpool::ThreadPool, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, } impl<'a> Unpacker<'a> { pub fn new(notify_handler: Option<&'a dyn Fn(Notification<'_>)>) -> Self { // Defaults to hardware thread count threads; this is suitable for // our needs as IO bound operations tend to show up as write latencies // rather than close latencies, so we don't need to look at // more threads to get more IO dispatched at this stage in the process. let pool = threadpool::Builder::new() .thread_name("CloseHandle".into()) .build(); Unpacker { n_files: Arc::new(AtomicUsize::new(0)), pool: pool, notify_handler: notify_handler, } } pub fn handle(&mut self, unpacked: tar::Unpacked) { if let tar::Unpacked::File(f) = unpacked { self.n_files.fetch_add(1, Ordering::Relaxed); let n_files = self.n_files.clone(); self.pool.execute(move || { drop(f); n_files.fetch_sub(1, Ordering::Relaxed); }); } } } impl<'a> Drop for Unpacker<'a> { fn drop(&mut self) { // Some explanation is in order. Even though the tar we are reading from (if // any) will have had its FileWithProgress download tracking // completed before we hit drop, that is not true if we are unwinding due to a // failure, where the logical ownership of the progress bar is // ambiguous, and as the tracker itself is abstracted out behind // notifications etc we cannot just query for that. So: we assume no // more reads of the underlying tar will take place: either the // error unwinding will stop reads, or we completed; either way, we // notify finished to the tracker to force a reset to zero; we set // the units to files, show our progress, and set our units back // afterwards. The largest archives today - rust docs - have ~20k // items, and the download tracker's progress is confounded with // actual handling of data today, so we synthesize a data buffer and // pretend to have bytes to deliver.
self.notify_handler .map(|handler| handler(Notification::DownloadFinished)); self.notify_handler .map(|handler| handler(Notification::DownloadPushUnits("handles"))); let mut prev_files = self.n_files.load(Ordering::Relaxed); self.notify_handler.map(|handler| { handler(Notification::DownloadContentLengthReceived( prev_files as u64, )) }); if prev_files > 50 { println!("Closing {} deferred file handles", prev_files); } let buf: Vec<u8> = vec![0; prev_files]; assert!(32767 > prev_files); let mut current_files = prev_files; while current_files != 0 { use std::thread::sleep; sleep(std::time::Duration::from_millis(100)); prev_files = current_files; current_files = self.n_files.load(Ordering::Relaxed); let step_count = prev_files - current_files; self.notify_handler.map(|handler| { handler(Notification::DownloadDataReceived(&buf[0..step_count])) }); } self.pool.join(); self.notify_handler .map(|handler| handler(Notification::DownloadFinished)); self.notify_handler .map(|handler| handler(Notification::DownloadPopUnits)); } } } #[cfg(not(windows))] mod unpacker { use crate::utils::notifications::Notification; pub struct Unpacker {} impl Unpacker { pub fn new<'a>(_notify_handler: Option<&'a dyn Fn(Notification<'_>)>) -> Unpacker { Unpacker {} } pub fn handle(&mut self, _unpacked: tar::Unpacked) {} } } fn unpack_without_first_dir<'a, R: Read>( archive: &mut tar::Archive<R>, path: &Path, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<()> { let mut unpacker = unpacker::Unpacker::new(notify_handler); let entries = archive .entries() .chain_err(|| ErrorKind::ExtractingPackage)?; let mut checked_parents: HashSet<PathBuf> = HashSet::new(); for entry in entries { let mut entry = entry.chain_err(|| ErrorKind::ExtractingPackage)?; let relpath = { let path = entry.path(); let path = path.chain_err(|| ErrorKind::ExtractingPackage)?; path.into_owned() }; let mut components = relpath.components(); // Throw away the first path component components.next(); let full_path = path.join(&components.as_path()); // Create the full path to the entry if it does not exist already if let Some(parent) = full_path.parent() { if !checked_parents.contains(parent) { checked_parents.insert(parent.to_owned()); // It would be nice to optimise this stat out, but the tar could be like so: // a/deep/file.txt // a/file.txt // which would require tracking the segments rather than a simple hash. // Until a profile shows that one stat per dir is a problem (vs one stat per file) // leave till later. if !parent.exists() { std::fs::create_dir_all(&parent).chain_err(|| ErrorKind::ExtractingPackage)?
} } } entry.set_preserve_mtime(false); entry .unpack(&full_path) .map(|unpacked| unpacker.handle(unpacked)) .chain_err(|| ErrorKind::ExtractingPackage)?; } Ok(()) } impl<'a> Package for TarPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub struct TarGzPackage<'a>(TarPackage<'a>); impl<'a> TarGzPackage<'a> { pub fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = flate2::read::GzDecoder::new(stream); Ok(TarGzPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarGzPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub struct TarXzPackage<'a>(TarPackage<'a>); impl<'a> TarXzPackage<'a> { pub fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = xz2::read::XzDecoder::new(stream); Ok(TarXzPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarXzPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } }
{ 0o755 }
conditional_block
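The `{ 0o755 }` completion belongs to `set_file_perms`, where the mode is chosen by the `needs_x` rule. A Unix-only sketch of just that decision, applied to a scratch file in the temp directory (the file name is illustrative):

```rust
#[cfg(unix)]
fn main() -> std::io::Result<()> {
    use std::fs;
    use std::os::unix::fs::PermissionsExt;

    let path = std::env::temp_dir().join("perm-demo.txt");
    fs::write(&path, b"demo")?;

    let meta = fs::metadata(&path)?;
    // Same rule as needs_x above: directories, or anything already rwx for
    // the owner, get the execute bit; everything else becomes rw-r--r--.
    let needs_x = meta.is_dir() || meta.permissions().mode() & 0o700 == 0o700;
    let mut perm = meta.permissions();
    perm.set_mode(if needs_x { 0o755 } else { 0o644 });
    fs::set_permissions(&path, perm)?;

    let mode = fs::metadata(&path)?.permissions().mode() & 0o777;
    println!("{} -> {:o}", path.display(), mode);
    Ok(())
}

#[cfg(not(unix))]
fn main() {}
```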
config.rs
use crate::utility::location::Location; use crate::exit_on_bad_config; use origen_metal::{config, scrub_path}; use origen_metal::config::{Environment, File}; use std::collections::HashMap; use std::path::{Path, PathBuf}; use crate::om::glob::glob; use std::process::exit; use super::target; const PUBLISHER_OPTIONS: &[&str] = &["system", "package_app", "upload_app"]; const BYPASS_APP_CONFIG_ENV_VAR: &str = "origen_app_bypass_config_lookup"; const APP_CONFIG_PATHS: &str = "origen_app_config_paths"; macro_rules! use_app_config { () => {{ !std::env::var_os($crate::core::application::config::BYPASS_APP_CONFIG_ENV_VAR).is_some() }} } #[derive(Debug, Deserialize)] pub struct CurrentState { pub target: Option<Vec<String>> } impl CurrentState { pub fn build(root: &PathBuf) -> Self { let file = root.join(".origen").join("application.toml"); let mut s = config::Config::builder().set_default("target", None::<Vec<String>>).unwrap(); if file.exists() { s = s.add_source(File::with_name(&format!("{}", file.display()))); } let cb = exit_on_bad_config!(s.build()); let slf: Self = exit_on_bad_config!(cb.try_deserialize()); slf } pub fn apply_to(&mut self, config: &mut Config) { if let Some(t) = self.target.as_ref() { config.target = Some(t.to_owned()) } else { if let Some(t) = &config.target { let clean_defaults = target::set_at_root(t.iter().map( |s| s.as_str() ).collect(), config.root.as_ref().unwrap()); self.target = Some(clean_defaults); } } } pub fn build_and_apply(config: &mut Config) { if use_app_config!() { let mut slf = Self::build(config.root.as_ref().unwrap()); slf.apply_to(config); } } } #[derive(Debug, Deserialize)] // If you add an attribute to this you must also update: // * pyapi/src/lib.rs to convert it to Python // * default function below to define the default value // * add an example of it to src/app_generators/templates/app/config/application.toml pub struct Config { pub name: String, pub target: Option<Vec<String>>, pub mode: String, /// Don't use this unless you know what you're doing, use origen::STATUS::output_dir() instead, since /// that accounts for the output directory being overridden by the current command pub output_directory: Option<String>, /// Don't use this unless you know what you're doing, use origen::STATUS::reference_dir() instead, since /// that accounts for the reference directory being overridden by the current command pub reference_directory: Option<String>, pub website_output_directory: Option<String>, pub website_source_directory: Option<String>, pub website_release_location: Option<Location>, pub website_release_name: Option<String>, pub root: Option<PathBuf>, pub revision_control: Option<HashMap<String, String>>, pub unit_tester: Option<HashMap<String, String>>, pub publisher: Option<HashMap<String, String>>, pub linter: Option<HashMap<String, String>>, pub release_scribe: Option<HashMap<String, String>>, pub app_session_root: Option<String>, pub commands: Option<Vec<String>>, } impl Config { pub fn refresh(&mut self) { let latest = Self::build(self.root.as_ref().unwrap(), false); self.name = latest.name; self.target = latest.target; self.mode = latest.mode; self.reference_directory = latest.reference_directory; self.website_output_directory = latest.website_output_directory; self.website_source_directory = latest.website_source_directory; self.website_release_location = latest.website_release_location; self.website_release_name = latest.website_release_name; self.revision_control = latest.revision_control; self.unit_tester = latest.unit_tester; self.publisher 
= latest.publisher; self.linter = latest.linter; self.release_scribe = latest.release_scribe; self.app_session_root = latest.app_session_root; self.commands = latest.commands; } /// Builds a new config from all application.toml files found at the given app root pub fn build(root: &Path, default_only: bool) -> Config { log_trace!("Building app config"); let mut s = config::Config::builder() .set_default("target", None::<Vec<String>>) .unwrap() .set_default("mode", "development".to_string()) .unwrap() .set_default("revision_control", None::<HashMap<String, String>>) .unwrap() .set_default("unit_tester", None::<HashMap<String, String>>) .unwrap() .set_default("publisher", None::<HashMap<String, String>>) .unwrap() .set_default("linter", None::<HashMap<String, String>>) .unwrap() .set_default("release_scribe", None::<HashMap<String, String>>) .unwrap() .set_default("app_session_root", None::<String>) .unwrap() .set_default("commands", None::<Vec<String>>) .unwrap(); let mut files: Vec<PathBuf> = Vec::new(); if let Some(paths) = std::env::var_os(APP_CONFIG_PATHS) { log_trace!("Found custom config paths: {:?}", paths); for path in std::env::split_paths(&paths) { log_trace!("Looking for Origen app config file at '{}'", path.display()); if path.is_file() { if let Some(ext) = path.extension() { if ext == "toml" { files.push(path); } else { log_error!( "Expected file {} to have extension '.toml'. Found '{}'", path.display(), ext.to_string_lossy() ) } } else { // Accept a file without an extension; it will be interpreted as a .toml files.push(path); } } else if path.is_dir() { let f = path.join("application.toml"); if f.exists() { files.push(f); } } else { log_error!( "Config path {} either does not exist or is not accessible", path.display() ); exit(1); } } } if use_app_config!() { let file = root.join("config").join("application.toml"); if file.exists() { files.push(file); } } else { // Bypass Origen's default configuration lookup - use only the enumerated configs log_trace!("Bypassing Origen's App Config Lookup"); } for f in files.iter().rev() { log_trace!("Loading Origen config file from '{}'", f.display()); s = s.add_source(File::with_name(&format!("{}", f.display()))); } s = s.add_source(Environment::with_prefix("origen_app").list_separator(",").with_list_parse_key("target").with_list_parse_key("commands").try_parsing(true)); let cb = exit_on_bad_config!(s.build()); let mut c: Self = exit_on_bad_config!(cb.try_deserialize()); c.root = Some(root.to_path_buf()); // TODO // if let Some(l) = loc { // c.website_release_location = Some(Location::new(&l)); // } log_trace!("Completed building app config"); c.validate_options(); if !default_only { CurrentState::build_and_apply(&mut c); } c } pub fn validate_options(&self) { log_trace!("Validating available options..."); if let Some(targets) = self.target.as_ref() { log_trace!("\tValidating default target..."); for t in targets { target::clean_name(t, "targets", true, self.root.as_ref().unwrap()); } log_trace!("\tValidating default target!"); } log_trace!("\tValidating publisher options..."); for unknown in self.validate_publisher_options() { log_warning!("Unknown Publisher Option '{}'", unknown); } log_trace!("\tFinished validating publisher options"); log_trace!("Finished checking configs!"); } pub fn validate_publisher_options(&self) -> Vec<String>
pub fn cmd_paths(&self) -> Vec<PathBuf> { let mut retn = vec!(); if let Some(cmds) = self.commands.as_ref() { // Load in only the commands explicitly given for cmds_toml in cmds { let ct = self.root.as_ref().unwrap().join("config").join(cmds_toml); if ct.exists() { retn.push(ct.to_owned()); } else { log_error!("Cannot locate app commands file '{}'", scrub_path!(ct).display()) } } } else { // Load in any commands from: // 1) app_root/commands.toml // 2) app_root/commands/*/**.toml let commands_toml = self.root.as_ref().unwrap().join("config").join("commands.toml"); // println!("commands toml: {}", commands_toml.display()); if commands_toml.exists() { retn.push(commands_toml); } let mut commands_dir = self.root.as_ref().unwrap().join("config/commands"); if commands_dir.exists() { commands_dir = commands_dir.join("**/*.toml"); for entry in glob(commands_dir.to_str().unwrap()).unwrap() { match entry { Ok(e) => retn.push(e), Err(e) => log_error!("Error processing commands toml: {}", e) } } } } retn } }
{ let mut unknowns: Vec<String> = vec![]; if let Some(p) = &self.publisher { for (opt, _) in p.iter() { if !PUBLISHER_OPTIONS.contains(&opt.as_str()) { unknowns.push(opt.clone()); } } } unknowns }
identifier_body
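The recovered body of `validate_publisher_options` is a plain allow-list check over map keys. An equivalent standalone sketch using iterator adapters instead of an explicit loop (the helper name and sample data are illustrative; the constant is taken from the source):

```rust
use std::collections::HashMap;

const PUBLISHER_OPTIONS: &[&str] = &["system", "package_app", "upload_app"];

// Equivalent to validate_publisher_options, written with iterator adapters.
fn unknown_publisher_options(publisher: Option<&HashMap<String, String>>) -> Vec<String> {
    publisher
        .map(|p| {
            p.keys()
                .filter(|opt| !PUBLISHER_OPTIONS.contains(&opt.as_str()))
                .cloned()
                .collect::<Vec<String>>()
        })
        .unwrap_or_default()
}

fn main() {
    let mut opts = HashMap::new();
    opts.insert("system".to_string(), "poetry".to_string());
    opts.insert("upload_ap".to_string(), "true".to_string()); // note the typo
    for unknown in unknown_publisher_options(Some(&opts)) {
        println!("Unknown Publisher Option '{}'", unknown);
    }
}
```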
config.rs
use crate::utility::location::Location; use crate::exit_on_bad_config; use origen_metal::{config, scrub_path}; use origen_metal::config::{Environment, File}; use std::collections::HashMap; use std::path::{Path, PathBuf}; use crate::om::glob::glob; use std::process::exit; use super::target; const PUBLISHER_OPTIONS: &[&str] = &["system", "package_app", "upload_app"]; const BYPASS_APP_CONFIG_ENV_VAR: &str = "origen_app_bypass_config_lookup"; const APP_CONFIG_PATHS: &str = "origen_app_config_paths"; macro_rules! use_app_config { () => {{ !std::env::var_os($crate::core::application::config::BYPASS_APP_CONFIG_ENV_VAR).is_some() }} } #[derive(Debug, Deserialize)] pub struct
{ pub target: Option<Vec<String>> } impl CurrentState { pub fn build(root: &PathBuf) -> Self { let file = root.join(".origen").join("application.toml"); let mut s = config::Config::builder().set_default("target", None::<Vec<String>>).unwrap(); if file.exists() { s = s.add_source(File::with_name(&format!("{}", file.display()))); } let cb = exit_on_bad_config!(s.build()); let slf: Self = exit_on_bad_config!(cb.try_deserialize()); slf } pub fn apply_to(&mut self, config: &mut Config) { if let Some(t) = self.target.as_ref() { config.target = Some(t.to_owned()) } else { if let Some(t) = &config.target { let clean_defaults = target::set_at_root(t.iter().map( |s| s.as_str() ).collect(), config.root.as_ref().unwrap()); self.target = Some(clean_defaults); } } } pub fn build_and_apply(config: &mut Config) { if use_app_config!() { let mut slf = Self::build(config.root.as_ref().unwrap()); slf.apply_to(config); } } } #[derive(Debug, Deserialize)] // If you add an attribute to this you must also update: // * pyapi/src/lib.rs to convert it to Python // * default function below to define the default value // * add an example of it to src/app_generators/templates/app/config/application.toml pub struct Config { pub name: String, pub target: Option<Vec<String>>, pub mode: String, /// Don't use this unless you know what you're doing, use origen::STATUS::output_dir() instead, since /// that accounts for the output directory being overridden by the current command pub output_directory: Option<String>, /// Don't use this unless you know what you're doing, use origen::STATUS::reference_dir() instead, since /// that accounts for the reference directory being overridden by the current command pub reference_directory: Option<String>, pub website_output_directory: Option<String>, pub website_source_directory: Option<String>, pub website_release_location: Option<Location>, pub website_release_name: Option<String>, pub root: Option<PathBuf>, pub revision_control: Option<HashMap<String, String>>, pub unit_tester: Option<HashMap<String, String>>, pub publisher: Option<HashMap<String, String>>, pub linter: Option<HashMap<String, String>>, pub release_scribe: Option<HashMap<String, String>>, pub app_session_root: Option<String>, pub commands: Option<Vec<String>>, } impl Config { pub fn refresh(&mut self) { let latest = Self::build(self.root.as_ref().unwrap(), false); self.name = latest.name; self.target = latest.target; self.mode = latest.mode; self.reference_directory = latest.reference_directory; self.website_output_directory = latest.website_output_directory; self.website_source_directory = latest.website_source_directory; self.website_release_location = latest.website_release_location; self.website_release_name = latest.website_release_name; self.revision_control = latest.revision_control; self.unit_tester = latest.unit_tester; self.publisher = latest.publisher; self.linter = latest.linter; self.release_scribe = latest.release_scribe; self.app_session_root = latest.app_session_root; self.commands = latest.commands; } /// Builds a new config from all application.toml files found at the given app root pub fn build(root: &Path, default_only: bool) -> Config { log_trace!("Building app config"); let mut s = config::Config::builder() .set_default("target", None::<Vec<String>>) .unwrap() .set_default("mode", "development".to_string()) .unwrap() .set_default("revision_control", None::<HashMap<String, String>>) .unwrap() .set_default("unit_tester", None::<HashMap<String, String>>) .unwrap() .set_default("publisher", 
None::<HashMap<String, String>>) .unwrap() .set_default("linter", None::<HashMap<String, String>>) .unwrap() .set_default("release_scribe", None::<HashMap<String, String>>) .unwrap() .set_default("app_session_root", None::<String>) .unwrap() .set_default("commands", None::<Vec<String>>) .unwrap(); let mut files: Vec<PathBuf> = Vec::new(); if let Some(paths) = std::env::var_os(APP_CONFIG_PATHS) { log_trace!("Found custom config paths: {:?}", paths); for path in std::env::split_paths(&paths) { log_trace!("Looking for Origen app config file at '{}'", path.display()); if path.is_file() { if let Some(ext) = path.extension() { if ext == "toml" { files.push(path); } else { log_error!( "Expected file {} to have extension '.toml'. Found '{}'", path.display(), ext.to_string_lossy() ) } } else { // Accept a file without an extension; it will be interpreted as a .toml files.push(path); } } else if path.is_dir() { let f = path.join("application.toml"); if f.exists() { files.push(f); } } else { log_error!( "Config path {} either does not exist or is not accessible", path.display() ); exit(1); } } } if use_app_config!() { let file = root.join("config").join("application.toml"); if file.exists() { files.push(file); } } else { // Bypass Origen's default configuration lookup - use only the enumerated configs log_trace!("Bypassing Origen's App Config Lookup"); } for f in files.iter().rev() { log_trace!("Loading Origen config file from '{}'", f.display()); s = s.add_source(File::with_name(&format!("{}", f.display()))); } s = s.add_source(Environment::with_prefix("origen_app").list_separator(",").with_list_parse_key("target").with_list_parse_key("commands").try_parsing(true)); let cb = exit_on_bad_config!(s.build()); let mut c: Self = exit_on_bad_config!(cb.try_deserialize()); c.root = Some(root.to_path_buf()); // TODO // if let Some(l) = loc { // c.website_release_location = Some(Location::new(&l)); // } log_trace!("Completed building app config"); c.validate_options(); if !default_only { CurrentState::build_and_apply(&mut c); } c } pub fn validate_options(&self) { log_trace!("Validating available options..."); if let Some(targets) = self.target.as_ref() { log_trace!("\tValidating default target..."); for t in targets { target::clean_name(t, "targets", true, self.root.as_ref().unwrap()); } log_trace!("\tValidating default target!"); } log_trace!("\tValidating publisher options..."); for unknown in self.validate_publisher_options() { log_warning!("Unknown Publisher Option '{}'", unknown); } log_trace!("\tFinished validating publisher options"); log_trace!("Finished checking configs!"); } pub fn validate_publisher_options(&self) -> Vec<String> { let mut unknowns: Vec<String> = vec![]; if let Some(p) = &self.publisher { for (opt, _) in p.iter() { if !PUBLISHER_OPTIONS.contains(&opt.as_str()) { unknowns.push(opt.clone()); } } } unknowns } pub fn cmd_paths(&self) -> Vec<PathBuf> { let mut retn = vec!(); if let Some(cmds) = self.commands.as_ref() { // Load in only the commands explicitly given for cmds_toml in cmds { let ct = self.root.as_ref().unwrap().join("config").join(cmds_toml); if ct.exists() { retn.push(ct.to_owned()); } else { log_error!("Cannot locate app commands file '{}'", scrub_path!(ct).display()) } } } else { // Load in any commands from: // 1) app_root/commands.toml // 2) app_root/commands/*/**.toml let commands_toml = self.root.as_ref().unwrap().join("config").join("commands.toml"); // println!("commands toml: {}", commands_toml.display()); if commands_toml.exists() { retn.push(commands_toml); } let
mut commands_dir = self.root.as_ref().unwrap().join("config/commands"); if commands_dir.exists() { commands_dir = commands_dir.join("**/*.toml"); for entry in glob(commands_dir.to_str().unwrap()).unwrap() { match entry { Ok(e) => retn.push(e), Err(e) => log_error!("Error processing commands toml: {}", e) } } } } retn } }
CurrentState
identifier_name
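The `CurrentState` completion here names the struct whose `build` layers a typed default underneath an optional TOML file via the `config` crate. A compilable sketch of that layering, assuming the `config` and `serde` (with derive) crates and using only the builder calls already present in the source; the file path is illustrative:

```rust
use config::{Config, File};
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct CurrentState {
    target: Option<Vec<String>>,
}

fn main() {
    let path = std::path::Path::new(".origen/application.toml");
    // Defaults first; any file source added later overrides them,
    // just as in CurrentState::build.
    let mut builder = Config::builder()
        .set_default("target", None::<Vec<String>>)
        .unwrap();
    if path.exists() {
        builder = builder.add_source(File::with_name(&path.display().to_string()));
    }
    let state: CurrentState = builder
        .build()
        .expect("bad config")
        .try_deserialize()
        .expect("bad config");
    println!("{:?}", state);
}
```

When the file is absent the deserialized struct simply carries the default `None` target, which is why the real code can call this unconditionally.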
config.rs
use crate::utility::location::Location; use crate::exit_on_bad_config; use origen_metal::{config, scrub_path}; use origen_metal::config::{Environment, File}; use std::collections::HashMap; use std::path::{Path, PathBuf}; use crate::om::glob::glob; use std::process::exit; use super::target; const PUBLISHER_OPTIONS: &[&str] = &["system", "package_app", "upload_app"]; const BYPASS_APP_CONFIG_ENV_VAR: &str = "origen_app_bypass_config_lookup"; const APP_CONFIG_PATHS: &str = "origen_app_config_paths"; macro_rules! use_app_config { () => {{ !std::env::var_os($crate::core::application::config::BYPASS_APP_CONFIG_ENV_VAR).is_some() }} } #[derive(Debug, Deserialize)] pub struct CurrentState { pub target: Option<Vec<String>> } impl CurrentState { pub fn build(root: &PathBuf) -> Self { let file = root.join(".origen").join("application.toml"); let mut s = config::Config::builder().set_default("target", None::<Vec<String>>).unwrap(); if file.exists() { s = s.add_source(File::with_name(&format!("{}", file.display()))); } let cb = exit_on_bad_config!(s.build()); let slf: Self = exit_on_bad_config!(cb.try_deserialize()); slf } pub fn apply_to(&mut self, config: &mut Config) { if let Some(t) = self.target.as_ref() { config.target = Some(t.to_owned()) } else { if let Some(t) = &config.target { let clean_defaults = target::set_at_root(t.iter().map( |s| s.as_str() ).collect(), config.root.as_ref().unwrap()); self.target = Some(clean_defaults); } } } pub fn build_and_apply(config: &mut Config) { if use_app_config!() { let mut slf = Self::build(config.root.as_ref().unwrap()); slf.apply_to(config); } } } #[derive(Debug, Deserialize)] // If you add an attribute to this you must also update: // * pyapi/src/lib.rs to convert it to Python // * default function below to define the default value // * add an example of it to src/app_generators/templates/app/config/application.toml pub struct Config { pub name: String, pub target: Option<Vec<String>>, pub mode: String, /// Don't use this unless you know what you're doing, use origen::STATUS::output_dir() instead, since /// that accounts for the output directory being overridden by the current command pub output_directory: Option<String>, /// Don't use this unless you know what you're doing, use origen::STATUS::reference_dir() instead, since /// that accounts for the reference directory being overridden by the current command pub reference_directory: Option<String>, pub website_output_directory: Option<String>, pub website_source_directory: Option<String>, pub website_release_location: Option<Location>, pub website_release_name: Option<String>, pub root: Option<PathBuf>, pub revision_control: Option<HashMap<String, String>>, pub unit_tester: Option<HashMap<String, String>>, pub publisher: Option<HashMap<String, String>>, pub linter: Option<HashMap<String, String>>, pub release_scribe: Option<HashMap<String, String>>, pub app_session_root: Option<String>, pub commands: Option<Vec<String>>, } impl Config { pub fn refresh(&mut self) { let latest = Self::build(self.root.as_ref().unwrap(), false); self.name = latest.name; self.target = latest.target; self.mode = latest.mode; self.reference_directory = latest.reference_directory; self.website_output_directory = latest.website_output_directory; self.website_source_directory = latest.website_source_directory; self.website_release_location = latest.website_release_location; self.website_release_name = latest.website_release_name; self.revision_control = latest.revision_control; self.unit_tester = latest.unit_tester; self.publisher 
= latest.publisher; self.linter = latest.linter; self.release_scribe = latest.release_scribe; self.app_session_root = latest.app_session_root; self.commands = latest.commands; } /// Builds a new config from all application.toml files found at the given app root pub fn build(root: &Path, default_only: bool) -> Config { log_trace!("Building app config"); let mut s = config::Config::builder() .set_default("target", None::<Vec<String>>) .unwrap() .set_default("mode", "development".to_string()) .unwrap() .set_default("revision_control", None::<HashMap<String, String>>) .unwrap() .set_default("unit_tester", None::<HashMap<String, String>>) .unwrap() .set_default("publisher", None::<HashMap<String, String>>) .unwrap() .set_default("linter", None::<HashMap<String, String>>) .unwrap() .set_default("release_scribe", None::<HashMap<String, String>>) .unwrap() .set_default("app_session_root", None::<String>) .unwrap() .set_default("commands", None::<Vec<String>>) .unwrap(); let mut files: Vec<PathBuf> = Vec::new(); if let Some(paths) = std::env::var_os(APP_CONFIG_PATHS) { log_trace!("Found custom config paths: {:?}", paths); for path in std::env::split_paths(&paths) { log_trace!("Looking for Origen app config file at '{}'", path.display()); if path.is_file() { if let Some(ext) = path.extension() { if ext == "toml" { files.push(path); } else { log_error!( "Expected file {} to have extension '.toml'. Found '{}'", path.display(), ext.to_string_lossy() ) } } else { // Accept a file without an extension; it will be interpreted as a .toml files.push(path); } } else if path.is_dir() { let f = path.join("application.toml"); if f.exists() { files.push(f); } } else
} } if use_app_config!() { let file = root.join("config").join("application.toml"); if file.exists() { files.push(file); } } else { // Bypass Origen's default configuration lookup - use only the enumerated configs log_trace!("Bypassing Origen's App Config Lookup"); } for f in files.iter().rev() { log_trace!("Loading Origen config file from '{}'", f.display()); s = s.add_source(File::with_name(&format!("{}", f.display()))); } s = s.add_source(Environment::with_prefix("origen_app").list_separator(",").with_list_parse_key("target").with_list_parse_key("commands").try_parsing(true)); let cb = exit_on_bad_config!(s.build()); let mut c: Self = exit_on_bad_config!(cb.try_deserialize()); c.root = Some(root.to_path_buf()); // TODO // if let Some(l) = loc { // c.website_release_location = Some(Location::new(&l)); // } log_trace!("Completed building app config"); c.validate_options(); if !default_only { CurrentState::build_and_apply(&mut c); } c } pub fn validate_options(&self) { log_trace!("Validating available options..."); if let Some(targets) = self.target.as_ref() { log_trace!("\tValidating default target..."); for t in targets { target::clean_name(t, "targets", true, self.root.as_ref().unwrap()); } log_trace!("\tValidating default target!"); } log_trace!("\tValidating publisher options..."); for unknown in self.validate_publisher_options() { log_warning!("Unknown Publisher Option '{}'", unknown); } log_trace!("\tFinished validating publisher options"); log_trace!("Finished checking configs!"); } pub fn validate_publisher_options(&self) -> Vec<String> { let mut unknowns: Vec<String> = vec![]; if let Some(p) = &self.publisher { for (opt, _) in p.iter() { if !PUBLISHER_OPTIONS.contains(&opt.as_str()) { unknowns.push(opt.clone()); } } } unknowns } pub fn cmd_paths(&self) -> Vec<PathBuf> { let mut retn = vec!(); if let Some(cmds) = self.commands.as_ref() { // Load in only the commands explicitly given for cmds_toml in cmds { let ct = self.root.as_ref().unwrap().join("config").join(cmds_toml); if ct.exists() { retn.push(ct.to_owned()); } else { log_error!("Cannot locate app commands file '{}'", scrub_path!(ct).display()) } } } else { // Load in any commands from: // 1) app_root/commands.toml // 2) app_root/commands/*/**.toml let commands_toml = self.root.as_ref().unwrap().join("config").join("commands.toml"); // println!("commands toml: {}", commands_toml.display()); if commands_toml.exists() { retn.push(commands_toml); } let mut commands_dir = self.root.as_ref().unwrap().join("config/commands"); if commands_dir.exists() { commands_dir = commands_dir.join("**/*.toml"); for entry in glob(commands_dir.to_str().unwrap()).unwrap() { match entry { Ok(e) => retn.push(e), Err(e) => log_error!("Error processing commands toml: {}", e) } } } } retn } }
{ log_error!( "Config path {} either does not exist or is not accessible", path.display() ); exit(1); }
conditional_block
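The recovered conditional above is the error arm of the `APP_CONFIG_PATHS` walk. That walk can be sketched as a standalone function: split an OS-specific path list, keep files directly, and resolve directories through their `application.toml`. The env var name comes from the source; the logging macros are swapped for `eprintln!`, and the real code exits on the error arm where this sketch only reports:

```rust
use std::path::PathBuf;

// Sketch of the APP_CONFIG_PATHS handling.
fn config_files_from_env(var: &str) -> Vec<PathBuf> {
    let mut files = Vec::new();
    if let Some(paths) = std::env::var_os(var) {
        for path in std::env::split_paths(&paths) {
            if path.is_file() {
                files.push(path);
            } else if path.is_dir() {
                let f = path.join("application.toml");
                if f.exists() {
                    files.push(f);
                }
            } else {
                eprintln!(
                    "Config path {} does not exist or is not accessible",
                    path.display()
                );
            }
        }
    }
    files
}

fn main() {
    for f in config_files_from_env("origen_app_config_paths") {
        println!("would load {}", f.display());
    }
}
```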
config.rs
use crate::utility::location::Location; use crate::exit_on_bad_config; use origen_metal::{config, scrub_path}; use origen_metal::config::{Environment, File}; use std::collections::HashMap; use std::path::{Path, PathBuf}; use crate::om::glob::glob; use std::process::exit; use super::target; const PUBLISHER_OPTIONS: &[&str] = &["system", "package_app", "upload_app"]; const BYPASS_APP_CONFIG_ENV_VAR: &str = "origen_app_bypass_config_lookup"; const APP_CONFIG_PATHS: &str = "origen_app_config_paths"; macro_rules! use_app_config { () => {{ !std::env::var_os($crate::core::application::config::BYPASS_APP_CONFIG_ENV_VAR).is_some() }} } #[derive(Debug, Deserialize)] pub struct CurrentState { pub target: Option<Vec<String>> } impl CurrentState { pub fn build(root: &PathBuf) -> Self { let file = root.join(".origen").join("application.toml"); let mut s = config::Config::builder().set_default("target", None::<Vec<String>>).unwrap(); if file.exists() { s = s.add_source(File::with_name(&format!("{}", file.display()))); } let cb = exit_on_bad_config!(s.build()); let slf: Self = exit_on_bad_config!(cb.try_deserialize()); slf } pub fn apply_to(&mut self, config: &mut Config) { if let Some(t) = self.target.as_ref() { config.target = Some(t.to_owned()) } else { if let Some(t) = &config.target { let clean_defaults = target::set_at_root(t.iter().map( |s| s.as_str() ).collect(), config.root.as_ref().unwrap()); self.target = Some(clean_defaults); }
let mut slf = Self::build(config.root.as_ref().unwrap()); slf.apply_to(config); } } } #[derive(Debug, Deserialize)] // If you add an attribute to this you must also update: // * pyapi/src/lib.rs to convert it to Python // * default function below to define the default value // * add an example of it to src/app_generators/templates/app/config/application.toml pub struct Config { pub name: String, pub target: Option<Vec<String>>, pub mode: String, /// Don't use this unless you know what you're doing, use origen::STATUS::output_dir() instead, since /// that accounts for the output directory being overridden by the current command pub output_directory: Option<String>, /// Don't use this unless you know what you're doing, use origen::STATUS::reference_dir() instead, since /// that accounts for the reference directory being overridden by the current command pub reference_directory: Option<String>, pub website_output_directory: Option<String>, pub website_source_directory: Option<String>, pub website_release_location: Option<Location>, pub website_release_name: Option<String>, pub root: Option<PathBuf>, pub revision_control: Option<HashMap<String, String>>, pub unit_tester: Option<HashMap<String, String>>, pub publisher: Option<HashMap<String, String>>, pub linter: Option<HashMap<String, String>>, pub release_scribe: Option<HashMap<String, String>>, pub app_session_root: Option<String>, pub commands: Option<Vec<String>>, } impl Config { pub fn refresh(&mut self) { let latest = Self::build(self.root.as_ref().unwrap(), false); self.name = latest.name; self.target = latest.target; self.mode = latest.mode; self.reference_directory = latest.reference_directory; self.website_output_directory = latest.website_output_directory; self.website_source_directory = latest.website_source_directory; self.website_release_location = latest.website_release_location; self.website_release_name = latest.website_release_name; self.revision_control = latest.revision_control; self.unit_tester = latest.unit_tester; self.publisher = latest.publisher; self.linter = latest.linter; self.release_scribe = latest.release_scribe; self.app_session_root = latest.app_session_root; self.commands = latest.commands; } /// Builds a new config from all application.toml files found at the given app root pub fn build(root: &Path, default_only: bool) -> Config { log_trace!("Building app config"); let mut s = config::Config::builder() .set_default("target", None::<Vec<String>>) .unwrap() .set_default("mode", "development".to_string()) .unwrap() .set_default("revision_control", None::<HashMap<String, String>>) .unwrap() .set_default("unit_tester", None::<HashMap<String, String>>) .unwrap() .set_default("publisher", None::<HashMap<String, String>>) .unwrap() .set_default("linter", None::<HashMap<String, String>>) .unwrap() .set_default("release_scribe", None::<HashMap<String, String>>) .unwrap() .set_default("app_session_root", None::<String>) .unwrap() .set_default("commands", None::<Vec<String>>) .unwrap(); let mut files: Vec<PathBuf> = Vec::new(); if let Some(paths) = std::env::var_os(APP_CONFIG_PATHS) { log_trace!("Found custom config paths: {:?}", paths); for path in std::env::split_paths(&paths) { log_trace!("Looking for Origen app config file at '{}'", path.display()); if path.is_file() { if let Some(ext) = path.extension() { if ext == "toml" { files.push(path); } else { log_error!( "Expected file {} to have extension '.toml'. Found '{}'", path.display(), ext.to_string_lossy() ) } } else { // accept a file without an extension. 
it will be interpreted as a .toml files.push(path); } } else if path.is_dir() { let f = path.join("application.toml"); if f.exists() { files.push(f); } } else { log_error!( "Config path {} either does not exist or is not accessible", path.display() ); exit(1); } } } if use_app_config!() { let file = root.join("config").join("application.toml"); if file.exists() { files.push(file); } } else { // Bypass Origen's default configuration lookup - use only the enumerated configs log_trace!("Bypassing Origen's App Config Lookup"); } for f in files.iter().rev() { log_trace!("Loading Origen config file from '{}'", f.display()); s = s.add_source(File::with_name(&format!("{}", f.display()))); } s = s.add_source(Environment::with_prefix("origen_app").list_separator(",").with_list_parse_key("target").with_list_parse_key("commands").try_parsing(true)); let cb = exit_on_bad_config!(s.build()); let mut c: Self = exit_on_bad_config!(cb.try_deserialize()); c.root = Some(root.to_path_buf()); // TODO // if let Some(l) = loc { // c.website_release_location = Some(Location::new(&l)); // } log_trace!("Completed building app config"); c.validate_options(); if !default_only { CurrentState::build_and_apply(&mut c); } c } pub fn validate_options(&self) { log_trace!("Validating available options..."); if let Some(targets) = self.target.as_ref() { log_trace!("\tValidating default target..."); for t in targets { target::clean_name(t, "targets", true, self.root.as_ref().unwrap()); } log_trace!("\tValidating default target!"); } log_trace!("\tValidating publisher options..."); for unknown in self.validate_publisher_options() { log_warning!("Unknown Publisher Option '{}'", unknown); } log_trace!("\tFinished validating publisher options"); log_trace!("Finished checking configs!"); } pub fn validate_publisher_options(&self) -> Vec<String> { let mut unknowns: Vec<String> = vec![]; if let Some(p) = &self.publisher { for (opt, _) in p.iter() { if !PUBLISHER_OPTIONS.contains(&opt.as_str()) { unknowns.push(opt.clone()); } } } unknowns } pub fn cmd_paths(&self) -> Vec<PathBuf> { let mut retn = vec!(); if let Some(cmds) = self.commands.as_ref() { // Load in only the commands explicitly given for cmds_toml in cmds { let ct = self.root.as_ref().unwrap().join("config").join(cmds_toml); if ct.exists() { retn.push(ct.to_owned()); } else { log_error!("Cannot locate app commands file '{}'", scrub_path!(ct).display()) } } } else { // Load in any commands from: // 1) app_root/commands.toml // 2) app_root/commands/*/**.toml let commands_toml = self.root.as_ref().unwrap().join("config").join("commands.toml"); // println!("commands toml: {}", commands_toml.display()); if commands_toml.exists() { retn.push(commands_toml); } let mut commands_dir = self.root.as_ref().unwrap().join("config/commands"); if commands_dir.exists() { commands_dir = commands_dir.join("**/*.toml"); for entry in glob(commands_dir.to_str().unwrap()).unwrap() { match entry { Ok(e) => retn.push(e), Err(e) => log_error!("Error processing commands toml: {}", e) } } } } retn } }
} } pub fn build_and_apply(config: &mut Config) { if use_app_config!() {
random_line_split
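Note: the `Config::build` flow in the record above layers hard-coded defaults, any discovered `application.toml` files, and `ORIGEN_APP_*` environment variables through the `config` crate's builder, with later sources overriding earlier ones (hence the `files.iter().rev()` loop adding the highest-priority file last). A minimal sketch of that same layering pattern, assuming the `config` crate's 0.13-style builder API and a hypothetical two-field `AppConfig` schema (not the real Origen one):

```
use config::{Config as RawConfig, Environment, File};
use serde::Deserialize;

// Hypothetical schema standing in for the real application config.
#[derive(Debug, Deserialize)]
struct AppConfig {
    name: String, // must come from the file or the environment
    mode: String, // falls back to the default below
}

fn load(path: &str) -> Result<AppConfig, config::ConfigError> {
    RawConfig::builder()
        // Lowest priority: hard-coded default.
        .set_default("mode", "development".to_string())?
        // Middle priority: values from a TOML file, if present.
        .add_source(File::with_name(path).required(false))
        // Highest priority: MYAPP_NAME / MYAPP_MODE environment variables.
        .add_source(Environment::with_prefix("myapp"))
        .build()?
        .try_deserialize()
}
```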
beacon_chain_builder.rs
use crate::{BeaconChain, BeaconChainTypes}; use eth2_hashing::hash; use lighthouse_bootstrap::Bootstrapper; use merkle_proof::MerkleTree; use rayon::prelude::*; use slog::Logger; use ssz::{Decode, Encode}; use state_processing::initialize_beacon_state_from_eth1; use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; use std::sync::Arc; use std::time::SystemTime; use tree_hash::{SignedRoot, TreeHash}; use types::{ BeaconBlock, BeaconState, ChainSpec, Deposit, DepositData, Domain, EthSpec, Fork, Hash256, Keypair, PublicKey, Signature, }; enum BuildStrategy<T: BeaconChainTypes> { FromGenesis { genesis_state: Box<BeaconState<T::EthSpec>>, genesis_block: Box<BeaconBlock<T::EthSpec>>, }, LoadFromStore, } pub struct BeaconChainBuilder<T: BeaconChainTypes> { build_strategy: BuildStrategy<T>, spec: ChainSpec, log: Logger, } impl<T: BeaconChainTypes> BeaconChainBuilder<T> { pub fn recent_genesis( keypairs: &[Keypair], minutes: u64, spec: ChainSpec, log: Logger, ) -> Result<Self, String> { Self::quick_start(recent_genesis_time(minutes), keypairs, spec, log) } pub fn quick_start( genesis_time: u64, keypairs: &[Keypair], spec: ChainSpec, log: Logger, ) -> Result<Self, String> { let genesis_state = interop_genesis_state(keypairs, genesis_time, &spec)?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn yaml_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result<Self, String> { let file = File::open(file.clone()) .map_err(|e| format!("Unable to open YAML genesis state file {:?}: {:?}", file, e))?; let genesis_state = serde_yaml::from_reader(file) .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn ssz_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result<Self, String> { let mut file = File::open(file.clone()) .map_err(|e| format!("Unable to open SSZ genesis state file {:?}: {:?}", file, e))?; let mut bytes = vec![]; file.read_to_end(&mut bytes) .map_err(|e| format!("Failed to read SSZ file: {:?}", e))?; let genesis_state = BeaconState::from_ssz_bytes(&bytes) .map_err(|e| format!("Unable to parse SSZ genesis state file: {:?}", e))?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn json_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result<Self, String> { let file = File::open(file.clone()) .map_err(|e| format!("Unable to open JSON genesis state file {:?}: {:?}", file, e))?; let genesis_state = serde_json::from_reader(file) .map_err(|e| format!("Unable to parse JSON genesis state file: {:?}", e))?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result<Self, String> { let bootstrapper = Bootstrapper::connect(server.to_string(), &log) .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; let (genesis_state, genesis_block) = bootstrapper .genesis() .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?; Ok(Self { build_strategy: BuildStrategy::FromGenesis { genesis_block: Box::new(genesis_block), genesis_state: Box::new(genesis_state), }, spec, log, }) } fn from_genesis_state( genesis_state: BeaconState<T::EthSpec>, spec: ChainSpec, log: Logger, ) -> Self { Self { build_strategy: BuildStrategy::FromGenesis { genesis_block: Box::new(genesis_block(&genesis_state, &spec)), genesis_state: Box::new(genesis_state), }, spec, log, } } pub fn from_store(spec: ChainSpec, log: Logger) -> Self { Self { build_strategy: BuildStrategy::LoadFromStore, spec, log, } } 
pub fn build( self, store: Arc<T::Store>, eth1_backend: T::Eth1Chain, event_handler: T::EventHandler, ) -> Result<BeaconChain<T>, String> { Ok(match self.build_strategy { BuildStrategy::LoadFromStore => { BeaconChain::from_store(store, eth1_backend, event_handler, self.spec, self.log) .map_err(|e| format!("Error loading BeaconChain from database: {:?}", e))? .ok_or_else(|| "Unable to find exising BeaconChain in database.".to_string())? } BuildStrategy::FromGenesis { genesis_block, genesis_state, } => BeaconChain::from_genesis( store, eth1_backend, event_handler, genesis_state.as_ref().clone(), genesis_block.as_ref().clone(), self.spec, self.log, ) .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e))?, }) } } fn genesis_block<T: EthSpec>(genesis_state: &BeaconState<T>, spec: &ChainSpec) -> BeaconBlock<T> { let mut genesis_block = BeaconBlock::empty(&spec); genesis_block.state_root = genesis_state.canonical_root(); genesis_block } /// Builds a genesis state as defined by the Eth2 interop procedure (see below). /// /// Reference: /// https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start fn interop_genesis_state<T: EthSpec>( keypairs: &[Keypair], genesis_time: u64, spec: &ChainSpec, ) -> Result<BeaconState<T>, String> { let eth1_block_hash = Hash256::from_slice(&[0x42; 32]); let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; let withdrawal_credentials = |pubkey: &PublicKey| { let mut credentials = hash(&pubkey.as_ssz_bytes()); credentials[0] = spec.bls_withdrawal_prefix_byte; Hash256::from_slice(&credentials) }; let datas = keypairs .into_par_iter() .map(|keypair| { let mut data = DepositData { withdrawal_credentials: withdrawal_credentials(&keypair.pk), pubkey: keypair.pk.clone().into(), amount, signature: Signature::empty_signature().into(), }; let domain = spec.get_domain( spec.genesis_slot.epoch(T::slots_per_epoch()), Domain::Deposit, &Fork::default(), ); data.signature = Signature::new(&data.signed_root()[..], domain, &keypair.sk).into(); data }) .collect::<Vec<_>>(); let deposit_root_leaves = datas .par_iter() .map(|data| Hash256::from_slice(&data.tree_hash_root())) .collect::<Vec<_>>(); let mut proofs = vec![]; let depth = spec.deposit_contract_tree_depth as usize; let mut tree = MerkleTree::create(&[], depth); for (i, deposit_leaf) in deposit_root_leaves.iter().enumerate() { if let Err(_) = tree.push_leaf(*deposit_leaf, depth)
let (_, mut proof) = tree.generate_proof(i, depth); proof.push(Hash256::from_slice(&int_to_bytes32(i + 1))); assert_eq!( proof.len(), depth + 1, "Deposit proof should be correct len" ); proofs.push(proof); } let deposits = datas .into_par_iter() .zip(proofs.into_par_iter()) .map(|(data, proof)| (data, proof.into())) .map(|(data, proof)| Deposit { proof, data }) .collect::<Vec<_>>(); let mut state = initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits, spec) .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; state.genesis_time = genesis_time; // Invalidate all the caches after all the manual state surgery. state.drop_all_caches(); Ok(state) } /// Returns `int` as little-endian bytes with a length of 32. fn int_to_bytes32(int: usize) -> Vec<u8> { let mut vec = int.to_le_bytes().to_vec(); vec.resize(32, 0); vec } /// Returns the system time, mod `minutes` minutes. /// /// Used for easily creating testnets. fn recent_genesis_time(minutes: u64) -> u64 { let now = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .unwrap() .as_secs(); let secs_after_last_period = now.checked_rem(minutes * 60).unwrap_or(0); now - secs_after_last_period } #[cfg(test)] mod test { use super::*; use types::{test_utils::generate_deterministic_keypairs, EthSpec, MinimalEthSpec}; type TestEthSpec = MinimalEthSpec; #[test] fn interop_state() { let validator_count = 16; let genesis_time = 42; let spec = &TestEthSpec::default_spec(); let keypairs = generate_deterministic_keypairs(validator_count); let state = interop_genesis_state::<TestEthSpec>(&keypairs, genesis_time, spec) .expect("should build state"); assert_eq!( state.eth1_data.block_hash, Hash256::from_slice(&[0x42; 32]), "eth1 block hash should be co-ordinated junk" ); assert_eq!( state.genesis_time, genesis_time, "genesis time should be as specified" ); for b in &state.balances { assert_eq!( *b, spec.max_effective_balance, "validator balances should be max effective balance" ); } for v in &state.validators { let creds = v.withdrawal_credentials.as_bytes(); assert_eq!( creds[0], spec.bls_withdrawal_prefix_byte, "first byte of withdrawal creds should be bls prefix" ); assert_eq!( &creds[1..], &hash(&v.pubkey.as_ssz_bytes())[1..], "rest of withdrawal creds should be pubkey hash" ) } assert_eq!( state.balances.len(), validator_count, "validator balances len should be correct" ); assert_eq!( state.validators.len(), validator_count, "validator count should be correct" ); } }
{ return Err(String::from("Failed to push leaf")); }
conditional_block
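Note: the `conditional_block` middle above is the error branch for `MerkleTree::push_leaf` inside the deposit-proof loop. Assuming the same `push_leaf` signature returning a `Result` (as the surrounding loop implies), an equivalent and slightly more idiomatic spelling maps the error and propagates it with `?`; this is shown purely as an alternative, not as the source's actual code:

```
// Equivalent to the `if let Err(_) = ... { return Err(...) }` branch above,
// inside a function returning Result<_, String>:
tree.push_leaf(*deposit_leaf, depth)
    .map_err(|_| String::from("Failed to push leaf"))?;
```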
beacon_chain_builder.rs
use crate::{BeaconChain, BeaconChainTypes}; use eth2_hashing::hash; use lighthouse_bootstrap::Bootstrapper; use merkle_proof::MerkleTree; use rayon::prelude::*; use slog::Logger; use ssz::{Decode, Encode}; use state_processing::initialize_beacon_state_from_eth1; use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; use std::sync::Arc; use std::time::SystemTime; use tree_hash::{SignedRoot, TreeHash}; use types::{ BeaconBlock, BeaconState, ChainSpec, Deposit, DepositData, Domain, EthSpec, Fork, Hash256, Keypair, PublicKey, Signature, }; enum BuildStrategy<T: BeaconChainTypes> { FromGenesis { genesis_state: Box<BeaconState<T::EthSpec>>, genesis_block: Box<BeaconBlock<T::EthSpec>>, }, LoadFromStore, } pub struct BeaconChainBuilder<T: BeaconChainTypes> { build_strategy: BuildStrategy<T>, spec: ChainSpec, log: Logger, } impl<T: BeaconChainTypes> BeaconChainBuilder<T> { pub fn recent_genesis( keypairs: &[Keypair], minutes: u64, spec: ChainSpec, log: Logger, ) -> Result<Self, String> { Self::quick_start(recent_genesis_time(minutes), keypairs, spec, log) } pub fn quick_start( genesis_time: u64, keypairs: &[Keypair], spec: ChainSpec, log: Logger, ) -> Result<Self, String> { let genesis_state = interop_genesis_state(keypairs, genesis_time, &spec)?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn yaml_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result<Self, String> { let file = File::open(file.clone()) .map_err(|e| format!("Unable to open YAML genesis state file {:?}: {:?}", file, e))?; let genesis_state = serde_yaml::from_reader(file) .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn ssz_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result<Self, String> { let mut file = File::open(file.clone()) .map_err(|e| format!("Unable to open SSZ genesis state file {:?}: {:?}", file, e))?; let mut bytes = vec![]; file.read_to_end(&mut bytes) .map_err(|e| format!("Failed to read SSZ file: {:?}", e))?; let genesis_state = BeaconState::from_ssz_bytes(&bytes) .map_err(|e| format!("Unable to parse SSZ genesis state file: {:?}", e))?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn json_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result<Self, String> { let file = File::open(file.clone()) .map_err(|e| format!("Unable to open JSON genesis state file {:?}: {:?}", file, e))?; let genesis_state = serde_json::from_reader(file) .map_err(|e| format!("Unable to parse JSON genesis state file: {:?}", e))?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result<Self, String> { let bootstrapper = Bootstrapper::connect(server.to_string(), &log) .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; let (genesis_state, genesis_block) = bootstrapper .genesis() .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?; Ok(Self { build_strategy: BuildStrategy::FromGenesis { genesis_block: Box::new(genesis_block), genesis_state: Box::new(genesis_state), }, spec, log, }) } fn from_genesis_state( genesis_state: BeaconState<T::EthSpec>, spec: ChainSpec, log: Logger, ) -> Self { Self { build_strategy: BuildStrategy::FromGenesis { genesis_block: Box::new(genesis_block(&genesis_state, &spec)), genesis_state: Box::new(genesis_state), }, spec, log, } } pub fn from_store(spec: ChainSpec, log: Logger) -> Self { Self { build_strategy: BuildStrategy::LoadFromStore, spec, log, } } 
pub fn build( self, store: Arc<T::Store>, eth1_backend: T::Eth1Chain, event_handler: T::EventHandler, ) -> Result<BeaconChain<T>, String> { Ok(match self.build_strategy { BuildStrategy::LoadFromStore => { BeaconChain::from_store(store, eth1_backend, event_handler, self.spec, self.log) .map_err(|e| format!("Error loading BeaconChain from database: {:?}", e))? .ok_or_else(|| "Unable to find existing BeaconChain in database.".to_string())? } BuildStrategy::FromGenesis { genesis_block, genesis_state, } => BeaconChain::from_genesis( store, eth1_backend, event_handler, genesis_state.as_ref().clone(), genesis_block.as_ref().clone(), self.spec, self.log, ) .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e))?, }) } } fn genesis_block<T: EthSpec>(genesis_state: &BeaconState<T>, spec: &ChainSpec) -> BeaconBlock<T> { let mut genesis_block = BeaconBlock::empty(&spec); genesis_block.state_root = genesis_state.canonical_root(); genesis_block } /// Builds a genesis state as defined by the Eth2 interop procedure (see below). /// /// Reference: /// https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start fn interop_genesis_state<T: EthSpec>( keypairs: &[Keypair], genesis_time: u64, spec: &ChainSpec, ) -> Result<BeaconState<T>, String> { let eth1_block_hash = Hash256::from_slice(&[0x42; 32]); let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; let withdrawal_credentials = |pubkey: &PublicKey| { let mut credentials = hash(&pubkey.as_ssz_bytes()); credentials[0] = spec.bls_withdrawal_prefix_byte; Hash256::from_slice(&credentials) }; let datas = keypairs .into_par_iter() .map(|keypair| { let mut data = DepositData { withdrawal_credentials: withdrawal_credentials(&keypair.pk), pubkey: keypair.pk.clone().into(), amount, signature: Signature::empty_signature().into(), }; let domain = spec.get_domain( spec.genesis_slot.epoch(T::slots_per_epoch()), Domain::Deposit, &Fork::default(), ); data.signature = Signature::new(&data.signed_root()[..], domain, &keypair.sk).into(); data }) .collect::<Vec<_>>(); let deposit_root_leaves = datas .par_iter() .map(|data| Hash256::from_slice(&data.tree_hash_root())) .collect::<Vec<_>>(); let mut proofs = vec![]; let depth = spec.deposit_contract_tree_depth as usize; let mut tree = MerkleTree::create(&[], depth); for (i, deposit_leaf) in deposit_root_leaves.iter().enumerate() { if let Err(_) = tree.push_leaf(*deposit_leaf, depth) { return Err(String::from("Failed to push leaf")); } let (_, mut proof) = tree.generate_proof(i, depth); proof.push(Hash256::from_slice(&int_to_bytes32(i + 1))); assert_eq!( proof.len(), depth + 1, "Deposit proof should be correct len" ); proofs.push(proof); } let deposits = datas .into_par_iter() .zip(proofs.into_par_iter()) .map(|(data, proof)| (data, proof.into())) .map(|(data, proof)| Deposit { proof, data }) .collect::<Vec<_>>(); let mut state = initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits, spec) .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; state.genesis_time = genesis_time; // Invalidate all the caches after all the manual state surgery. state.drop_all_caches(); Ok(state) } /// Returns `int` as little-endian bytes with a length of 32. fn int_to_bytes32(int: usize) -> Vec<u8> { let mut vec = int.to_le_bytes().to_vec(); vec.resize(32, 0); vec } /// Returns the system time, mod `minutes` minutes. /// /// Used for easily creating testnets.
fn recent_genesis_time(minutes: u64) -> u64 { let now = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .unwrap() .as_secs(); let secs_after_last_period = now.checked_rem(minutes * 60).unwrap_or(0); now - secs_after_last_period } #[cfg(test)] mod test { use super::*; use types::{test_utils::generate_deterministic_keypairs, EthSpec, MinimalEthSpec}; type TestEthSpec = MinimalEthSpec; #[test] fn interop_state() { let validator_count = 16; let genesis_time = 42; let spec = &TestEthSpec::default_spec(); let keypairs = generate_deterministic_keypairs(validator_count); let state = interop_genesis_state::<TestEthSpec>(&keypairs, genesis_time, spec) .expect("should build state"); assert_eq!( state.eth1_data.block_hash, Hash256::from_slice(&[0x42; 32]), "eth1 block hash should be co-ordinated junk" ); assert_eq!( state.genesis_time, genesis_time, "genesis time should be as specified" ); for b in &state.balances { assert_eq!( *b, spec.max_effective_balance,
let creds = v.withdrawal_credentials.as_bytes(); assert_eq!( creds[0], spec.bls_withdrawal_prefix_byte, "first byte of withdrawal creds should be bls prefix" ); assert_eq!( &creds[1..], &hash(&v.pubkey.as_ssz_bytes())[1..], "rest of withdrawal creds should be pubkey hash" ) } assert_eq!( state.balances.len(), validator_count, "validator balances len should be correct" ); assert_eq!( state.validators.len(), validator_count, "validator count should be correct" ); } }
"validator balances should be max effective balance" ); } for v in &state.validators {
random_line_split
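Note: `recent_genesis_time` in the record above rounds the current Unix time down to the most recent whole period of `minutes * 60` seconds, with `checked_rem` guarding against `minutes == 0`. A self-contained sketch of the same rounding using only `std` (the function name here is illustrative):

```
use std::time::{SystemTime, UNIX_EPOCH};

/// Round the current Unix time down to the last multiple of `minutes * 60` seconds.
fn round_down_to_period(minutes: u64) -> u64 {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is before the Unix epoch")
        .as_secs();
    // e.g. minutes = 30, now = 10_000 -> 10_000 - (10_000 % 1_800) = 9_000
    let secs_after_last_period = now.checked_rem(minutes * 60).unwrap_or(0);
    now - secs_after_last_period
}
```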
beacon_chain_builder.rs
use crate::{BeaconChain, BeaconChainTypes}; use eth2_hashing::hash; use lighthouse_bootstrap::Bootstrapper; use merkle_proof::MerkleTree; use rayon::prelude::*; use slog::Logger; use ssz::{Decode, Encode}; use state_processing::initialize_beacon_state_from_eth1; use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; use std::sync::Arc; use std::time::SystemTime; use tree_hash::{SignedRoot, TreeHash}; use types::{ BeaconBlock, BeaconState, ChainSpec, Deposit, DepositData, Domain, EthSpec, Fork, Hash256, Keypair, PublicKey, Signature, }; enum BuildStrategy<T: BeaconChainTypes> { FromGenesis { genesis_state: Box<BeaconState<T::EthSpec>>, genesis_block: Box<BeaconBlock<T::EthSpec>>, }, LoadFromStore, } pub struct BeaconChainBuilder<T: BeaconChainTypes> { build_strategy: BuildStrategy<T>, spec: ChainSpec, log: Logger, } impl<T: BeaconChainTypes> BeaconChainBuilder<T> { pub fn recent_genesis( keypairs: &[Keypair], minutes: u64, spec: ChainSpec, log: Logger, ) -> Result<Self, String> { Self::quick_start(recent_genesis_time(minutes), keypairs, spec, log) } pub fn quick_start( genesis_time: u64, keypairs: &[Keypair], spec: ChainSpec, log: Logger, ) -> Result<Self, String> { let genesis_state = interop_genesis_state(keypairs, genesis_time, &spec)?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn yaml_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result<Self, String> { let file = File::open(file.clone()) .map_err(|e| format!("Unable to open YAML genesis state file {:?}: {:?}", file, e))?; let genesis_state = serde_yaml::from_reader(file) .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn
(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result<Self, String> { let mut file = File::open(file.clone()) .map_err(|e| format!("Unable to open SSZ genesis state file {:?}: {:?}", file, e))?; let mut bytes = vec![]; file.read_to_end(&mut bytes) .map_err(|e| format!("Failed to read SSZ file: {:?}", e))?; let genesis_state = BeaconState::from_ssz_bytes(&bytes) .map_err(|e| format!("Unable to parse SSZ genesis state file: {:?}", e))?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn json_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result<Self, String> { let file = File::open(file.clone()) .map_err(|e| format!("Unable to open JSON genesis state file {:?}: {:?}", file, e))?; let genesis_state = serde_json::from_reader(file) .map_err(|e| format!("Unable to parse JSON genesis state file: {:?}", e))?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result<Self, String> { let bootstrapper = Bootstrapper::connect(server.to_string(), &log) .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; let (genesis_state, genesis_block) = bootstrapper .genesis() .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?; Ok(Self { build_strategy: BuildStrategy::FromGenesis { genesis_block: Box::new(genesis_block), genesis_state: Box::new(genesis_state), }, spec, log, }) } fn from_genesis_state( genesis_state: BeaconState<T::EthSpec>, spec: ChainSpec, log: Logger, ) -> Self { Self { build_strategy: BuildStrategy::FromGenesis { genesis_block: Box::new(genesis_block(&genesis_state, &spec)), genesis_state: Box::new(genesis_state), }, spec, log, } } pub fn from_store(spec: ChainSpec, log: Logger) -> Self { Self { build_strategy: BuildStrategy::LoadFromStore, spec, log, } } pub fn build( self, store: Arc<T::Store>, eth1_backend: T::Eth1Chain, event_handler: T::EventHandler, ) -> Result<BeaconChain<T>, String> { Ok(match self.build_strategy { BuildStrategy::LoadFromStore => { BeaconChain::from_store(store, eth1_backend, event_handler, self.spec, self.log) .map_err(|e| format!("Error loading BeaconChain from database: {:?}", e))? .ok_or_else(|| "Unable to find exising BeaconChain in database.".to_string())? } BuildStrategy::FromGenesis { genesis_block, genesis_state, } => BeaconChain::from_genesis( store, eth1_backend, event_handler, genesis_state.as_ref().clone(), genesis_block.as_ref().clone(), self.spec, self.log, ) .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e))?, }) } } fn genesis_block<T: EthSpec>(genesis_state: &BeaconState<T>, spec: &ChainSpec) -> BeaconBlock<T> { let mut genesis_block = BeaconBlock::empty(&spec); genesis_block.state_root = genesis_state.canonical_root(); genesis_block } /// Builds a genesis state as defined by the Eth2 interop procedure (see below). 
/// /// Reference: /// https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start fn interop_genesis_state<T: EthSpec>( keypairs: &[Keypair], genesis_time: u64, spec: &ChainSpec, ) -> Result<BeaconState<T>, String> { let eth1_block_hash = Hash256::from_slice(&[0x42; 32]); let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; let withdrawal_credentials = |pubkey: &PublicKey| { let mut credentials = hash(&pubkey.as_ssz_bytes()); credentials[0] = spec.bls_withdrawal_prefix_byte; Hash256::from_slice(&credentials) }; let datas = keypairs .into_par_iter() .map(|keypair| { let mut data = DepositData { withdrawal_credentials: withdrawal_credentials(&keypair.pk), pubkey: keypair.pk.clone().into(), amount, signature: Signature::empty_signature().into(), }; let domain = spec.get_domain( spec.genesis_slot.epoch(T::slots_per_epoch()), Domain::Deposit, &Fork::default(), ); data.signature = Signature::new(&data.signed_root()[..], domain, &keypair.sk).into(); data }) .collect::<Vec<_>>(); let deposit_root_leaves = datas .par_iter() .map(|data| Hash256::from_slice(&data.tree_hash_root())) .collect::<Vec<_>>(); let mut proofs = vec![]; let depth = spec.deposit_contract_tree_depth as usize; let mut tree = MerkleTree::create(&[], depth); for (i, deposit_leaf) in deposit_root_leaves.iter().enumerate() { if let Err(_) = tree.push_leaf(*deposit_leaf, depth) { return Err(String::from("Failed to push leaf")); } let (_, mut proof) = tree.generate_proof(i, depth); proof.push(Hash256::from_slice(&int_to_bytes32(i + 1))); assert_eq!( proof.len(), depth + 1, "Deposit proof should be correct len" ); proofs.push(proof); } let deposits = datas .into_par_iter() .zip(proofs.into_par_iter()) .map(|(data, proof)| (data, proof.into())) .map(|(data, proof)| Deposit { proof, data }) .collect::<Vec<_>>(); let mut state = initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits, spec) .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; state.genesis_time = genesis_time; // Invalidate all the caches after all the manual state surgery. state.drop_all_caches(); Ok(state) } /// Returns `int` as little-endian bytes with a length of 32. fn int_to_bytes32(int: usize) -> Vec<u8> { let mut vec = int.to_le_bytes().to_vec(); vec.resize(32, 0); vec } /// Returns the system time, mod `minutes` minutes. /// /// Used for easily creating testnets.
fn recent_genesis_time(minutes: u64) -> u64 { let now = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .unwrap() .as_secs(); let secs_after_last_period = now.checked_rem(minutes * 60).unwrap_or(0); now - secs_after_last_period } #[cfg(test)] mod test { use super::*; use types::{test_utils::generate_deterministic_keypairs, EthSpec, MinimalEthSpec}; type TestEthSpec = MinimalEthSpec; #[test] fn interop_state() { let validator_count = 16; let genesis_time = 42; let spec = &TestEthSpec::default_spec(); let keypairs = generate_deterministic_keypairs(validator_count); let state = interop_genesis_state::<TestEthSpec>(&keypairs, genesis_time, spec) .expect("should build state"); assert_eq!( state.eth1_data.block_hash, Hash256::from_slice(&[0x42; 32]), "eth1 block hash should be co-ordinated junk" ); assert_eq!( state.genesis_time, genesis_time, "genesis time should be as specified" ); for b in &state.balances { assert_eq!( *b, spec.max_effective_balance, "validator balances should be max effective balance" ); } for v in &state.validators { let creds = v.withdrawal_credentials.as_bytes(); assert_eq!( creds[0], spec.bls_withdrawal_prefix_byte, "first byte of withdrawal creds should be bls prefix" ); assert_eq!( &creds[1..], &hash(&v.pubkey.as_ssz_bytes())[1..], "rest of withdrawal creds should be pubkey hash" ) } assert_eq!( state.balances.len(), validator_count, "validator balances len should be correct" ); assert_eq!( state.validators.len(), validator_count, "validator count should be correct" ); } }
ssz_state
identifier_name
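Note: the `identifier_name` middle above is the method name `ssz_state`, whose body follows the usual read-then-decode pattern for SSZ files. A minimal round-trip of that encode/decode pattern, assuming the same `ssz` crate's `Encode`/`Decode` implementations for primitive integers (the pattern, not the source's actual code):

```
use ssz::{Decode, Encode};

fn ssz_roundtrip() -> Result<(), String> {
    // Encode a value to SSZ bytes, then decode it back, mirroring
    // the error-handling style used by `ssz_state` above.
    let original: u64 = 42;
    let bytes = original.as_ssz_bytes();
    let decoded = u64::from_ssz_bytes(&bytes)
        .map_err(|e| format!("Unable to parse SSZ bytes: {:?}", e))?;
    assert_eq!(original, decoded);
    Ok(())
}
```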
beacon_chain_builder.rs
use crate::{BeaconChain, BeaconChainTypes}; use eth2_hashing::hash; use lighthouse_bootstrap::Bootstrapper; use merkle_proof::MerkleTree; use rayon::prelude::*; use slog::Logger; use ssz::{Decode, Encode}; use state_processing::initialize_beacon_state_from_eth1; use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; use std::sync::Arc; use std::time::SystemTime; use tree_hash::{SignedRoot, TreeHash}; use types::{ BeaconBlock, BeaconState, ChainSpec, Deposit, DepositData, Domain, EthSpec, Fork, Hash256, Keypair, PublicKey, Signature, }; enum BuildStrategy<T: BeaconChainTypes> { FromGenesis { genesis_state: Box<BeaconState<T::EthSpec>>, genesis_block: Box<BeaconBlock<T::EthSpec>>, }, LoadFromStore, } pub struct BeaconChainBuilder<T: BeaconChainTypes> { build_strategy: BuildStrategy<T>, spec: ChainSpec, log: Logger, } impl<T: BeaconChainTypes> BeaconChainBuilder<T> { pub fn recent_genesis( keypairs: &[Keypair], minutes: u64, spec: ChainSpec, log: Logger, ) -> Result<Self, String> { Self::quick_start(recent_genesis_time(minutes), keypairs, spec, log) } pub fn quick_start( genesis_time: u64, keypairs: &[Keypair], spec: ChainSpec, log: Logger, ) -> Result<Self, String> { let genesis_state = interop_genesis_state(keypairs, genesis_time, &spec)?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn yaml_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result<Self, String> { let file = File::open(file.clone()) .map_err(|e| format!("Unable to open YAML genesis state file {:?}: {:?}", file, e))?; let genesis_state = serde_yaml::from_reader(file) .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn ssz_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result<Self, String> { let mut file = File::open(file.clone()) .map_err(|e| format!("Unable to open SSZ genesis state file {:?}: {:?}", file, e))?; let mut bytes = vec![]; file.read_to_end(&mut bytes) .map_err(|e| format!("Failed to read SSZ file: {:?}", e))?; let genesis_state = BeaconState::from_ssz_bytes(&bytes) .map_err(|e| format!("Unable to parse SSZ genesis state file: {:?}", e))?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn json_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result<Self, String>
pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result<Self, String> { let bootstrapper = Bootstrapper::connect(server.to_string(), &log) .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; let (genesis_state, genesis_block) = bootstrapper .genesis() .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?; Ok(Self { build_strategy: BuildStrategy::FromGenesis { genesis_block: Box::new(genesis_block), genesis_state: Box::new(genesis_state), }, spec, log, }) } fn from_genesis_state( genesis_state: BeaconState<T::EthSpec>, spec: ChainSpec, log: Logger, ) -> Self { Self { build_strategy: BuildStrategy::FromGenesis { genesis_block: Box::new(genesis_block(&genesis_state, &spec)), genesis_state: Box::new(genesis_state), }, spec, log, } } pub fn from_store(spec: ChainSpec, log: Logger) -> Self { Self { build_strategy: BuildStrategy::LoadFromStore, spec, log, } } pub fn build( self, store: Arc<T::Store>, eth1_backend: T::Eth1Chain, event_handler: T::EventHandler, ) -> Result<BeaconChain<T>, String> { Ok(match self.build_strategy { BuildStrategy::LoadFromStore => { BeaconChain::from_store(store, eth1_backend, event_handler, self.spec, self.log) .map_err(|e| format!("Error loading BeaconChain from database: {:?}", e))? .ok_or_else(|| "Unable to find exising BeaconChain in database.".to_string())? } BuildStrategy::FromGenesis { genesis_block, genesis_state, } => BeaconChain::from_genesis( store, eth1_backend, event_handler, genesis_state.as_ref().clone(), genesis_block.as_ref().clone(), self.spec, self.log, ) .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e))?, }) } } fn genesis_block<T: EthSpec>(genesis_state: &BeaconState<T>, spec: &ChainSpec) -> BeaconBlock<T> { let mut genesis_block = BeaconBlock::empty(&spec); genesis_block.state_root = genesis_state.canonical_root(); genesis_block } /// Builds a genesis state as defined by the Eth2 interop procedure (see below). 
/// /// Reference: /// https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start fn interop_genesis_state<T: EthSpec>( keypairs: &[Keypair], genesis_time: u64, spec: &ChainSpec, ) -> Result<BeaconState<T>, String> { let eth1_block_hash = Hash256::from_slice(&[0x42; 32]); let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; let withdrawal_credentials = |pubkey: &PublicKey| { let mut credentials = hash(&pubkey.as_ssz_bytes()); credentials[0] = spec.bls_withdrawal_prefix_byte; Hash256::from_slice(&credentials) }; let datas = keypairs .into_par_iter() .map(|keypair| { let mut data = DepositData { withdrawal_credentials: withdrawal_credentials(&keypair.pk), pubkey: keypair.pk.clone().into(), amount, signature: Signature::empty_signature().into(), }; let domain = spec.get_domain( spec.genesis_slot.epoch(T::slots_per_epoch()), Domain::Deposit, &Fork::default(), ); data.signature = Signature::new(&data.signed_root()[..], domain, &keypair.sk).into(); data }) .collect::<Vec<_>>(); let deposit_root_leaves = datas .par_iter() .map(|data| Hash256::from_slice(&data.tree_hash_root())) .collect::<Vec<_>>(); let mut proofs = vec![]; let depth = spec.deposit_contract_tree_depth as usize; let mut tree = MerkleTree::create(&[], depth); for (i, deposit_leaf) in deposit_root_leaves.iter().enumerate() { if let Err(_) = tree.push_leaf(*deposit_leaf, depth) { return Err(String::from("Failed to push leaf")); } let (_, mut proof) = tree.generate_proof(i, depth); proof.push(Hash256::from_slice(&int_to_bytes32(i + 1))); assert_eq!( proof.len(), depth + 1, "Deposit proof should be correct len" ); proofs.push(proof); } let deposits = datas .into_par_iter() .zip(proofs.into_par_iter()) .map(|(data, proof)| (data, proof.into())) .map(|(data, proof)| Deposit { proof, data }) .collect::<Vec<_>>(); let mut state = initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits, spec) .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; state.genesis_time = genesis_time; // Invalidate all the caches after all the manual state surgery. state.drop_all_caches(); Ok(state) } /// Returns `int` as little-endian bytes with a length of 32. fn int_to_bytes32(int: usize) -> Vec<u8> { let mut vec = int.to_le_bytes().to_vec(); vec.resize(32, 0); vec } /// Returns the system time, mod `minutes` minutes. /// /// Used for easily creating testnets.
fn recent_genesis_time(minutes: u64) -> u64 { let now = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .unwrap() .as_secs(); let secs_after_last_period = now.checked_rem(minutes * 60).unwrap_or(0); now - secs_after_last_period } #[cfg(test)] mod test { use super::*; use types::{test_utils::generate_deterministic_keypairs, EthSpec, MinimalEthSpec}; type TestEthSpec = MinimalEthSpec; #[test] fn interop_state() { let validator_count = 16; let genesis_time = 42; let spec = &TestEthSpec::default_spec(); let keypairs = generate_deterministic_keypairs(validator_count); let state = interop_genesis_state::<TestEthSpec>(&keypairs, genesis_time, spec) .expect("should build state"); assert_eq!( state.eth1_data.block_hash, Hash256::from_slice(&[0x42; 32]), "eth1 block hash should be co-ordinated junk" ); assert_eq!( state.genesis_time, genesis_time, "genesis time should be as specified" ); for b in &state.balances { assert_eq!( *b, spec.max_effective_balance, "validator balances should be max effective balance" ); } for v in &state.validators { let creds = v.withdrawal_credentials.as_bytes(); assert_eq!( creds[0], spec.bls_withdrawal_prefix_byte, "first byte of withdrawal creds should be bls prefix" ); assert_eq!( &creds[1..], &hash(&v.pubkey.as_ssz_bytes())[1..], "rest of withdrawal creds should be pubkey hash" ) } assert_eq!( state.balances.len(), validator_count, "validator balances len should be correct" ); assert_eq!( state.validators.len(), validator_count, "validator count should be correct" ); } }
{ let file = File::open(file.clone()) .map_err(|e| format!("Unable to open JSON genesis state file {:?}: {:?}", file, e))?; let genesis_state = serde_json::from_reader(file) .map_err(|e| format!("Unable to parse JSON genesis state file: {:?}", e))?; Ok(Self::from_genesis_state(genesis_state, spec, log)) }
identifier_body
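Note: the `identifier_body` middle above completes `json_state`, but the more distinctive byte-level logic in this file is the withdrawal-credentials derivation in `interop_genesis_state`: hash the SSZ-encoded pubkey, then overwrite byte 0 with the BLS prefix. A standalone sketch of that step, assuming `eth2_hashing::hash` returns a 32-byte digest as a `Vec<u8>` (as the surrounding code relies on) and using a dummy byte string in place of a real BLS pubkey:

```
use eth2_hashing::hash;

/// Derive interop-style withdrawal credentials from SSZ pubkey bytes.
fn withdrawal_credentials(pubkey_ssz: &[u8], bls_prefix: u8) -> Vec<u8> {
    // 32-byte SHA-256 digest of the pubkey, with byte 0 replaced by the prefix.
    let mut credentials = hash(pubkey_ssz);
    credentials[0] = bls_prefix;
    credentials
}

// e.g. withdrawal_credentials(&[0u8; 48], 0x00) yields a 32-byte value whose
// first byte is 0x00 and whose remaining bytes are the tail of the pubkey hash.
```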
networking.rs
use std::io::{Read, Write, Result, BufRead, BufReader, BufWriter}; use std::fs::File; use std::net::{TcpListener, TcpStream}; use std::mem::size_of; use std::sync::Arc; use std::sync::mpsc::{Sender, Receiver, channel}; use std::thread; use std::thread::sleep_ms; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use communication::communicator::{Binary, Process}; use drain::DrainExt; // TODO : Much of this only relates to BinaryWriter/BinaryReader based communication, not networking. // TODO : Could be moved somewhere less networking-specific. #[derive(Copy, Clone)] pub struct
{ pub graph: u64, // graph identifier pub channel: u64, // index of channel pub source: u64, // index of worker sending message pub target: u64, // index of worker receiving message pub length: u64, // number of bytes in message } impl MessageHeader { // returns a header when there is enough supporting data fn try_read(bytes: &mut &[u8]) -> Option<MessageHeader> { if bytes.len() > size_of::<MessageHeader>() { // capture original in case we need to rewind let original = *bytes; // unclear what order struct initializers run in, so... let graph = bytes.read_u64::<LittleEndian>().unwrap(); let channel = bytes.read_u64::<LittleEndian>().unwrap(); let source = bytes.read_u64::<LittleEndian>().unwrap(); let target = bytes.read_u64::<LittleEndian>().unwrap(); let length = bytes.read_u64::<LittleEndian>().unwrap(); if bytes.len() >= length as usize { Some(MessageHeader { graph: graph, channel: channel, source: source, target: target, length: length, }) } else { // rewind the reader *bytes = original; None } } else { None } } fn write_to<W: Write>(&self, writer: &mut W) -> Result<()> { try!(writer.write_u64::<LittleEndian>(self.graph)); try!(writer.write_u64::<LittleEndian>(self.channel)); try!(writer.write_u64::<LittleEndian>(self.source)); try!(writer.write_u64::<LittleEndian>(self.target)); try!(writer.write_u64::<LittleEndian>(self.length)); Ok(()) } } // // structure in charge of receiving data from a Reader, for example the network // struct BinaryReceiver<R: Read> { // reader: R, // the generic reader // buffer: Vec<u8>, // current working buffer // double: Vec<u8>, // second working buffer // staging: Vec<u8>, // 1 << 20 of buffer to read into // targets: Switchboard<(Sender<Vec<u8>>, Receiver<Vec<u8>>)>, // } // // impl<R: Read> BinaryReceiver<R> { // fn new(reader: R, channels: Receiver<((u64, u64, u64), (Sender<Vec<u8>>, Receiver<Vec<u8>>))>) -> BinaryReceiver<R> { // BinaryReceiver { // reader: reader, // buffer: Vec::new(), // double: Vec::new(), // staging: vec![0u8; 1 << 20], // targets: Switchboard::new(channels), // } // } // // fn recv_loop(&mut self) { // loop { // // // attempt to read some more bytes into our buffer // // TODO : We read in to self.staging because extending a Vec<u8> is hard without // // TODO : using set_len, which is unsafe. // // TODO : Could consider optimizing for the self.buffer.len() == 0 case, swapping // // TODO : self.staging with self.buffer, rather than using write_all. // let read = self.reader.read(&mut self.staging[..]).unwrap_or(0); // self.buffer.write_all(&self.staging[..read]).unwrap(); // <-- shouldn't fail // // { // // get a view of available bytes // let mut slice = &self.buffer[..]; // // while let Some(header) = MessageHeader::try_read(&mut slice) { // // let h_len = header.length as usize; // length in bytes // let target = self.targets.ensure(header.target, header.graph, header.channel); // let mut buffer = target.1.try_recv().unwrap_or(Vec::new()); // // buffer.clear(); // buffer.write_all(&slice[..h_len]).unwrap(); // slice = &slice[h_len..]; // // target.0.send(buffer).unwrap(); // } // // // TODO: way inefficient... =/ Fix! 
:D // // if slice.len() < self.buffer.len() { // self.double.clear(); // self.double.write_all(slice).unwrap(); // // } // } // // // if self.double.len() > 0 { // mem::swap(&mut self.buffer, &mut self.double); // // self.double.clear(); // // } // } // } // } // structure in charge of receiving data from a Reader, for example the network struct BinaryReceiver<R: Read> { reader: R, // the generic reader buffer: Vec<u8>, // current working buffer length: usize, targets: Switchboard<(Sender<Vec<u8>>, Receiver<Vec<u8>>)>, } impl<R: Read> BinaryReceiver<R> { fn new(reader: R, channels: Receiver<((u64, u64, u64), (Sender<Vec<u8>>, Receiver<Vec<u8>>))>) -> BinaryReceiver<R> { BinaryReceiver { reader: reader, buffer: vec![0u8; 1 << 20], length: 0, targets: Switchboard::new(channels), } } fn recv_loop(&mut self) { loop { // if we've mostly filled our buffer and still can't read a whole message from it, // we'll need more space / to read more at once. let's double the buffer! if self.length >= self.buffer.len() / 2 { self.buffer.extend(::std::iter::repeat(0u8).take(self.length)); } // attempt to read some more bytes into our buffer let read = self.reader.read(&mut self.buffer[self.length..]).unwrap_or(0); self.length += read; let remaining = { let mut slice = &self.buffer[..self.length]; while let Some(header) = MessageHeader::try_read(&mut slice) { let h_len = header.length as usize; // length in bytes let target = &mut self.targets.ensure(header.target, header.graph, header.channel).0; target.send(slice[..h_len].to_vec()).unwrap(); slice = &slice[h_len..]; } slice.len() }; // we consumed bytes, must shift to beginning. // this should optimize to copy_overlapping; // would just do that if it weren't unsafe =/ if remaining < self.length { for index in 0..remaining { self.buffer[index] = self.buffer[index + self.length - remaining]; } self.length = remaining; } } } } // structure in charge of sending data to a Writer, for example the network struct BinarySender<W: Write> { id: u64, // destination process writer: W, sources: Receiver<(MessageHeader, Vec<u8>)>, returns: Switchboard<Sender<Vec<u8>>>, } impl<W: Write> BinarySender<W> { fn new(id: u64, writer: W, sources: Receiver<(MessageHeader, Vec<u8>)>, channels: Receiver<((u64, u64, u64), Sender<Vec<u8>>)>) -> BinarySender<W> { BinarySender { id: id, writer: writer, sources: sources, returns: Switchboard::new(channels), } } fn send_loop(&mut self) { let mut stash = Vec::new(); // block until data to recv while let Ok((header, buffer)) = self.sources.recv() { stash.push((header, buffer)); // collect any additional outstanding data to send while let Ok((header, buffer)) = self.sources.try_recv() { stash.push((header, buffer)); } // println!("send loop to process {}:\tstarting", self.id); for (mut header, mut buffer) in stash.drain_temp() { header.length = buffer.len() as u64; // <-- is this really our job? 
O.o header.write_to(&mut self.writer).unwrap(); self.writer.write_all(&buffer[..]).unwrap(); buffer.clear(); // self.returns.ensure(header.source, header.graph, header.channel).send(buffer).unwrap(); } self.writer.flush().unwrap(); // <-- because writer is buffered } } } struct Switchboard<T:Send> { source: Receiver<((u64, u64, u64), T)>, buffer: Vec<Vec<Vec<Option<T>>>>, } impl<T:Send> Switchboard<T> { pub fn new(source: Receiver<((u64, u64, u64), T)>) -> Switchboard<T> { Switchboard { source: source, buffer: Vec::new(), } } pub fn ensure(&mut self, a: u64, b: u64, c: u64) -> &mut T { let a = a as usize; let b = b as usize; let c = c as usize; while self.buffer.len() <= a { self.buffer.push(Vec::new()); } while self.buffer[a].len() <= b { self.buffer[a].push(Vec::new()); } while self.buffer[a][b].len() <= c { self.buffer[a][b].push(None); } while let None = self.buffer[a][b][c] { let ((x, y, z), s) = self.source.recv().unwrap(); let x = x as usize; let y = y as usize; let z = z as usize; while self.buffer.len() <= x { self.buffer.push(Vec::new()); } while self.buffer[x].len() <= y { self.buffer[x].push(Vec::new()); } while self.buffer[x][y].len() <= z { self.buffer[x][y].push(None); } self.buffer[x][y][z] = Some(s); } // we've just ensured that this is not None self.buffer[a][b][c].as_mut().unwrap() } } pub fn initialize_networking_from_file(filename: &str, my_index: u64, workers: u64) -> Result<Vec<Binary>> { let reader = BufReader::new(try!(File::open(filename))); let mut addresses = Vec::new(); for line in reader.lines() { addresses.push(try!(line)); } initialize_networking(addresses, my_index, workers) } pub fn initialize_networking(addresses: Vec<String>, my_index: u64, workers: u64) -> Result<Vec<Binary>> { let processes = addresses.len() as u64; let hosts1 = Arc::new(addresses); let hosts2 = hosts1.clone(); let start_task = thread::spawn(move || start_connections(hosts1, my_index)); let await_task = thread::spawn(move || await_connections(hosts2, my_index)); let mut results = try!(start_task.join().unwrap()); results.push(None); let mut to_extend = try!(await_task.join().unwrap()); results.extend(to_extend.drain_temp()); println!("worker {}:\tinitialization complete", my_index); let mut writers = Vec::new(); // handles to the BinarySenders (to present new channels) let mut readers = Vec::new(); // handles to the BinaryReceivers (to present new channels) let mut senders = Vec::new(); // destinations for serialized data (to send serialized data) // for each process, if a stream exists (i.e. not local)... 
for index in (0..results.len()) { if let Some(stream) = results[index].take() { let (writer_channels_s, writer_channels_r) = channel(); let (reader_channels_s, reader_channels_r) = channel(); let (sender_channels_s, sender_channels_r) = channel(); writers.push(writer_channels_s); // readers.push(reader_channels_s); // senders.push(sender_channels_s); // let mut sender = BinarySender::new(index as u64, BufWriter::with_capacity(1 << 20, stream.try_clone().unwrap()), sender_channels_r, writer_channels_r); let mut recver = BinaryReceiver::new(stream.try_clone().unwrap(), reader_channels_r); // start senders and receivers associated with this stream thread::Builder::new().name(format!("send thread {}", index)) .spawn(move || sender.send_loop()) .unwrap(); thread::Builder::new().name(format!("recv thread {}", index)) .spawn(move || recver.recv_loop()) .unwrap(); } } let proc_comms = Process::new_vector(workers); let mut results = Vec::new(); for (index, proc_comm) in proc_comms.into_iter().enumerate() { results.push(Binary { inner: proc_comm, index: my_index * workers + index as u64, peers: workers * processes, graph: 0, // TODO : Fix this allocated: 0, writers: writers.clone(), readers: readers.clone(), senders: senders.clone(), }); } return Ok(results); } // result contains connections [0, my_index - 1]. fn start_connections(addresses: Arc<Vec<String>>, my_index: u64) -> Result<Vec<Option<TcpStream>>> { let mut results: Vec<_> = (0..my_index).map(|_| None).collect(); for index in (0..my_index) { let mut connected = false; while!connected { match TcpStream::connect(&addresses[index as usize][..]) { Ok(mut stream) => { try!(stream.write_u64::<LittleEndian>(my_index)); results[index as usize] = Some(stream); println!("worker {}:\tconnection to worker {}", my_index, index); connected = true; }, Err(error) => { println!("worker {}:\terror connecting to worker {}: {}; retrying", my_index, index, error); sleep_ms(1000); }, } } } return Ok(results); } // result contains connections [my_index + 1, addresses.len() - 1]. fn await_connections(addresses: Arc<Vec<String>>, my_index: u64) -> Result<Vec<Option<TcpStream>>> { let mut results: Vec<_> = (0..(addresses.len() - my_index as usize - 1)).map(|_| None).collect(); let listener = try!(TcpListener::bind(&addresses[my_index as usize][..])); for _ in (my_index as usize + 1.. addresses.len()) { let mut stream = try!(listener.accept()).0; let identifier = try!(stream.read_u64::<LittleEndian>()) as usize; results[identifier - my_index as usize - 1] = Some(stream); println!("worker {}:\tconnection from worker {}", my_index, identifier); } return Ok(results); }
MessageHeader
identifier_name
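Note: `MessageHeader` (the `identifier_name` middle above) is laid out on the wire as five little-endian `u64`s in a fixed order, which is also why `size_of::<MessageHeader>()` works as the minimum-bytes check in `try_read`. A small round-trip check of that layout with `byteorder`, mirroring `write_to` and the fixed-order reads in `try_read` (a sketch, not the source's own test):

```
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io::Cursor;

fn header_roundtrip() -> std::io::Result<()> {
    // Serialize graph, channel, source, target, length in write_to's order.
    let mut bytes = Vec::new();
    for field in &[1u64, 2, 3, 4, 0] {
        bytes.write_u64::<LittleEndian>(*field)?;
    }
    assert_eq!(bytes.len(), 40); // 5 fields * 8 bytes each

    // Read them back in the same fixed order, as try_read does.
    let mut cursor = Cursor::new(&bytes);
    assert_eq!(cursor.read_u64::<LittleEndian>()?, 1); // graph
    assert_eq!(cursor.read_u64::<LittleEndian>()?, 2); // channel
    Ok(())
}
```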
networking.rs
use std::io::{Read, Write, Result, BufRead, BufReader, BufWriter}; use std::fs::File; use std::net::{TcpListener, TcpStream}; use std::mem::size_of; use std::sync::Arc; use std::sync::mpsc::{Sender, Receiver, channel}; use std::thread; use std::thread::sleep_ms; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use communication::communicator::{Binary, Process}; use drain::DrainExt; // TODO : Much of this only relates to BinaryWriter/BinaryReader based communication, not networking. // TODO : Could be moved somewhere less networking-specific. #[derive(Copy, Clone)] pub struct MessageHeader { pub graph: u64, // graph identifier pub channel: u64, // index of channel pub source: u64, // index of worker sending message pub target: u64, // index of worker receiving message pub length: u64, // number of bytes in message } impl MessageHeader { // returns a header when there is enough supporting data fn try_read(bytes: &mut &[u8]) -> Option<MessageHeader> { if bytes.len() > size_of::<MessageHeader>() { // capture original in case we need to rewind let original = *bytes; // unclear what order struct initializers run in, so... let graph = bytes.read_u64::<LittleEndian>().unwrap(); let channel = bytes.read_u64::<LittleEndian>().unwrap(); let source = bytes.read_u64::<LittleEndian>().unwrap(); let target = bytes.read_u64::<LittleEndian>().unwrap(); let length = bytes.read_u64::<LittleEndian>().unwrap(); if bytes.len() >= length as usize { Some(MessageHeader { graph: graph, channel: channel, source: source, target: target, length: length, }) } else { // rewind the reader *bytes = original; None } } else { None } } fn write_to<W: Write>(&self, writer: &mut W) -> Result<()> { try!(writer.write_u64::<LittleEndian>(self.graph)); try!(writer.write_u64::<LittleEndian>(self.channel)); try!(writer.write_u64::<LittleEndian>(self.source)); try!(writer.write_u64::<LittleEndian>(self.target)); try!(writer.write_u64::<LittleEndian>(self.length)); Ok(()) } } // // structure in charge of receiving data from a Reader, for example the network // struct BinaryReceiver<R: Read> { // reader: R, // the generic reader // buffer: Vec<u8>, // current working buffer // double: Vec<u8>, // second working buffer // staging: Vec<u8>, // 1 << 20 of buffer to read into // targets: Switchboard<(Sender<Vec<u8>>, Receiver<Vec<u8>>)>, // } // // impl<R: Read> BinaryReceiver<R> { // fn new(reader: R, channels: Receiver<((u64, u64, u64), (Sender<Vec<u8>>, Receiver<Vec<u8>>))>) -> BinaryReceiver<R> { // BinaryReceiver { // reader: reader, // buffer: Vec::new(), // double: Vec::new(), // staging: vec![0u8; 1 << 20], // targets: Switchboard::new(channels), // } // } // // fn recv_loop(&mut self) { // loop { // // // attempt to read some more bytes into our buffer // // TODO : We read in to self.staging because extending a Vec<u8> is hard without // // TODO : using set_len, which is unsafe. // // TODO : Could consider optimizing for the self.buffer.len() == 0 case, swapping // // TODO : self.staging with self.buffer, rather than using write_all. 
// let read = self.reader.read(&mut self.staging[..]).unwrap_or(0); // self.buffer.write_all(&self.staging[..read]).unwrap(); // <-- shouldn't fail // // { // // get a view of available bytes // let mut slice = &self.buffer[..]; // // while let Some(header) = MessageHeader::try_read(&mut slice) { // // let h_len = header.length as usize; // length in bytes // let target = self.targets.ensure(header.target, header.graph, header.channel); // let mut buffer = target.1.try_recv().unwrap_or(Vec::new()); // // buffer.clear(); // buffer.write_all(&slice[..h_len]).unwrap(); // slice = &slice[h_len..]; // // target.0.send(buffer).unwrap(); // } // // // TODO: way inefficient... =/ Fix! :D // // if slice.len() < self.buffer.len() { // self.double.clear(); // self.double.write_all(slice).unwrap(); // // } // } // // // if self.double.len() > 0 { // mem::swap(&mut self.buffer, &mut self.double); // // self.double.clear(); // // } // } // } // } // structure in charge of receiving data from a Reader, for example the network struct BinaryReceiver<R: Read> { reader: R, // the generic reader buffer: Vec<u8>, // current working buffer length: usize, targets: Switchboard<(Sender<Vec<u8>>, Receiver<Vec<u8>>)>, } impl<R: Read> BinaryReceiver<R> { fn new(reader: R, channels: Receiver<((u64, u64, u64), (Sender<Vec<u8>>, Receiver<Vec<u8>>))>) -> BinaryReceiver<R> { BinaryReceiver { reader: reader, buffer: vec![0u8; 1 << 20], length: 0, targets: Switchboard::new(channels), } } fn recv_loop(&mut self) { loop { // if we've mostly filled our buffer and still can't read a whole message from it,
// we'll need more space / to read more at once. let's double the buffer! if self.length >= self.buffer.len() / 2 { self.buffer.extend(::std::iter::repeat(0u8).take(self.length)); } // attempt to read some more bytes into our buffer let read = self.reader.read(&mut self.buffer[self.length..]).unwrap_or(0); self.length += read; let remaining = { let mut slice = &self.buffer[..self.length]; while let Some(header) = MessageHeader::try_read(&mut slice) { let h_len = header.length as usize; // length in bytes let target = &mut self.targets.ensure(header.target, header.graph, header.channel).0; target.send(slice[..h_len].to_vec()).unwrap(); slice = &slice[h_len..]; } slice.len() }; // we consumed bytes, must shift to beginning. // this should optimize to copy_overlapping; // would just do that if it weren't unsafe =/ if remaining < self.length { for index in 0..remaining { self.buffer[index] = self.buffer[index + self.length - remaining]; } self.length = remaining; } } } } // structure in charge of sending data to a Writer, for example the network struct BinarySender<W: Write> { id: u64, // destination process writer: W, sources: Receiver<(MessageHeader, Vec<u8>)>, returns: Switchboard<Sender<Vec<u8>>>, } impl<W: Write> BinarySender<W> { fn new(id: u64, writer: W, sources: Receiver<(MessageHeader, Vec<u8>)>, channels: Receiver<((u64, u64, u64), Sender<Vec<u8>>)>) -> BinarySender<W> { BinarySender { id: id, writer: writer, sources: sources, returns: Switchboard::new(channels), } } fn send_loop(&mut self) { let mut stash = Vec::new(); // block until data to recv while let Ok((header, buffer)) = self.sources.recv() { stash.push((header, buffer)); // collect any additional outstanding data to send while let Ok((header, buffer)) = self.sources.try_recv() { stash.push((header, buffer)); } // println!("send loop to process {}:\tstarting", self.id); for (mut header, mut buffer) in stash.drain_temp() { header.length = buffer.len() as u64; // <-- is this really our job? 
networking.rs
use std::io::{Read, Write, Result, BufRead, BufReader, BufWriter}; use std::fs::File; use std::net::{TcpListener, TcpStream}; use std::mem::size_of; use std::sync::Arc; use std::sync::mpsc::{Sender, Receiver, channel}; use std::thread; use std::thread::sleep_ms; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use communication::communicator::{Binary, Process}; use drain::DrainExt; // TODO : Much of this only relates to BinaryWriter/BinaryReader based communication, not networking. // TODO : Could be moved somewhere less networking-specific. #[derive(Copy, Clone)] pub struct MessageHeader { pub graph: u64, // graph identifier pub channel: u64, // index of channel pub source: u64, // index of worker sending message pub target: u64, // index of worker receiving message pub length: u64, // number of bytes in message } impl MessageHeader { // returns a header when there is enough supporting data fn try_read(bytes: &mut &[u8]) -> Option<MessageHeader> { if bytes.len() > size_of::<MessageHeader>() { // capture original in case we need to rewind let original = *bytes; // unclear what order struct initializers run in, so... let graph = bytes.read_u64::<LittleEndian>().unwrap(); let channel = bytes.read_u64::<LittleEndian>().unwrap(); let source = bytes.read_u64::<LittleEndian>().unwrap(); let target = bytes.read_u64::<LittleEndian>().unwrap(); let length = bytes.read_u64::<LittleEndian>().unwrap(); if bytes.len() >= length as usize { Some(MessageHeader { graph: graph, channel: channel, source: source, target: target, length: length, }) } else { // rewind the reader *bytes = original; None } } else { None } } fn write_to<W: Write>(&self, writer: &mut W) -> Result<()> { try!(writer.write_u64::<LittleEndian>(self.graph)); try!(writer.write_u64::<LittleEndian>(self.channel)); try!(writer.write_u64::<LittleEndian>(self.source)); try!(writer.write_u64::<LittleEndian>(self.target)); try!(writer.write_u64::<LittleEndian>(self.length)); Ok(()) } } // // structure in charge of receiving data from a Reader, for example the network // struct BinaryReceiver<R: Read> { // reader: R, // the generic reader // buffer: Vec<u8>, // current working buffer // double: Vec<u8>, // second working buffer // staging: Vec<u8>, // 1 << 20 of buffer to read into // targets: Switchboard<(Sender<Vec<u8>>, Receiver<Vec<u8>>)>, // } // // impl<R: Read> BinaryReceiver<R> { // fn new(reader: R, channels: Receiver<((u64, u64, u64), (Sender<Vec<u8>>, Receiver<Vec<u8>>))>) -> BinaryReceiver<R> { // BinaryReceiver { // reader: reader, // buffer: Vec::new(), // double: Vec::new(), // staging: vec![0u8; 1 << 20], // targets: Switchboard::new(channels), // } // } // // fn recv_loop(&mut self) { // loop { // // // attempt to read some more bytes into our buffer // // TODO : We read in to self.staging because extending a Vec<u8> is hard without // // TODO : using set_len, which is unsafe. // // TODO : Could consider optimizing for the self.buffer.len() == 0 case, swapping // // TODO : self.staging with self.buffer, rather than using write_all. 
// let read = self.reader.read(&mut self.staging[..]).unwrap_or(0); // self.buffer.write_all(&self.staging[..read]).unwrap(); // <-- shouldn't fail // // { // // get a view of available bytes // let mut slice = &self.buffer[..]; // // while let Some(header) = MessageHeader::try_read(&mut slice) { // // let h_len = header.length as usize; // length in bytes // let target = self.targets.ensure(header.target, header.graph, header.channel); // let mut buffer = target.1.try_recv().unwrap_or(Vec::new()); // // buffer.clear(); // buffer.write_all(&slice[..h_len]).unwrap(); // slice = &slice[h_len..]; // // target.0.send(buffer).unwrap(); // } // // // TODO: way inefficient... =/ Fix! :D // // if slice.len() < self.buffer.len() { // self.double.clear(); // self.double.write_all(slice).unwrap(); // // } // } // // // if self.double.len() > 0 { // mem::swap(&mut self.buffer, &mut self.double); // // self.double.clear(); // // } // } // } // } // structure in charge of receiving data from a Reader, for example the network struct BinaryReceiver<R: Read> { reader: R, // the generic reader buffer: Vec<u8>, // current working buffer length: usize, targets: Switchboard<(Sender<Vec<u8>>, Receiver<Vec<u8>>)>, } impl<R: Read> BinaryReceiver<R> { fn new(reader: R, channels: Receiver<((u64, u64, u64), (Sender<Vec<u8>>, Receiver<Vec<u8>>))>) -> BinaryReceiver<R> { BinaryReceiver { reader: reader, buffer: vec![0u8; 1 << 20], length: 0, targets: Switchboard::new(channels), } } fn recv_loop(&mut self) { loop { // if we've mostly filled our buffer and still can't read a whole message from it, // we'll need more space / to read more at once. let's double the buffer! if self.length >= self.buffer.len() / 2 { self.buffer.extend(::std::iter::repeat(0u8).take(self.length)); } // attempt to read some more bytes into our buffer let read = self.reader.read(&mut self.buffer[self.length..]).unwrap_or(0); self.length += read; let remaining = { let mut slice = &self.buffer[..self.length]; while let Some(header) = MessageHeader::try_read(&mut slice) { let h_len = header.length as usize; // length in bytes let target = &mut self.targets.ensure(header.target, header.graph, header.channel).0; target.send(slice[..h_len].to_vec()).unwrap(); slice = &slice[h_len..]; } slice.len() }; // we consumed bytes, must shift to beginning. 
// this should optimize to copy_overlapping; // would just do that if it weren't unsafe =/ if remaining < self.length { for index in 0..remaining { self.buffer[index] = self.buffer[index + self.length - remaining]; } self.length = remaining; } } } } // structure in charge of sending data to a Writer, for example the network struct BinarySender<W: Write> { id: u64, // destination process writer: W, sources: Receiver<(MessageHeader, Vec<u8>)>, returns: Switchboard<Sender<Vec<u8>>>, } impl<W: Write> BinarySender<W> { fn new(id: u64, writer: W, sources: Receiver<(MessageHeader, Vec<u8>)>, channels: Receiver<((u64, u64, u64), Sender<Vec<u8>>)>) -> BinarySender<W> { BinarySender { id: id, writer: writer, sources: sources, returns: Switchboard::new(channels), } } fn send_loop(&mut self) { let mut stash = Vec::new(); // block until data to recv while let Ok((header, buffer)) = self.sources.recv() { stash.push((header, buffer)); // collect any additional outstanding data to send while let Ok((header, buffer)) = self.sources.try_recv() { stash.push((header, buffer)); } // println!("send loop to process {}:\tstarting", self.id); for (mut header, mut buffer) in stash.drain_temp() { header.length = buffer.len() as u64; // <-- is this really our job? O.o header.write_to(&mut self.writer).unwrap(); self.writer.write_all(&buffer[..]).unwrap(); buffer.clear(); // self.returns.ensure(header.source, header.graph, header.channel).send(buffer).unwrap(); } self.writer.flush().unwrap(); // <-- because writer is buffered } } } struct Switchboard<T:Send> { source: Receiver<((u64, u64, u64), T)>, buffer: Vec<Vec<Vec<Option<T>>>>, } impl<T:Send> Switchboard<T> { pub fn new(source: Receiver<((u64, u64, u64), T)>) -> Switchboard<T> { Switchboard { source: source, buffer: Vec::new(), } } pub fn ensure(&mut self, a: u64, b: u64, c: u64) -> &mut T { let a = a as usize; let b = b as usize; let c = c as usize; while self.buffer.len() <= a { self.buffer.push(Vec::new()); } while self.buffer[a].len() <= b { self.buffer[a].push(Vec::new()); } while self.buffer[a][b].len() <= c { self.buffer[a][b].push(None); } while let None = self.buffer[a][b][c] { let ((x, y, z), s) = self.source.recv().unwrap(); let x = x as usize; let y = y as usize; let z = z as usize; while self.buffer.len() <= x { self.buffer.push(Vec::new()); } while self.buffer[x].len() <= y { self.buffer[x].push(Vec::new()); } while self.buffer[x][y].len() <= z { self.buffer[x][y].push(None); } self.buffer[x][y][z] = Some(s); } // we've just ensured that this is not None self.buffer[a][b][c].as_mut().unwrap() } } pub fn initialize_networking_from_file(filename: &str, my_index: u64, workers: u64) -> Result<Vec<Binary>> { let reader = BufReader::new(try!(File::open(filename))); let mut addresses = Vec::new(); for line in reader.lines() { addresses.push(try!(line)); } initialize_networking(addresses, my_index, workers) } pub fn initialize_networking(addresses: Vec<String>, my_index: u64, workers: u64) -> Result<Vec<Binary>> { let processes = addresses.len() as u64; let hosts1 = Arc::new(addresses); let hosts2 = hosts1.clone(); let start_task = thread::spawn(move || start_connections(hosts1, my_index)); let await_task = thread::spawn(move || await_connections(hosts2, my_index)); let mut results = try!(start_task.join().unwrap()); results.push(None); let mut to_extend = try!(await_task.join().unwrap()); results.extend(to_extend.drain_temp()); println!("worker {}:\tinitialization complete", my_index); let mut writers = Vec::new(); // handles to the BinarySenders (to 
present new channels) let mut readers = Vec::new(); // handles to the BinaryReceivers (to present new channels) let mut senders = Vec::new(); // destinations for serialized data (to send serialized data) // for each process, if a stream exists (i.e. not local)... for index in 0..results.len() { if let Some(stream) = results[index].take() { let (writer_channels_s, writer_channels_r) = channel(); let (reader_channels_s, reader_channels_r) = channel(); let (sender_channels_s, sender_channels_r) = channel(); writers.push(writer_channels_s); // readers.push(reader_channels_s); // senders.push(sender_channels_s); // let mut sender = BinarySender::new(index as u64, BufWriter::with_capacity(1 << 20, stream.try_clone().unwrap()), sender_channels_r, writer_channels_r); let mut recver = BinaryReceiver::new(stream.try_clone().unwrap(), reader_channels_r); // start senders and receivers associated with this stream thread::Builder::new().name(format!("send thread {}", index)) .spawn(move || sender.send_loop()) .unwrap(); thread::Builder::new().name(format!("recv thread {}", index)) .spawn(move || recver.recv_loop()) .unwrap(); } } let proc_comms = Process::new_vector(workers); let mut results = Vec::new(); for (index, proc_comm) in proc_comms.into_iter().enumerate() { results.push(Binary { inner: proc_comm, index: my_index * workers + index as u64, peers: workers * processes, graph: 0, // TODO : Fix this allocated: 0, writers: writers.clone(), readers: readers.clone(), senders: senders.clone(), }); } return Ok(results); } // result contains connections [0, my_index - 1]. fn start_connections(addresses: Arc<Vec<String>>, my_index: u64) -> Result<Vec<Option<TcpStream>>> { let mut results: Vec<_> = (0..my_index).map(|_| None).collect(); for index in 0..my_index { let mut connected = false; while !connected { match TcpStream::connect(&addresses[index as usize][..]) { Ok(mut stream) => { try!(stream.write_u64::<LittleEndian>(my_index)); results[index as usize] = Some(stream); println!("worker {}:\tconnection to worker {}", my_index, index); connected = true; }, Err(error) => { println!("worker {}:\terror connecting to worker {}: {}; retrying", my_index, index, error); sleep_ms(1000); }, } } } return Ok(results); } // result contains connections [my_index + 1, addresses.len() - 1]. fn await_connections(addresses: Arc<Vec<String>>, my_index: u64) -> Result<Vec<Option<TcpStream>>>
{ let mut results: Vec<_> = (0..(addresses.len() - my_index as usize - 1)).map(|_| None).collect(); let listener = try!(TcpListener::bind(&addresses[my_index as usize][..])); for _ in (my_index as usize + 1 .. addresses.len()) { let mut stream = try!(listener.accept()).0; let identifier = try!(stream.read_u64::<LittleEndian>()) as usize; results[identifier - my_index as usize - 1] = Some(stream); println!("worker {}:\tconnection from worker {}", my_index, identifier); } return Ok(results); }
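As a sanity check on the framing above, here is a minimal, self-contained sketch (not part of networking.rs; the field values and payload are invented) showing the invariant that `write_to` and `try_read` agree on: a message is five little-endian `u64`s followed by exactly `length` payload bytes.

```rust
// Hypothetical round-trip over an in-memory buffer, mirroring the
// header layout used by MessageHeader::write_to / try_read.
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};

fn main() {
    let payload = b"hello";
    let mut wire = Vec::new();
    // Same field order as write_to: graph, channel, source, target, length.
    for field in &[0u64, 7, 1, 2, payload.len() as u64] {
        wire.write_u64::<LittleEndian>(*field).unwrap();
    }
    wire.extend_from_slice(payload);

    // Reader side: consume the five header fields, then check that the
    // full payload is present before accepting the message, as try_read does.
    let mut slice = &wire[..];
    let _graph = slice.read_u64::<LittleEndian>().unwrap();
    let _channel = slice.read_u64::<LittleEndian>().unwrap();
    let _source = slice.read_u64::<LittleEndian>().unwrap();
    let _target = slice.read_u64::<LittleEndian>().unwrap();
    let length = slice.read_u64::<LittleEndian>().unwrap() as usize;
    assert!(slice.len() >= length);
    assert_eq!(&slice[..length], &payload[..]);
}
```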
traits.rs
use core::marker::Sized; use embedded_hal::{ blocking::{delay::*, spi::Write}, digital::v2::*, }; /// All commands need to have this trait which gives the address of the command /// which needs to be sent via SPI with activated CommandsPin (Data/Command Pin in CommandMode) pub(crate) trait Command: Copy { fn address(self) -> u8; } /// Separates the different LUTs for the display refresh process #[derive(Debug, Clone, PartialEq, Eq, Copy, Default)] pub enum RefreshLut { /// The "normal" full lookup table for the Refresh-Sequence #[default] Full, /// The quick LUT where not the full refresh sequence is followed. /// This might lead to some ghosting. Quick, } pub(crate) trait InternalWiAdditions<SPI, CS, BUSY, DC, RST, DELAY> where SPI: Write<u8>, CS: OutputPin, BUSY: InputPin, DC: OutputPin, RST: OutputPin, DELAY: DelayUs<u32>, { /// This initialises the EPD and powers it up /// /// This function is already called from /// - [new()](WaveshareDisplay::new()) /// - [`wake_up`] /// /// This function calls [reset](WaveshareDisplay::reset), /// so you don't need to call reset yourself when trying to wake your device up /// after setting it to sleep. fn init(&mut self, spi: &mut SPI, delay: &mut DELAY) -> Result<(), SPI::Error>; } /// Functions to interact with three color panels pub trait WaveshareThreeColorDisplay<SPI, CS, BUSY, DC, RST, DELAY>: WaveshareDisplay<SPI, CS, BUSY, DC, RST, DELAY> where SPI: Write<u8>, CS: OutputPin, BUSY: InputPin, DC: OutputPin, RST: OutputPin, DELAY: DelayUs<u32>, { /// Transmit data to the SRAM of the EPD /// /// Updates both the black and the secondary color layers fn update_color_frame( &mut self, spi: &mut SPI, delay: &mut DELAY, black: &[u8], chromatic: &[u8], ) -> Result<(), SPI::Error>; /// Update only the black/white data of the display. /// /// This must be finished by calling `update_chromatic_frame`. fn update_achromatic_frame( &mut self, spi: &mut SPI, delay: &mut DELAY, black: &[u8], ) -> Result<(), SPI::Error>; /// Update only the chromatic data of the display. /// /// This should be preceded by a call to `update_achromatic_frame`. /// This data takes precedence over the black/white data.
fn update_chromatic_frame( &mut self, spi: &mut SPI, delay: &mut DELAY, chromatic: &[u8], ) -> Result<(), SPI::Error>; } /// All the functions to interact with the EPDs /// /// This trait includes all public functions to use the EPDs /// /// # Example /// ///```rust, no_run ///# use embedded_hal_mock::*; ///# fn main() -> Result<(), MockError> { ///use embedded_graphics::{ /// pixelcolor::BinaryColor::On as Black, prelude::*, primitives::{Line, PrimitiveStyle}, ///}; ///use epd_waveshare::{epd4in2::*, prelude::*}; ///# ///# let expectations = []; ///# let mut spi = spi::Mock::new(&expectations); ///# let expectations = []; ///# let cs_pin = pin::Mock::new(&expectations); ///# let busy_in = pin::Mock::new(&expectations); ///# let dc = pin::Mock::new(&expectations); ///# let rst = pin::Mock::new(&expectations); ///# let mut delay = delay::MockNoop::new(); /// ///// Setup EPD ///let mut epd = Epd4in2::new(&mut spi, cs_pin, busy_in, dc, rst, &mut delay, None)?; /// ///// Use display graphics from embedded-graphics ///let mut display = Display4in2::default(); /// ///// Use embedded graphics for drawing a line /// ///let _ = Line::new(Point::new(0, 120), Point::new(0, 295)) /// .into_styled(PrimitiveStyle::with_stroke(Color::Black, 1)) /// .draw(&mut display); /// /// // Display updated frame ///epd.update_frame(&mut spi, &display.buffer(), &mut delay)?; ///epd.display_frame(&mut spi, &mut delay)?; /// ///// Set the EPD to sleep ///epd.sleep(&mut spi, &mut delay)?; ///# Ok(()) ///# } ///``` pub trait WaveshareDisplay<SPI, CS, BUSY, DC, RST, DELAY> where SPI: Write<u8>, CS: OutputPin, BUSY: InputPin, DC: OutputPin, RST: OutputPin, DELAY: DelayUs<u32>, { /// The Color Type used by the Display type DisplayColor; /// Creates a new driver from a SPI peripheral, CS Pin, Busy InputPin, DC pin, and RST pin /// /// `delay_us` is the number of us the idle loop should sleep on. /// Setting it to 0 implies busy waiting. /// Setting it to None means a default value is used. /// /// This already initialises the device. fn new( spi: &mut SPI, cs: CS, busy: BUSY, dc: DC, rst: RST, delay: &mut DELAY, delay_us: Option<u32>, ) -> Result<Self, SPI::Error> where Self: Sized; /// Let the device enter deep-sleep mode to save power. /// /// The deep sleep mode returns to standby with a hardware reset. fn sleep(&mut self, spi: &mut SPI, delay: &mut DELAY) -> Result<(), SPI::Error>; /// Wakes the device up from sleep /// /// Also reinitialises the device if necessary. fn wake_up(&mut self, spi: &mut SPI, delay: &mut DELAY) -> Result<(), SPI::Error>; /// Sets the background color for various commands like [clear_frame](WaveshareDisplay::clear_frame) fn set_background_color(&mut self, color: Self::DisplayColor); /// Get current background color fn background_color(&self) -> &Self::DisplayColor; /// Get the width of the display fn width(&self) -> u32; /// Get the height of the display fn height(&self) -> u32; /// Transmit a full frame to the SRAM of the EPD fn update_frame( &mut self, spi: &mut SPI, buffer: &[u8], delay: &mut DELAY, ) -> Result<(), SPI::Error>; /// Transmits partial data to the SRAM of the EPD /// /// (x,y) is the top left corner /// /// BUFFER needs to be of size: width / 8 * height!
#[allow(clippy::too_many_arguments)] fn update_partial_frame( &mut self, spi: &mut SPI, delay: &mut DELAY, buffer: &[u8], x: u32, y: u32, width: u32, height: u32, ) -> Result<(), SPI::Error>; /// Displays the frame data from SRAM /// /// This function waits until the device isn't busy anymore fn display_frame(&mut self, spi: &mut SPI, delay: &mut DELAY) -> Result<(), SPI::Error>; /// Provides a combined update & display to save some time (skipping a busy check in between) fn update_and_display_frame( &mut self, spi: &mut SPI, buffer: &[u8], delay: &mut DELAY, ) -> Result<(), SPI::Error>; /// Clears the frame buffer on the EPD with the declared background color /// /// The background color can be changed with [`WaveshareDisplay::set_background_color`] fn clear_frame(&mut self, spi: &mut SPI, delay: &mut DELAY) -> Result<(), SPI::Error>; /// Selects the waveform/LUT used for refreshing the display, /// e.g. for partial refreshes /// /// A full refresh is needed after a certain number of quick refreshes! /// /// WARNING: Quick Refresh might lead to ghosting-effects/problems with your display. Especially for the 4.2in Display! /// /// If None is used the old value will be loaded on the LUTs once more fn set_lut( &mut self, spi: &mut SPI, delay: &mut DELAY, refresh_rate: Option<RefreshLut>, ) -> Result<(), SPI::Error>; /// Wait until the display has stopped processing data /// /// You can call this to make sure a frame is displayed before going further fn wait_until_idle(&mut self, spi: &mut SPI, delay: &mut DELAY) -> Result<(), SPI::Error>; } /// Allows quick refresh support for displays that support it; lets you send both /// old and new frame data to support this. /// /// When using the quick refresh look-up table, the display must receive separate display /// buffer data marked as old, and new. This is used to determine which pixels need to change, /// and how they will change. This isn't required when using full refreshes. /// /// (todo: Example omitted due to CI failures.) /// Example: ///```rust, no_run ///# use embedded_hal_mock::*; ///# fn main() -> Result<(), MockError> { ///# use embedded_graphics::{ ///# pixelcolor::BinaryColor::On as Black, prelude::*, primitives::{Line, PrimitiveStyle}, ///# }; ///# use epd_waveshare::{epd4in2::*, prelude::*}; ///# use epd_waveshare::graphics::VarDisplay; ///# ///# let expectations = []; ///# let mut spi = spi::Mock::new(&expectations); ///# let expectations = []; ///# let cs_pin = pin::Mock::new(&expectations); ///# let busy_in = pin::Mock::new(&expectations); ///# let dc = pin::Mock::new(&expectations); ///# let rst = pin::Mock::new(&expectations); ///# let mut delay = delay::MockNoop::new(); ///# ///# // Setup EPD ///# let mut epd = Epd4in2::new(&mut spi, cs_pin, busy_in, dc, rst, &mut delay, None)?; ///let (x, y, frame_width, frame_height) = (20, 40, 80, 80); /// ///let mut buffer = [DEFAULT_BACKGROUND_COLOR.get_byte_value(); 80 / 8 * 80]; ///let mut display = VarDisplay::new(frame_width, frame_height, &mut buffer, false).unwrap(); /// ///epd.update_partial_old_frame(&mut spi, &mut delay, display.buffer(), x, y, frame_width, frame_height) /// .ok(); /// ///display.clear(Color::White).ok(); ///// Execute drawing commands here.
/// ///epd.update_partial_new_frame(&mut spi, &mut delay, display.buffer(), x, y, frame_width, frame_height) /// .ok(); ///# Ok(()) ///# } ///``` pub trait QuickRefresh<SPI, CS, BUSY, DC, RST, DELAY> where SPI: Write<u8>, CS: OutputPin, BUSY: InputPin, DC: OutputPin, RST: OutputPin, DELAY: DelayUs<u32>, { /// Updates the old frame. fn update_old_frame( &mut self, spi: &mut SPI, buffer: &[u8], delay: &mut DELAY, ) -> Result<(), SPI::Error>; /// Updates the new frame. fn update_new_frame( &mut self, spi: &mut SPI, buffer: &[u8], delay: &mut DELAY, ) -> Result<(), SPI::Error>;
/// Displays the new frame fn display_new_frame(&mut self, spi: &mut SPI, _delay: &mut DELAY) -> Result<(), SPI::Error>;
/// Updates and displays the new frame. fn update_and_display_new_frame( &mut self, spi: &mut SPI, buffer: &[u8], delay: &mut DELAY, ) -> Result<(), SPI::Error>; /// Updates the old frame for a portion of the display. #[allow(clippy::too_many_arguments)] fn update_partial_old_frame( &mut self, spi: &mut SPI, delay: &mut DELAY, buffer: &[u8], x: u32, y: u32, width: u32, height: u32, ) -> Result<(), SPI::Error>; /// Updates the new frame for a portion of the display. #[allow(clippy::too_many_arguments)] fn update_partial_new_frame( &mut self, spi: &mut SPI, delay: &mut DELAY, buffer: &[u8], x: u32, y: u32, width: u32, height: u32, ) -> Result<(), SPI::Error>; /// Clears the partial frame buffer on the EPD with the declared background color /// The background color can be changed with [`WaveshareDisplay::set_background_color`] fn clear_partial_frame( &mut self, spi: &mut SPI, delay: &mut DELAY, x: u32, y: u32, width: u32, height: u32, ) -> Result<(), SPI::Error>; }
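For context on the `Command` trait at the top of this file: a concrete driver typically implements it by mapping each command variant to the controller opcode it is sent as. A minimal in-crate sketch (the variant names and opcode bytes below are invented for illustration, not taken from a real panel):

```rust
// Hypothetical command set; Command is pub(crate), so an impl like this
// would live inside the crate next to a specific display driver.
#[derive(Copy, Clone)]
enum DemoCommand {
    DriverOutputControl,
    WriteRam,
    DeepSleep,
}

impl Command for DemoCommand {
    // The byte written over SPI while the Data/Command pin is in command mode.
    fn address(self) -> u8 {
        match self {
            DemoCommand::DriverOutputControl => 0x01,
            DemoCommand::WriteRam => 0x24,
            DemoCommand::DeepSleep => 0x10,
        }
    }
}
```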
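Finally, a short usage sketch for `RefreshLut` with `set_lut`, continuing from the `Epd4in2` doc-test setup shown earlier (so `epd`, `spi`, `delay`, and `display` are assumed to be in scope; this is illustrative, not a verified doc test):

```rust
// Switch to the quick LUT for a batch of fast updates...
epd.set_lut(&mut spi, &mut delay, Some(RefreshLut::Quick))?;
// ... quick update/display cycles go here ...

// ...then return to the full LUT: per the set_lut docs, a full refresh
// is needed after a certain number of quick refreshes to avoid ghosting.
epd.set_lut(&mut spi, &mut delay, Some(RefreshLut::Full))?;
epd.update_frame(&mut spi, &display.buffer(), &mut delay)?;
epd.display_frame(&mut spi, &mut delay)?;
```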