Column schema (field, type, length/class statistics):

file_name   large_string   lengths 4 – 69
prefix      large_string   lengths 0 – 26.7k
suffix      large_string   lengths 0 – 24.8k
middle      large_string   lengths 0 – 2.12k
fim_type    large_string   4 values
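Each row pairs a source file name with a prefix, a masked middle span, and a suffix, plus a fim_type label describing how the span was chosen (the values seen below include identifier_name, identifier_body, and random_line_split). What follows is a minimal sketch of how a row reassembles into the original source, assuming the fields are plain strings; the Record struct and the abridged field values are illustrative, not part of any dataset tooling.

// Illustrative record shape matching the columns listed above; the struct name
// and field types are assumptions for this sketch, not part of the dataset.
struct Record {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String,
}

impl Record {
    // Concatenating prefix + middle + suffix restores the original source text.
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}

fn main() {
    // Values abridged from the first row below (channel_router.rs, fim_type
    // "identifier_name"): the masked span is the identifier `is_free`.
    let row = Record {
        file_name: "channel_router.rs".to_string(),
        prefix: "impl ChannelState { pub fn ".to_string(),
        middle: "is_free".to_string(),
        suffix: "(&self) -> bool { self == &ChannelState::Free } }".to_string(),
        fim_type: "identifier_name".to_string(),
    };
    assert!(row.reassemble().contains("pub fn is_free(&self) -> bool"));
    println!("{} ({})", row.file_name, row.fim_type);
}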
channel_router.rs
// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::cmp; use std::collections::HashMap; struct Ranges { ranges: Vec<std::ops::Range<usize>>, } impl Ranges { fn new() -> Self { Ranges { ranges: Vec::new() } } fn add(&mut self, start: usize, end: usize) { let (start, end) = (cmp::min(start, end), cmp::max(start, end) + 1); self.ranges.push(std::ops::Range { start, end }); } fn contains(&self, start: usize, end: usize) -> bool { let (start, end) = (cmp::min(start, end), cmp::max(start, end)); (start..=end).any(|v| self.ranges.iter().any(|r| r.contains(&v))) } fn contains_range(&self, range: &std::ops::Range<usize>) -> bool { self.contains(range.start, range.end) } fn range_sum(&self) -> usize { self.ranges.iter().map(|r| r.end - r.start).sum() } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelState { Free, // Occupied means no connection. This is the same as a constant false. Occupied, // Constant true. Constant, Net(usize), } pub type ChannelLayout = [ChannelState]; impl ChannelState { pub fn
(&self) -> bool { self == &ChannelState::Free } pub fn contains_net(&self) -> bool { matches!(self, ChannelState::Net(_)) } pub fn is_constant_on(&self) -> bool { matches!(self, ChannelState::Constant) } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelOp { Move, Copy, } #[derive(Debug, Clone)] pub struct WireConnection { pub from: usize, pub to: Vec<usize>, pub mode: ChannelOp, } #[derive(Debug)] pub struct ChannelSubState { pub wires: Vec<WireConnection>, pub occupancy_map: bitmap::Bitmap<Vec<usize>, bitmap::OneBit>, } #[derive(Debug)] struct Task { net: usize, from: usize, to: Vec<usize>, } impl Task { fn channel_range_required(&self) -> std::ops::Range<usize> { let from = [self.from]; let min = self.to.iter().chain(&from).min().unwrap(); let max = self.to.iter().chain(&from).max().unwrap(); std::ops::Range { start: *min, end: max + 1, } } fn channel_width_required(&self) -> usize { let r = self.channel_range_required(); r.end - r.start } fn occupied_target_pins(&self, layout: &ChannelLayout) -> Vec<usize> { let mut occupied = Vec::new(); for &idx in &self.to { if layout[idx].contains_net() && layout[idx]!= ChannelState::Net(self.net) { occupied.push(idx); } } occupied } // Returns how 'good' a new 'from' position is for this task (when evicting) // so that we can prefer nice spots. fn eviction_cost(&self, new_pos: usize) -> usize { let min = self.to.iter().min().unwrap(); let max = self.to.iter().max().unwrap(); let dist = (self.from as isize - new_pos as isize).abs() as usize; if new_pos > *max { 2 * (new_pos - *max) + dist } else if new_pos < *min { 2 * (*min - new_pos) + dist } else { dist } } } #[derive(Default)] struct RouteTasks { // source idx -> vec<target idx> tasks: HashMap<usize, Vec<usize>>, } impl RouteTasks { fn add(&mut self, from: usize, to: usize) { if let Some(k) = self.tasks.get_mut(&from) { k.push(to); } else { self.tasks.insert(from, vec![to]); } } fn into_tasks(mut self, src: &ChannelLayout) -> Vec<Task> { self.tasks .drain() .map(|(k, v)| { let net = match src[k] { ChannelState::Net(i) => i, _ => unreachable!(), }; Task { net, from: k, to: v, } }) .collect::<Vec<_>>() } } pub fn route_channel(start: &ChannelLayout, end: &ChannelLayout) -> Vec<ChannelSubState> { let mut state = start.to_owned(); // Expand the state to be at least end.len() wide. while state.len() < end.len() { state.push(ChannelState::Free); } let mut tasks = RouteTasks::default(); for end_idx in 0..end.len() { if!end[end_idx].contains_net() || end[end_idx] == state[end_idx] { continue; } let state_idx = state .iter() .position(|v| v == &end[end_idx]) .unwrap_or_else(|| panic!("Required field '{:?}' not found", end[end_idx])); tasks.add(state_idx, end_idx); } let mut tasks = tasks.into_tasks(&state); // Order by how much of the channel this task occupies. tasks.sort_by_key(|k| k.channel_width_required()); let mut steps: Vec<ChannelSubState> = Vec::new(); loop { // Ranges of the channel that is currently occupied. let mut ranges = Ranges::new(); // Instruction on how to connect pins in the current part of the channel. let mut wires = Vec::new(); // To detect if we were unable to do anything due to blocked pins. let old_task_len = tasks.len(); tasks = tasks .drain(0..tasks.len()) .filter(|task| { // Speed things up by only 'enforcing' 50% channel utilization. if ranges.range_sum() > (cmp::max(state.len(), end.len()) / 2) { return true; } // Do we have the required part of the channel available? 
if ranges.contains_range(&task.channel_range_required()) { return true; } let blocking_pins = task.occupied_target_pins(&state); if blocking_pins.is_empty() { // Targets are free, directly move (or copy) it there. let keep = if task.from >= end.len() || state[task.from]!= end[task.from] { state[task.from] = ChannelState::Free; false } else { true }; wires.push(WireConnection { from: task.from, to: task.to.clone(), mode: if keep { ChannelOp::Copy } else { ChannelOp::Move }, }); let r = task.channel_range_required(); // -1 here since.add() + channel_range_required() will do +1. ranges.add(r.start, r.end - 1); for &to in &task.to { state[to] = ChannelState::Net(task.net); } // We successfully handled this one. return false; } true }) .collect::<Vec<_>>(); // We were unable to handle any tasks -> we need to evict some channels. if old_task_len == tasks.len() { // Find available positions where we can evict to. let mut free_positions = state .iter() .enumerate() .filter(|(_, v)|!v.contains_net()) .map(|(k, _)| k) .filter(|&k| k >= end.len() ||!end[k].contains_net()) .collect::<Vec<_>>(); if free_positions.is_empty() { println!("[!] No free positions found, expanding channel"); // Make sure that we have some room, scaling with the number of // remaining tasks as a random tradeoff. for _ in 0..(tasks.len() / 10 + 1) { state.push(ChannelState::Free); free_positions.push(state.len() - 1); } } for task_idx in 0..tasks.len() { let blocking_pins = tasks[task_idx].occupied_target_pins(&state); for to_evict in blocking_pins { // Find corresponding task. let task_idx_to_evict = tasks .iter() .position(|t| t.from == to_evict) .unwrap_or_else(|| panic!("Could not find task blocking {}", to_evict)); // Find a good place for this task to evict to. free_positions.sort_by(|&a, &b| { // Comparing in the opposite order on purpose here so // that we can use pop() later. tasks[task_idx_to_evict] .eviction_cost(b) .cmp(&tasks[task_idx_to_evict].eviction_cost(a)) }); let from = tasks[task_idx_to_evict].from; let new_pos = *free_positions.last().unwrap(); // Check whether the space is actually available. let req_range = std::ops::Range { start: cmp::min(from, new_pos), end: cmp::max(from, new_pos) + 1, }; if!ranges.contains_range(&req_range) { free_positions.pop(); ranges.add(from, new_pos); wires.push(WireConnection { from, to: vec![new_pos], mode: ChannelOp::Move, }); tasks[task_idx_to_evict].from = new_pos; state[new_pos] = ChannelState::Net(tasks[task_idx_to_evict].net); state[to_evict] = ChannelState::Free; } } } } let mut bitmap = bitmap::Bitmap::from_storage(state.len(), (), vec![0; (state.len() + 63) / 64]) .unwrap(); for idx in state .iter() .enumerate() .filter(|(_, v)| v.contains_net()) .map(|(k, _)| k) { bitmap.set(idx, 1); } steps.push(ChannelSubState { wires, occupancy_map: bitmap, }); if tasks.is_empty() { return steps; } } }
is_free
identifier_name
channel_router.rs
// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::cmp; use std::collections::HashMap; struct Ranges { ranges: Vec<std::ops::Range<usize>>, } impl Ranges { fn new() -> Self { Ranges { ranges: Vec::new() } } fn add(&mut self, start: usize, end: usize) { let (start, end) = (cmp::min(start, end), cmp::max(start, end) + 1); self.ranges.push(std::ops::Range { start, end }); } fn contains(&self, start: usize, end: usize) -> bool { let (start, end) = (cmp::min(start, end), cmp::max(start, end)); (start..=end).any(|v| self.ranges.iter().any(|r| r.contains(&v))) } fn contains_range(&self, range: &std::ops::Range<usize>) -> bool { self.contains(range.start, range.end) } fn range_sum(&self) -> usize { self.ranges.iter().map(|r| r.end - r.start).sum() } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelState { Free, // Occupied means no connection. This is the same as a constant false. Occupied, // Constant true. Constant, Net(usize), } pub type ChannelLayout = [ChannelState]; impl ChannelState { pub fn is_free(&self) -> bool { self == &ChannelState::Free } pub fn contains_net(&self) -> bool { matches!(self, ChannelState::Net(_)) } pub fn is_constant_on(&self) -> bool { matches!(self, ChannelState::Constant) } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelOp { Move, Copy, } #[derive(Debug, Clone)] pub struct WireConnection { pub from: usize, pub to: Vec<usize>, pub mode: ChannelOp, } #[derive(Debug)] pub struct ChannelSubState { pub wires: Vec<WireConnection>, pub occupancy_map: bitmap::Bitmap<Vec<usize>, bitmap::OneBit>, } #[derive(Debug)] struct Task { net: usize, from: usize, to: Vec<usize>, } impl Task { fn channel_range_required(&self) -> std::ops::Range<usize> { let from = [self.from]; let min = self.to.iter().chain(&from).min().unwrap(); let max = self.to.iter().chain(&from).max().unwrap(); std::ops::Range { start: *min, end: max + 1, } } fn channel_width_required(&self) -> usize { let r = self.channel_range_required(); r.end - r.start } fn occupied_target_pins(&self, layout: &ChannelLayout) -> Vec<usize> { let mut occupied = Vec::new(); for &idx in &self.to { if layout[idx].contains_net() && layout[idx]!= ChannelState::Net(self.net) { occupied.push(idx); } } occupied } // Returns how 'good' a new 'from' position is for this task (when evicting) // so that we can prefer nice spots. fn eviction_cost(&self, new_pos: usize) -> usize { let min = self.to.iter().min().unwrap(); let max = self.to.iter().max().unwrap(); let dist = (self.from as isize - new_pos as isize).abs() as usize; if new_pos > *max { 2 * (new_pos - *max) + dist } else if new_pos < *min { 2 * (*min - new_pos) + dist } else { dist } } } #[derive(Default)] struct RouteTasks { // source idx -> vec<target idx> tasks: HashMap<usize, Vec<usize>>, } impl RouteTasks { fn add(&mut self, from: usize, to: usize) {
} else { self.tasks.insert(from, vec![to]); } } fn into_tasks(mut self, src: &ChannelLayout) -> Vec<Task> { self.tasks .drain() .map(|(k, v)| { let net = match src[k] { ChannelState::Net(i) => i, _ => unreachable!(), }; Task { net, from: k, to: v, } }) .collect::<Vec<_>>() } } pub fn route_channel(start: &ChannelLayout, end: &ChannelLayout) -> Vec<ChannelSubState> { let mut state = start.to_owned(); // Expand the state to be at least end.len() wide. while state.len() < end.len() { state.push(ChannelState::Free); } let mut tasks = RouteTasks::default(); for end_idx in 0..end.len() { if!end[end_idx].contains_net() || end[end_idx] == state[end_idx] { continue; } let state_idx = state .iter() .position(|v| v == &end[end_idx]) .unwrap_or_else(|| panic!("Required field '{:?}' not found", end[end_idx])); tasks.add(state_idx, end_idx); } let mut tasks = tasks.into_tasks(&state); // Order by how much of the channel this task occupies. tasks.sort_by_key(|k| k.channel_width_required()); let mut steps: Vec<ChannelSubState> = Vec::new(); loop { // Ranges of the channel that is currently occupied. let mut ranges = Ranges::new(); // Instruction on how to connect pins in the current part of the channel. let mut wires = Vec::new(); // To detect if we were unable to do anything due to blocked pins. let old_task_len = tasks.len(); tasks = tasks .drain(0..tasks.len()) .filter(|task| { // Speed things up by only 'enforcing' 50% channel utilization. if ranges.range_sum() > (cmp::max(state.len(), end.len()) / 2) { return true; } // Do we have the required part of the channel available? if ranges.contains_range(&task.channel_range_required()) { return true; } let blocking_pins = task.occupied_target_pins(&state); if blocking_pins.is_empty() { // Targets are free, directly move (or copy) it there. let keep = if task.from >= end.len() || state[task.from]!= end[task.from] { state[task.from] = ChannelState::Free; false } else { true }; wires.push(WireConnection { from: task.from, to: task.to.clone(), mode: if keep { ChannelOp::Copy } else { ChannelOp::Move }, }); let r = task.channel_range_required(); // -1 here since.add() + channel_range_required() will do +1. ranges.add(r.start, r.end - 1); for &to in &task.to { state[to] = ChannelState::Net(task.net); } // We successfully handled this one. return false; } true }) .collect::<Vec<_>>(); // We were unable to handle any tasks -> we need to evict some channels. if old_task_len == tasks.len() { // Find available positions where we can evict to. let mut free_positions = state .iter() .enumerate() .filter(|(_, v)|!v.contains_net()) .map(|(k, _)| k) .filter(|&k| k >= end.len() ||!end[k].contains_net()) .collect::<Vec<_>>(); if free_positions.is_empty() { println!("[!] No free positions found, expanding channel"); // Make sure that we have some room, scaling with the number of // remaining tasks as a random tradeoff. for _ in 0..(tasks.len() / 10 + 1) { state.push(ChannelState::Free); free_positions.push(state.len() - 1); } } for task_idx in 0..tasks.len() { let blocking_pins = tasks[task_idx].occupied_target_pins(&state); for to_evict in blocking_pins { // Find corresponding task. let task_idx_to_evict = tasks .iter() .position(|t| t.from == to_evict) .unwrap_or_else(|| panic!("Could not find task blocking {}", to_evict)); // Find a good place for this task to evict to. free_positions.sort_by(|&a, &b| { // Comparing in the opposite order on purpose here so // that we can use pop() later. 
tasks[task_idx_to_evict] .eviction_cost(b) .cmp(&tasks[task_idx_to_evict].eviction_cost(a)) }); let from = tasks[task_idx_to_evict].from; let new_pos = *free_positions.last().unwrap(); // Check whether the space is actually available. let req_range = std::ops::Range { start: cmp::min(from, new_pos), end: cmp::max(from, new_pos) + 1, }; if!ranges.contains_range(&req_range) { free_positions.pop(); ranges.add(from, new_pos); wires.push(WireConnection { from, to: vec![new_pos], mode: ChannelOp::Move, }); tasks[task_idx_to_evict].from = new_pos; state[new_pos] = ChannelState::Net(tasks[task_idx_to_evict].net); state[to_evict] = ChannelState::Free; } } } } let mut bitmap = bitmap::Bitmap::from_storage(state.len(), (), vec![0; (state.len() + 63) / 64]) .unwrap(); for idx in state .iter() .enumerate() .filter(|(_, v)| v.contains_net()) .map(|(k, _)| k) { bitmap.set(idx, 1); } steps.push(ChannelSubState { wires, occupancy_map: bitmap, }); if tasks.is_empty() { return steps; } } }
if let Some(k) = self.tasks.get_mut(&from) { k.push(to);
random_line_split
channel_router.rs
// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::cmp; use std::collections::HashMap; struct Ranges { ranges: Vec<std::ops::Range<usize>>, } impl Ranges { fn new() -> Self { Ranges { ranges: Vec::new() } } fn add(&mut self, start: usize, end: usize) { let (start, end) = (cmp::min(start, end), cmp::max(start, end) + 1); self.ranges.push(std::ops::Range { start, end }); } fn contains(&self, start: usize, end: usize) -> bool { let (start, end) = (cmp::min(start, end), cmp::max(start, end)); (start..=end).any(|v| self.ranges.iter().any(|r| r.contains(&v))) } fn contains_range(&self, range: &std::ops::Range<usize>) -> bool { self.contains(range.start, range.end) } fn range_sum(&self) -> usize
} #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelState { Free, // Occupied means no connection. This is the same as a constant false. Occupied, // Constant true. Constant, Net(usize), } pub type ChannelLayout = [ChannelState]; impl ChannelState { pub fn is_free(&self) -> bool { self == &ChannelState::Free } pub fn contains_net(&self) -> bool { matches!(self, ChannelState::Net(_)) } pub fn is_constant_on(&self) -> bool { matches!(self, ChannelState::Constant) } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum ChannelOp { Move, Copy, } #[derive(Debug, Clone)] pub struct WireConnection { pub from: usize, pub to: Vec<usize>, pub mode: ChannelOp, } #[derive(Debug)] pub struct ChannelSubState { pub wires: Vec<WireConnection>, pub occupancy_map: bitmap::Bitmap<Vec<usize>, bitmap::OneBit>, } #[derive(Debug)] struct Task { net: usize, from: usize, to: Vec<usize>, } impl Task { fn channel_range_required(&self) -> std::ops::Range<usize> { let from = [self.from]; let min = self.to.iter().chain(&from).min().unwrap(); let max = self.to.iter().chain(&from).max().unwrap(); std::ops::Range { start: *min, end: max + 1, } } fn channel_width_required(&self) -> usize { let r = self.channel_range_required(); r.end - r.start } fn occupied_target_pins(&self, layout: &ChannelLayout) -> Vec<usize> { let mut occupied = Vec::new(); for &idx in &self.to { if layout[idx].contains_net() && layout[idx]!= ChannelState::Net(self.net) { occupied.push(idx); } } occupied } // Returns how 'good' a new 'from' position is for this task (when evicting) // so that we can prefer nice spots. fn eviction_cost(&self, new_pos: usize) -> usize { let min = self.to.iter().min().unwrap(); let max = self.to.iter().max().unwrap(); let dist = (self.from as isize - new_pos as isize).abs() as usize; if new_pos > *max { 2 * (new_pos - *max) + dist } else if new_pos < *min { 2 * (*min - new_pos) + dist } else { dist } } } #[derive(Default)] struct RouteTasks { // source idx -> vec<target idx> tasks: HashMap<usize, Vec<usize>>, } impl RouteTasks { fn add(&mut self, from: usize, to: usize) { if let Some(k) = self.tasks.get_mut(&from) { k.push(to); } else { self.tasks.insert(from, vec![to]); } } fn into_tasks(mut self, src: &ChannelLayout) -> Vec<Task> { self.tasks .drain() .map(|(k, v)| { let net = match src[k] { ChannelState::Net(i) => i, _ => unreachable!(), }; Task { net, from: k, to: v, } }) .collect::<Vec<_>>() } } pub fn route_channel(start: &ChannelLayout, end: &ChannelLayout) -> Vec<ChannelSubState> { let mut state = start.to_owned(); // Expand the state to be at least end.len() wide. while state.len() < end.len() { state.push(ChannelState::Free); } let mut tasks = RouteTasks::default(); for end_idx in 0..end.len() { if!end[end_idx].contains_net() || end[end_idx] == state[end_idx] { continue; } let state_idx = state .iter() .position(|v| v == &end[end_idx]) .unwrap_or_else(|| panic!("Required field '{:?}' not found", end[end_idx])); tasks.add(state_idx, end_idx); } let mut tasks = tasks.into_tasks(&state); // Order by how much of the channel this task occupies. tasks.sort_by_key(|k| k.channel_width_required()); let mut steps: Vec<ChannelSubState> = Vec::new(); loop { // Ranges of the channel that is currently occupied. let mut ranges = Ranges::new(); // Instruction on how to connect pins in the current part of the channel. let mut wires = Vec::new(); // To detect if we were unable to do anything due to blocked pins. 
let old_task_len = tasks.len(); tasks = tasks .drain(0..tasks.len()) .filter(|task| { // Speed things up by only 'enforcing' 50% channel utilization. if ranges.range_sum() > (cmp::max(state.len(), end.len()) / 2) { return true; } // Do we have the required part of the channel available? if ranges.contains_range(&task.channel_range_required()) { return true; } let blocking_pins = task.occupied_target_pins(&state); if blocking_pins.is_empty() { // Targets are free, directly move (or copy) it there. let keep = if task.from >= end.len() || state[task.from]!= end[task.from] { state[task.from] = ChannelState::Free; false } else { true }; wires.push(WireConnection { from: task.from, to: task.to.clone(), mode: if keep { ChannelOp::Copy } else { ChannelOp::Move }, }); let r = task.channel_range_required(); // -1 here since.add() + channel_range_required() will do +1. ranges.add(r.start, r.end - 1); for &to in &task.to { state[to] = ChannelState::Net(task.net); } // We successfully handled this one. return false; } true }) .collect::<Vec<_>>(); // We were unable to handle any tasks -> we need to evict some channels. if old_task_len == tasks.len() { // Find available positions where we can evict to. let mut free_positions = state .iter() .enumerate() .filter(|(_, v)|!v.contains_net()) .map(|(k, _)| k) .filter(|&k| k >= end.len() ||!end[k].contains_net()) .collect::<Vec<_>>(); if free_positions.is_empty() { println!("[!] No free positions found, expanding channel"); // Make sure that we have some room, scaling with the number of // remaining tasks as a random tradeoff. for _ in 0..(tasks.len() / 10 + 1) { state.push(ChannelState::Free); free_positions.push(state.len() - 1); } } for task_idx in 0..tasks.len() { let blocking_pins = tasks[task_idx].occupied_target_pins(&state); for to_evict in blocking_pins { // Find corresponding task. let task_idx_to_evict = tasks .iter() .position(|t| t.from == to_evict) .unwrap_or_else(|| panic!("Could not find task blocking {}", to_evict)); // Find a good place for this task to evict to. free_positions.sort_by(|&a, &b| { // Comparing in the opposite order on purpose here so // that we can use pop() later. tasks[task_idx_to_evict] .eviction_cost(b) .cmp(&tasks[task_idx_to_evict].eviction_cost(a)) }); let from = tasks[task_idx_to_evict].from; let new_pos = *free_positions.last().unwrap(); // Check whether the space is actually available. let req_range = std::ops::Range { start: cmp::min(from, new_pos), end: cmp::max(from, new_pos) + 1, }; if!ranges.contains_range(&req_range) { free_positions.pop(); ranges.add(from, new_pos); wires.push(WireConnection { from, to: vec![new_pos], mode: ChannelOp::Move, }); tasks[task_idx_to_evict].from = new_pos; state[new_pos] = ChannelState::Net(tasks[task_idx_to_evict].net); state[to_evict] = ChannelState::Free; } } } } let mut bitmap = bitmap::Bitmap::from_storage(state.len(), (), vec![0; (state.len() + 63) / 64]) .unwrap(); for idx in state .iter() .enumerate() .filter(|(_, v)| v.contains_net()) .map(|(k, _)| k) { bitmap.set(idx, 1); } steps.push(ChannelSubState { wires, occupancy_map: bitmap, }); if tasks.is_empty() { return steps; } } }
{ self.ranges.iter().map(|r| r.end - r.start).sum() }
identifier_body
chain.rs
use std::collections::HashSet; use std::io::{self, Write}; use crate::disk::bam::BamRef; use crate::disk::block::{BlockDeviceRef, Location, BLOCK_SIZE}; use crate::disk::directory::DirectoryEntry; use crate::disk::error::DiskError; /// A "zero" chain link is a link that indicates that this is a tail block, and /// it has zero data bytes used. (Which means it has a total of two bytes /// used, counting the link itself.) pub static CHAIN_LINK_ZERO: ChainLink = ChainLink::Tail(2); #[derive(Debug)] pub enum ChainLink { Next(Location), Tail(usize), // used bytes } impl ChainLink { #[inline] pub fn new(block: &[u8]) -> io::Result<ChainLink> { if block[0] == 0x00 { // This is the last sector of the chain, so the next byte indicates how much of // this sector is actually used. if block[1] < 1 { // It's not valid for a chain sector to not include the first two bytes // as allocated. return Err(DiskError::InvalidChainLink.into()); } Ok(ChainLink::Tail(block[1] as usize + 1)) // 2..=256 } else { Ok(ChainLink::Next(Location::new(block[0], block[1]))) } } #[inline] pub fn to_bytes(&self, bytes: &mut [u8]) { assert!(bytes.len() >= 2); match &self { ChainLink::Next(location) => location.write_bytes(bytes), ChainLink::Tail(size) => { assert!(*size >= 2 && *size <= 256); bytes[0] = 0x00; bytes[1] = (*size - 1) as u8; } } } } /// A ChainSector is the result of a chain iteration, and provides the block contents and the /// location from which it was read. pub struct ChainSector { /// The 256-byte block contents, which includes the two-byte NTS (next track and sector) link. pub data: Vec<u8>, pub location: Location, }
visited_sectors: HashSet<Location>, block: [u8; BLOCK_SIZE], } impl ChainIterator { /// Create a new chain iterator starting at the specified location. pub fn new(blocks: BlockDeviceRef, starting_sector: Location) -> ChainIterator { ChainIterator { blocks, next_sector: Some(starting_sector), visited_sectors: HashSet::new(), block: [0u8; BLOCK_SIZE], } } /// Read the entire chain and return a list of locations. pub fn locations(self) -> io::Result<Vec<Location>> { self.map(|r| r.map(|cs| cs.location)).collect() } } impl Iterator for ChainIterator { type Item = io::Result<ChainSector>; fn next(&mut self) -> Option<io::Result<ChainSector>> { let location = match self.next_sector.take() { Some(next) => next, None => return None, }; // Loop detection. if!self.visited_sectors.insert(location) { return Some(Err(DiskError::ChainLoop.into())); } // Read the next sector. { let blocks = self.blocks.borrow(); let block = match blocks.sector(location) { Ok(b) => b, Err(e) => return Some(Err(e)), }; self.block.copy_from_slice(block); } // Trim the block if needed. let size = match ChainLink::new(&self.block[..]) { Ok(ChainLink::Next(location)) => { self.next_sector = Some(location); BLOCK_SIZE // The entire sector is used. } Ok(ChainLink::Tail(size)) => size, Err(e) => return Some(Err(e)), }; let block = &self.block[..size]; Some(Ok(ChainSector { data: block.to_vec(), location, })) } } /// ChainReader objects implement the Read trait are used to read a byte stream /// represented as a series of chained sectors on the disk image. Simple files /// (e.g. CBM PRG and SEQ files) store data in a single chain where the /// beginning track and sector is provided in the directory entry. More exotic /// file types (GEOS, REL, etc.) use more complex structures, possibly with /// multiple ChainReader objects (e.g. a GEOS VLIR file may provide a /// ChainReader for each record). pub struct ChainReader { chain: ChainIterator, block: Option<Vec<u8>>, eof: bool, } impl ChainReader { pub fn new(blocks: BlockDeviceRef, start: Location) -> ChainReader { let chain = ChainIterator::new(blocks, start); ChainReader { chain, block: None, eof: false, } } } impl io::Read for ChainReader { fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> { let mut total_nbytes = 0; while!buf.is_empty() &&!self.eof { match self.block.take() { Some(mut block) => { // Copy as much of this block as possible into the caller-provided buffer. let nbytes = block.len().min(buf.len()); let _ = &buf[0..nbytes].copy_from_slice(&block[0..nbytes]); total_nbytes += nbytes; // Reduce the block slice to the unread portion (which may be zero bytes). if block.len() == nbytes { } else { // Reduce let mut tail = block.split_off(nbytes); ::std::mem::swap(&mut block, &mut tail); // Return the unread portion self.block = Some(block); } // Reduce the provided buffer slice to the unwritten portion. let buf_ref = &mut buf; let value: &mut [u8] = std::mem::take(buf_ref); *buf_ref = &mut value[nbytes..]; } None => { // Read the next block. match self.chain.next() { Some(Ok(mut block)) => { // discard the next-track/sector bytes self.block = Some(block.data.split_off(2)); // Loop back to the Some(_) case to process the block. } Some(Err(e)) => { self.eof = true; return Err(e); } None => self.eof = true, } } } } Ok(total_nbytes) } } /// A writer for writing data to a chain. The chain is extended as needed according to the /// allocation algorithm for the disk format. 
pub struct ChainWriter { blocks: BlockDeviceRef, bam: BamRef, entry: DirectoryEntry, location: Location, block: Vec<u8>, dirty: bool, } impl ChainWriter { pub fn new( blocks: BlockDeviceRef, bam: BamRef, entry: DirectoryEntry, start: Location, ) -> io::Result<ChainWriter> { // Advance to the last block in the chain. let tail_block; let mut tail_location; { let blocks = blocks.borrow(); let mut block = blocks.sector(start)?; tail_location = start; while let ChainLink::Next(location) = ChainLink::new(block)? { block = blocks.sector(location)?; tail_location = location; } tail_block = block.to_vec(); } Ok(ChainWriter { blocks, bam, entry, location: tail_location, block: tail_block, dirty: true, }) } fn increment_entry_blocks(&mut self) -> io::Result<()> { let mut blocks = self.blocks.borrow_mut(); blocks.positioned_read(&mut self.entry)?; self.entry.file_size += 1; blocks.positioned_write(&self.entry)?; Ok(()) } fn allocate_next_block(&mut self) -> io::Result<usize> { // NOTE: The ordering of these steps is important for consistency. We don't // want a block to be allocated in BAM, then not used because an error // was thrown later. // Write the current block without the updated link. self.write_current_block()?; // Find a new block. let next_location = self.bam.borrow_mut().next_free_block(None)?; // Initialize a fresh block in memory with a link indicating a tail block with // zero bytes used. (Really, two bytes used for the link, but zero data // bytes used.) for i in 2..BLOCK_SIZE { self.block[i] = 0; } ChainLink::Tail(2).to_bytes(&mut self.block[..]); // Write the fresh block to the new location self.blocks .borrow_mut() .sector_mut(next_location)? .copy_from_slice(&self.block); // Allocate the next block. self.bam.borrow_mut().allocate(next_location)?; // Increment the directory entry's file size (measured in blocks) self.increment_entry_blocks()?; // If allocation succeeds, only then do we link the current block to the next // block. let mut blocks = self.blocks.borrow_mut(); let block = match blocks.sector_mut(self.location) { Ok(block) => block, Err(e) => { // Roll back the allocation. self.bam.borrow_mut().free(next_location)?; return Err(e); } }; next_location.write_bytes(block); // Update state self.location = next_location; // Return the available bytes in the newly loaded block, which is always two // less than the block size. Ok(BLOCK_SIZE - 2) } fn write_current_block(&mut self) -> io::Result<()> { // Write the current block let mut blocks = self.blocks.borrow_mut(); blocks .sector_mut(self.location)? .copy_from_slice(&self.block); Ok(()) } } impl Drop for ChainWriter { fn drop(&mut self) { let _result = self.flush(); } } // NOTE: allocating and updating entry block size should be atomic. impl io::Write for ChainWriter { fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> { self.dirty = true; let mut total_nbytes = 0; while!buf.is_empty() { let (offset, remaining) = match ChainLink::new(&self.block)? { ChainLink::Next(_) => unreachable!(), // The stored buffer is always a tail block. ChainLink::Tail(nbytes) if nbytes == BLOCK_SIZE => { // Allocate a new block let remaining = self.allocate_next_block()?; (BLOCK_SIZE - remaining, remaining) } ChainLink::Tail(nbytes) => (nbytes, BLOCK_SIZE - nbytes), }; // Copy as much of the caller-provided buffer as possible into the block. let nbytes = remaining.min(buf.len()); let _ = &self.block[offset..offset + nbytes].copy_from_slice(&buf[0..nbytes]); total_nbytes += nbytes; // Update the block link's indication of used bytes. 
ChainLink::Tail(offset + nbytes).to_bytes(&mut self.block); // Reduce the provided buffer slice to the unwritten portion. buf = &buf[nbytes..]; } Ok(total_nbytes) } fn flush(&mut self) -> io::Result<()> { if self.dirty { // Write the current block self.write_current_block()?; // Flush the BAM self.bam.borrow_mut().flush()?; // Flush the underlying medium. let mut blocks = self.blocks.borrow_mut(); blocks.flush()?; self.dirty = false; } Ok(()) } } pub fn remove_chain(blocks: BlockDeviceRef, bam: BamRef, start: Location) -> io::Result<()> { // Read the whole chain first to be sure we can visit every block with no // errors. let locations = ChainIterator::new(blocks, start).locations()?; // Deallocate let mut bam = bam.borrow_mut(); for location in locations { bam.free(location)?; } bam.flush()?; Ok(()) }
/// Returns a ChainSector which includes the NTS (next track and sector) link. pub struct ChainIterator { blocks: BlockDeviceRef, next_sector: Option<Location>,
random_line_split
chain.rs
use std::collections::HashSet; use std::io::{self, Write}; use crate::disk::bam::BamRef; use crate::disk::block::{BlockDeviceRef, Location, BLOCK_SIZE}; use crate::disk::directory::DirectoryEntry; use crate::disk::error::DiskError; /// A "zero" chain link is a link that indicates that this is a tail block, and /// it has zero data bytes used. (Which means it has a total of two bytes /// used, counting the link itself.) pub static CHAIN_LINK_ZERO: ChainLink = ChainLink::Tail(2); #[derive(Debug)] pub enum ChainLink { Next(Location), Tail(usize), // used bytes } impl ChainLink { #[inline] pub fn new(block: &[u8]) -> io::Result<ChainLink> { if block[0] == 0x00 { // This is the last sector of the chain, so the next byte indicates how much of // this sector is actually used. if block[1] < 1 { // It's not valid for a chain sector to not include the first two bytes // as allocated. return Err(DiskError::InvalidChainLink.into()); } Ok(ChainLink::Tail(block[1] as usize + 1)) // 2..=256 } else { Ok(ChainLink::Next(Location::new(block[0], block[1]))) } } #[inline] pub fn to_bytes(&self, bytes: &mut [u8]) { assert!(bytes.len() >= 2); match &self { ChainLink::Next(location) => location.write_bytes(bytes), ChainLink::Tail(size) => { assert!(*size >= 2 && *size <= 256); bytes[0] = 0x00; bytes[1] = (*size - 1) as u8; } } } } /// A ChainSector is the result of a chain iteration, and provides the block contents and the /// location from which it was read. pub struct ChainSector { /// The 256-byte block contents, which includes the two-byte NTS (next track and sector) link. pub data: Vec<u8>, pub location: Location, } /// Returns a ChainSector which includes the NTS (next track and sector) link. pub struct ChainIterator { blocks: BlockDeviceRef, next_sector: Option<Location>, visited_sectors: HashSet<Location>, block: [u8; BLOCK_SIZE], } impl ChainIterator { /// Create a new chain iterator starting at the specified location. pub fn new(blocks: BlockDeviceRef, starting_sector: Location) -> ChainIterator { ChainIterator { blocks, next_sector: Some(starting_sector), visited_sectors: HashSet::new(), block: [0u8; BLOCK_SIZE], } } /// Read the entire chain and return a list of locations. pub fn locations(self) -> io::Result<Vec<Location>> { self.map(|r| r.map(|cs| cs.location)).collect() } } impl Iterator for ChainIterator { type Item = io::Result<ChainSector>; fn next(&mut self) -> Option<io::Result<ChainSector>> { let location = match self.next_sector.take() { Some(next) => next, None => return None, }; // Loop detection. if!self.visited_sectors.insert(location) { return Some(Err(DiskError::ChainLoop.into())); } // Read the next sector. { let blocks = self.blocks.borrow(); let block = match blocks.sector(location) { Ok(b) => b, Err(e) => return Some(Err(e)), }; self.block.copy_from_slice(block); } // Trim the block if needed. let size = match ChainLink::new(&self.block[..]) { Ok(ChainLink::Next(location)) => { self.next_sector = Some(location); BLOCK_SIZE // The entire sector is used. } Ok(ChainLink::Tail(size)) => size, Err(e) => return Some(Err(e)), }; let block = &self.block[..size]; Some(Ok(ChainSector { data: block.to_vec(), location, })) } } /// ChainReader objects implement the Read trait are used to read a byte stream /// represented as a series of chained sectors on the disk image. Simple files /// (e.g. CBM PRG and SEQ files) store data in a single chain where the /// beginning track and sector is provided in the directory entry. More exotic /// file types (GEOS, REL, etc.) 
use more complex structures, possibly with /// multiple ChainReader objects (e.g. a GEOS VLIR file may provide a /// ChainReader for each record). pub struct ChainReader { chain: ChainIterator, block: Option<Vec<u8>>, eof: bool, } impl ChainReader { pub fn new(blocks: BlockDeviceRef, start: Location) -> ChainReader { let chain = ChainIterator::new(blocks, start); ChainReader { chain, block: None, eof: false, } } } impl io::Read for ChainReader { fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> { let mut total_nbytes = 0; while!buf.is_empty() &&!self.eof { match self.block.take() { Some(mut block) => { // Copy as much of this block as possible into the caller-provided buffer. let nbytes = block.len().min(buf.len()); let _ = &buf[0..nbytes].copy_from_slice(&block[0..nbytes]); total_nbytes += nbytes; // Reduce the block slice to the unread portion (which may be zero bytes). if block.len() == nbytes { } else { // Reduce let mut tail = block.split_off(nbytes); ::std::mem::swap(&mut block, &mut tail); // Return the unread portion self.block = Some(block); } // Reduce the provided buffer slice to the unwritten portion. let buf_ref = &mut buf; let value: &mut [u8] = std::mem::take(buf_ref); *buf_ref = &mut value[nbytes..]; } None => { // Read the next block. match self.chain.next() { Some(Ok(mut block)) => { // discard the next-track/sector bytes self.block = Some(block.data.split_off(2)); // Loop back to the Some(_) case to process the block. } Some(Err(e)) => { self.eof = true; return Err(e); } None => self.eof = true, } } } } Ok(total_nbytes) } } /// A writer for writing data to a chain. The chain is extended as needed according to the /// allocation algorithm for the disk format. pub struct ChainWriter { blocks: BlockDeviceRef, bam: BamRef, entry: DirectoryEntry, location: Location, block: Vec<u8>, dirty: bool, } impl ChainWriter { pub fn new( blocks: BlockDeviceRef, bam: BamRef, entry: DirectoryEntry, start: Location, ) -> io::Result<ChainWriter> { // Advance to the last block in the chain. let tail_block; let mut tail_location; { let blocks = blocks.borrow(); let mut block = blocks.sector(start)?; tail_location = start; while let ChainLink::Next(location) = ChainLink::new(block)? { block = blocks.sector(location)?; tail_location = location; } tail_block = block.to_vec(); } Ok(ChainWriter { blocks, bam, entry, location: tail_location, block: tail_block, dirty: true, }) } fn increment_entry_blocks(&mut self) -> io::Result<()> { let mut blocks = self.blocks.borrow_mut(); blocks.positioned_read(&mut self.entry)?; self.entry.file_size += 1; blocks.positioned_write(&self.entry)?; Ok(()) } fn allocate_next_block(&mut self) -> io::Result<usize> { // NOTE: The ordering of these steps is important for consistency. We don't // want a block to be allocated in BAM, then not used because an error // was thrown later. // Write the current block without the updated link. self.write_current_block()?; // Find a new block. let next_location = self.bam.borrow_mut().next_free_block(None)?; // Initialize a fresh block in memory with a link indicating a tail block with // zero bytes used. (Really, two bytes used for the link, but zero data // bytes used.) for i in 2..BLOCK_SIZE { self.block[i] = 0; } ChainLink::Tail(2).to_bytes(&mut self.block[..]); // Write the fresh block to the new location self.blocks .borrow_mut() .sector_mut(next_location)? .copy_from_slice(&self.block); // Allocate the next block. 
self.bam.borrow_mut().allocate(next_location)?; // Increment the directory entry's file size (measured in blocks) self.increment_entry_blocks()?; // If allocation succeeds, only then do we link the current block to the next // block. let mut blocks = self.blocks.borrow_mut(); let block = match blocks.sector_mut(self.location) { Ok(block) => block, Err(e) => { // Roll back the allocation. self.bam.borrow_mut().free(next_location)?; return Err(e); } }; next_location.write_bytes(block); // Update state self.location = next_location; // Return the available bytes in the newly loaded block, which is always two // less than the block size. Ok(BLOCK_SIZE - 2) } fn
(&mut self) -> io::Result<()> { // Write the current block let mut blocks = self.blocks.borrow_mut(); blocks .sector_mut(self.location)? .copy_from_slice(&self.block); Ok(()) } } impl Drop for ChainWriter { fn drop(&mut self) { let _result = self.flush(); } } // NOTE: allocating and updating entry block size should be atomic. impl io::Write for ChainWriter { fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> { self.dirty = true; let mut total_nbytes = 0; while!buf.is_empty() { let (offset, remaining) = match ChainLink::new(&self.block)? { ChainLink::Next(_) => unreachable!(), // The stored buffer is always a tail block. ChainLink::Tail(nbytes) if nbytes == BLOCK_SIZE => { // Allocate a new block let remaining = self.allocate_next_block()?; (BLOCK_SIZE - remaining, remaining) } ChainLink::Tail(nbytes) => (nbytes, BLOCK_SIZE - nbytes), }; // Copy as much of the caller-provided buffer as possible into the block. let nbytes = remaining.min(buf.len()); let _ = &self.block[offset..offset + nbytes].copy_from_slice(&buf[0..nbytes]); total_nbytes += nbytes; // Update the block link's indication of used bytes. ChainLink::Tail(offset + nbytes).to_bytes(&mut self.block); // Reduce the provided buffer slice to the unwritten portion. buf = &buf[nbytes..]; } Ok(total_nbytes) } fn flush(&mut self) -> io::Result<()> { if self.dirty { // Write the current block self.write_current_block()?; // Flush the BAM self.bam.borrow_mut().flush()?; // Flush the underlying medium. let mut blocks = self.blocks.borrow_mut(); blocks.flush()?; self.dirty = false; } Ok(()) } } pub fn remove_chain(blocks: BlockDeviceRef, bam: BamRef, start: Location) -> io::Result<()> { // Read the whole chain first to be sure we can visit every block with no // errors. let locations = ChainIterator::new(blocks, start).locations()?; // Deallocate let mut bam = bam.borrow_mut(); for location in locations { bam.free(location)?; } bam.flush()?; Ok(()) }
write_current_block
identifier_name
main.rs
use std::{ collections::HashSet, fmt, fs::{File, OpenOptions}, io::{self, prelude::*, BufRead, Cursor}, net::{IpAddr, Ipv4Addr}, process::Command, }; use failure::Fail; use nom::{ branch::alt, bytes::complete::tag, character::complete::{alpha1, alphanumeric1, digit1, hex_digit1, one_of, space1}, combinator::{all_consuming, map_res, opt, recognize, rest}, error::{convert_error, ErrorKind, ParseError, VerboseError}, multi::{many0, many_m_n, separated_list}, sequence::{preceded, tuple}, Err, IResult, }; use structopt::StructOpt; static HOSTS_FILE: &str = "/etc/hosts"; #[derive(Debug, StructOpt)] #[structopt(name = "local-domain-alias")] struct Options { #[structopt(name = "port")] port: u16, #[structopt(name = "alias")] alias: String, #[structopt(skip)] ip: Option<u8>, } #[derive(Debug, Fail)] enum Error { #[fail(display = "alias is already in use")] AliasAlreadyInUse, #[fail(display = "incomplete alias")] IncompleteAlias, #[fail(display = "invalid alias format\n{}", _0)] InvalidAliasFormat(String), #[fail(display = "could not set up port forwarding: ip tables error {}", _0)] IptablesCommandFailed(i32), #[fail(display = "must be run as root")] MustRunAsRoot, #[fail(display = "io error: {}", _0)] IoError(io::Error), } impl From<io::Error> for Error { fn from(error: io::Error) -> Error { match error.kind() { io::ErrorKind::PermissionDenied => Error::MustRunAsRoot, _e => Error::IoError(dbg!(error)), } } } fn octet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u8, E> { map_res(digit1, |s: &str| s.parse::<u8>())(input) } fn dotted_octet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u8, E> { preceded(tag("."), octet)(input) } fn ip_v4_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { recognize(tuple((octet, dotted_octet, dotted_octet, dotted_octet)))(input) } fn hextet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u16, E> { map_res(hex_digit1, |s: &str| s.parse::<u16>())(input) } fn sep_hextet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u16, E> { preceded(tag("::"), hextet)(input) } fn ip_v6_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { let parser = preceded(opt(hextet), many_m_n(1, 7, sep_hextet)); recognize(parser)(input) } fn ip_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, IpAddr, E> { map_res(alt((ip_v4_addr, ip_v6_addr)), |s: &str| s.parse::<IpAddr>())(input) } fn hostname<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { recognize(tuple(( alpha1, many0(alt((alphanumeric1, recognize(one_of("-."))))), )))(input) } fn check_hostname<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, (), E> { all_consuming(hostname)(input).map(|(input, _)| (input, ())) } fn aliases<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, Vec<String>, E> { let (input, aliases) = separated_list(tag(" "), hostname)(input)?; Ok((input, aliases.into_iter().map(String::from).collect())) } fn
<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { preceded(tag("#"), rest)(input) } #[derive(Debug)] struct HostsLine { ip: IpAddr, canonical_hostname: String, aliases: Vec<String>, comment: Option<String>, } impl HostsLine { fn new(ip: IpAddr, canonical_hostname: String) -> HostsLine { let aliases = Vec::new(); let comment = None; HostsLine { ip, canonical_hostname, aliases, comment, } } } impl fmt::Display for HostsLine { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let HostsLine { ip, canonical_hostname, aliases, comment, } = self; let sep = match ip.to_string().chars().count() { 0..=8 => "\t\t", 7..=16 => "\t", _ => " ", }; write!( f, "{ip}{sep}{ch}", ip = ip, sep = sep, ch = canonical_hostname, )?; if!aliases.is_empty() { write!(f, "\t{}", aliases.join(" "))?; } if let Some(comment) = comment { write!(f, "#{}", comment)?; } Ok(()) } } fn hosts_line<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, HostsLine, E> { let (input, ip) = ip_addr(input)?; let (input, _) = space1(input)?; let (input, canonical_hostname) = hostname(input)?; let (input, _) = space1(input)?; let (input, aliases) = opt(aliases)(input)?; let (input, comment) = opt(comment)(input)?; let canonical_hostname = String::from(canonical_hostname); let aliases = aliases.unwrap_or_else(Vec::new); let comment = comment.map(String::from); Ok(( input, HostsLine { ip, canonical_hostname, aliases, comment, }, )) } #[derive(Debug)] enum Line { Unstructured(String), Structured(HostsLine), } impl Line { fn structured(ip: IpAddr, canonical_name: String) -> Line { Line::Structured(HostsLine::new(ip, canonical_name)) } fn structured_ref(&self) -> Option<&HostsLine> { match self { Line::Structured(line) => Some(line), Line::Unstructured(_) => None, } } } impl fmt::Display for Line { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Line::Unstructured(line) => write!(f, "{}", line), Line::Structured(hosts_line) => write!(f, "{}", hosts_line), } } } fn parse_line(line: &str) -> Line { match hosts_line::<(&str, ErrorKind)>(&line) { Ok((_, hosts_line)) => Line::Structured(hosts_line), Err(_error) => Line::Unstructured(String::from(line)), } } fn validate_alias(alias: &str) -> Result<(), Error> { check_hostname::<VerboseError<&str>>(alias) .map(|_| ()) .map_err(|error| match error { Err::Incomplete(_) => Error::IncompleteAlias, Err::Error(e) | Err::Failure(e) => Error::InvalidAliasFormat(convert_error(alias, e)), }) } fn iptables_rules_exist(options: &Options) -> Result<bool, Error> { let rule_match = format!( "-A OUTPUT -s 127.0.0.1/32 -d {alias}/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination 127.0.0.", alias = options.alias, ); let output = Command::new("iptables") .args(&["-t", "nat", "-S", "OUTPUT"]) .output()?; let stdout = Cursor::new(output.stdout); let matched_lines: Vec<_> = stdout .lines() .filter_map(|line_ret| { line_ret.ok().and_then(|line| { let line: String = dbg!(line); line.rfind(&rule_match).map(|index| dbg!((index, line))) }) }) .collect(); let port = options.port.to_string(); if let Some((idx, line)) = matched_lines.first() { if dbg!(&line[*idx..]) == port { return Ok(true); } else { return Err(Error::AliasAlreadyInUse); } } Ok(false) } fn write_iptables_rules(options: &Options) -> Result<(), Error> { let status = Command::new("iptables") .args(&[ "-t", "nat", "--append", "OUTPUT", "--protocol", "tcp", "--dport", "80", "--source", "127.0.0.1", "--destination", &options.alias, "--jump", "DNAT", "--to-destination", &format!("127.0.0.{ip}:{port}", 
ip = "1", port = options.port), ]) .status()?; if!status.success() { return Err(Error::IptablesCommandFailed(status.code().unwrap_or(-1))); } Ok(()) } fn next_unused_local_ip(in_use_ips: &HashSet<IpAddr>) -> IpAddr { for b in 0..128 { for c in 0..128 { for d in 1..128 { let ip = IpAddr::V4(Ipv4Addr::new(127, b, c, d)); if!in_use_ips.contains(&ip) { return ip; } } } } "127.0.0.1".parse().unwrap() } fn run() -> Result<(), Error> { let options = Options::from_args(); validate_alias(&options.alias)?; let mut file = File::open(HOSTS_FILE)?; file.seek(io::SeekFrom::Start(0))?; let reader = io::BufReader::new(file); let mut lines: Vec<_> = reader .lines() .map(|line_res| line_res.map(|line| parse_line(&line))) .collect::<Result<Vec<_>, io::Error>>()?; let mut file = OpenOptions::new().write(true).open(HOSTS_FILE)?; file.seek(io::SeekFrom::Start(0))?; let structured_refs: Vec<_> = lines .iter() .filter_map(|line| line.structured_ref()) .collect(); if structured_refs .iter() .find(|&x| *x.canonical_hostname == options.alias) .is_none() { let in_use_ips: HashSet<IpAddr> = structured_refs.iter().map(|line| line.ip).collect(); let ip = next_unused_local_ip(&in_use_ips); lines.push(Line::structured(ip, options.alias.clone())); } else { eprintln!("Alias already in /etc/hosts not adding a second entry"); } for line in &lines { writeln!(file, "{}", line)?; } file.sync_all()?; drop(file); if!iptables_rules_exist(&options)? { write_iptables_rules(&options)?; } Ok(()) } fn main() { match run() { Ok(()) => {} Err(err) => { eprintln!("local-domain-alias: {}", err); std::process::exit(1); } } } #[cfg(test)] mod tests { use super::*; macro_rules! assert_parse_err { { $fn_name:ident($input:literal), rest == $expected_rest:literal } => { match $fn_name::<VerboseError<&str>>($input) { Ok((rest, value)) => { assert_eq!($expected_rest, rest, "actual unparsed input"); panic!("parse unexpectedly succeeded: {ifn}({iarg}) rest: '{rest}', value: '{value:?}'", ifn = stringify!($fn_name), iarg = stringify!($input), rest = rest, value = value, ); } Err(err) => err, } } } macro_rules! assert_parse_ok { { $fn_name:ident($input:literal) } => { match $fn_name::<VerboseError<&str>>($input) { Err(Err::Incomplete(i)) => panic!("incomplete input: '{}' {:?}", $input, i), Err(Err::Error(e)) | Err(Err::Failure(e)) => { panic!("failed to parse: {ifn}({iarg})\n{converted}", ifn = stringify!($fn_name), iarg = stringify!($input), converted = convert_error($input, e)); }, Ok(ret) => ret, } } } #[test] fn parse_hostname() { assert_parse_err!(hostname("123"), rest == ""); assert_parse_ok!(hostname("a123")); assert_parse_ok!(hostname("abc")); assert_parse_ok!(hostname("abc.def")); assert_parse_ok!(hostname("abc-def")); assert_parse_ok!(hostname("abc-def.ghi")); } #[test] fn parse_check_hostname() { assert_parse_err!(check_hostname("123"), rest == ""); assert_parse_ok!(check_hostname("a123")); assert_parse_ok!(check_hostname("abc")); assert_parse_ok!(check_hostname("abc-def")); assert_parse_ok!(hostname("abc.def")); assert_parse_ok!(hostname("abc-def.ghi")); } #[test] fn parse_aliases() { assert_parse_ok!(aliases("123")); assert_parse_ok!(aliases("a123")); assert_parse_ok!(aliases("abc")); } #[test] fn parse_comment() { assert_parse_err!(comment("123"), rest == "123"); assert_parse_err!(comment(""), rest == ""); assert_parse_ok!(comment("#")); assert_parse_ok!(comment("#abc 123!@# {}()[]")); assert_parse_ok!(comment("#abc123!@#\nfoobar")); } }
comment
identifier_name
main.rs
use std::{ collections::HashSet, fmt, fs::{File, OpenOptions}, io::{self, prelude::*, BufRead, Cursor}, net::{IpAddr, Ipv4Addr}, process::Command, }; use failure::Fail; use nom::{ branch::alt, bytes::complete::tag, character::complete::{alpha1, alphanumeric1, digit1, hex_digit1, one_of, space1}, combinator::{all_consuming, map_res, opt, recognize, rest}, error::{convert_error, ErrorKind, ParseError, VerboseError}, multi::{many0, many_m_n, separated_list}, sequence::{preceded, tuple}, Err, IResult, }; use structopt::StructOpt; static HOSTS_FILE: &str = "/etc/hosts"; #[derive(Debug, StructOpt)] #[structopt(name = "local-domain-alias")] struct Options { #[structopt(name = "port")] port: u16, #[structopt(name = "alias")] alias: String, #[structopt(skip)] ip: Option<u8>, } #[derive(Debug, Fail)] enum Error { #[fail(display = "alias is already in use")] AliasAlreadyInUse, #[fail(display = "incomplete alias")] IncompleteAlias, #[fail(display = "invalid alias format\n{}", _0)] InvalidAliasFormat(String), #[fail(display = "could not set up port forwarding: ip tables error {}", _0)] IptablesCommandFailed(i32), #[fail(display = "must be run as root")] MustRunAsRoot, #[fail(display = "io error: {}", _0)] IoError(io::Error), } impl From<io::Error> for Error { fn from(error: io::Error) -> Error { match error.kind() { io::ErrorKind::PermissionDenied => Error::MustRunAsRoot, _e => Error::IoError(dbg!(error)), } } } fn octet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u8, E> { map_res(digit1, |s: &str| s.parse::<u8>())(input) } fn dotted_octet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u8, E> { preceded(tag("."), octet)(input) } fn ip_v4_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { recognize(tuple((octet, dotted_octet, dotted_octet, dotted_octet)))(input) } fn hextet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u16, E> { map_res(hex_digit1, |s: &str| s.parse::<u16>())(input) } fn sep_hextet<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, u16, E> { preceded(tag("::"), hextet)(input) } fn ip_v6_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { let parser = preceded(opt(hextet), many_m_n(1, 7, sep_hextet)); recognize(parser)(input) } fn ip_addr<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, IpAddr, E> { map_res(alt((ip_v4_addr, ip_v6_addr)), |s: &str| s.parse::<IpAddr>())(input) } fn hostname<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { recognize(tuple(( alpha1, many0(alt((alphanumeric1, recognize(one_of("-."))))), )))(input) } fn check_hostname<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, (), E> { all_consuming(hostname)(input).map(|(input, _)| (input, ())) } fn aliases<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, Vec<String>, E> { let (input, aliases) = separated_list(tag(" "), hostname)(input)?; Ok((input, aliases.into_iter().map(String::from).collect())) }
#[derive(Debug)] struct HostsLine { ip: IpAddr, canonical_hostname: String, aliases: Vec<String>, comment: Option<String>, } impl HostsLine { fn new(ip: IpAddr, canonical_hostname: String) -> HostsLine { let aliases = Vec::new(); let comment = None; HostsLine { ip, canonical_hostname, aliases, comment, } } } impl fmt::Display for HostsLine { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let HostsLine { ip, canonical_hostname, aliases, comment, } = self; let sep = match ip.to_string().chars().count() { 0..=8 => "\t\t", 7..=16 => "\t", _ => " ", }; write!( f, "{ip}{sep}{ch}", ip = ip, sep = sep, ch = canonical_hostname, )?; if!aliases.is_empty() { write!(f, "\t{}", aliases.join(" "))?; } if let Some(comment) = comment { write!(f, "#{}", comment)?; } Ok(()) } } fn hosts_line<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, HostsLine, E> { let (input, ip) = ip_addr(input)?; let (input, _) = space1(input)?; let (input, canonical_hostname) = hostname(input)?; let (input, _) = space1(input)?; let (input, aliases) = opt(aliases)(input)?; let (input, comment) = opt(comment)(input)?; let canonical_hostname = String::from(canonical_hostname); let aliases = aliases.unwrap_or_else(Vec::new); let comment = comment.map(String::from); Ok(( input, HostsLine { ip, canonical_hostname, aliases, comment, }, )) } #[derive(Debug)] enum Line { Unstructured(String), Structured(HostsLine), } impl Line { fn structured(ip: IpAddr, canonical_name: String) -> Line { Line::Structured(HostsLine::new(ip, canonical_name)) } fn structured_ref(&self) -> Option<&HostsLine> { match self { Line::Structured(line) => Some(line), Line::Unstructured(_) => None, } } } impl fmt::Display for Line { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Line::Unstructured(line) => write!(f, "{}", line), Line::Structured(hosts_line) => write!(f, "{}", hosts_line), } } } fn parse_line(line: &str) -> Line { match hosts_line::<(&str, ErrorKind)>(&line) { Ok((_, hosts_line)) => Line::Structured(hosts_line), Err(_error) => Line::Unstructured(String::from(line)), } } fn validate_alias(alias: &str) -> Result<(), Error> { check_hostname::<VerboseError<&str>>(alias) .map(|_| ()) .map_err(|error| match error { Err::Incomplete(_) => Error::IncompleteAlias, Err::Error(e) | Err::Failure(e) => Error::InvalidAliasFormat(convert_error(alias, e)), }) } fn iptables_rules_exist(options: &Options) -> Result<bool, Error> { let rule_match = format!( "-A OUTPUT -s 127.0.0.1/32 -d {alias}/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination 127.0.0.", alias = options.alias, ); let output = Command::new("iptables") .args(&["-t", "nat", "-S", "OUTPUT"]) .output()?; let stdout = Cursor::new(output.stdout); let matched_lines: Vec<_> = stdout .lines() .filter_map(|line_ret| { line_ret.ok().and_then(|line| { let line: String = dbg!(line); line.rfind(&rule_match).map(|index| dbg!((index, line))) }) }) .collect(); let port = options.port.to_string(); if let Some((idx, line)) = matched_lines.first() { if dbg!(&line[*idx..]) == port { return Ok(true); } else { return Err(Error::AliasAlreadyInUse); } } Ok(false) } fn write_iptables_rules(options: &Options) -> Result<(), Error> { let status = Command::new("iptables") .args(&[ "-t", "nat", "--append", "OUTPUT", "--protocol", "tcp", "--dport", "80", "--source", "127.0.0.1", "--destination", &options.alias, "--jump", "DNAT", "--to-destination", &format!("127.0.0.{ip}:{port}", ip = "1", port = options.port), ]) .status()?; if!status.success() { return 
Err(Error::IptablesCommandFailed(status.code().unwrap_or(-1))); } Ok(()) } fn next_unused_local_ip(in_use_ips: &HashSet<IpAddr>) -> IpAddr { for b in 0..128 { for c in 0..128 { for d in 1..128 { let ip = IpAddr::V4(Ipv4Addr::new(127, b, c, d)); if!in_use_ips.contains(&ip) { return ip; } } } } "127.0.0.1".parse().unwrap() } fn run() -> Result<(), Error> { let options = Options::from_args(); validate_alias(&options.alias)?; let mut file = File::open(HOSTS_FILE)?; file.seek(io::SeekFrom::Start(0))?; let reader = io::BufReader::new(file); let mut lines: Vec<_> = reader .lines() .map(|line_res| line_res.map(|line| parse_line(&line))) .collect::<Result<Vec<_>, io::Error>>()?; let mut file = OpenOptions::new().write(true).open(HOSTS_FILE)?; file.seek(io::SeekFrom::Start(0))?; let structured_refs: Vec<_> = lines .iter() .filter_map(|line| line.structured_ref()) .collect(); if structured_refs .iter() .find(|&x| *x.canonical_hostname == options.alias) .is_none() { let in_use_ips: HashSet<IpAddr> = structured_refs.iter().map(|line| line.ip).collect(); let ip = next_unused_local_ip(&in_use_ips); lines.push(Line::structured(ip, options.alias.clone())); } else { eprintln!("Alias already in /etc/hosts not adding a second entry"); } for line in &lines { writeln!(file, "{}", line)?; } file.sync_all()?; drop(file); if!iptables_rules_exist(&options)? { write_iptables_rules(&options)?; } Ok(()) } fn main() { match run() { Ok(()) => {} Err(err) => { eprintln!("local-domain-alias: {}", err); std::process::exit(1); } } } #[cfg(test)] mod tests { use super::*; macro_rules! assert_parse_err { { $fn_name:ident($input:literal), rest == $expected_rest:literal } => { match $fn_name::<VerboseError<&str>>($input) { Ok((rest, value)) => { assert_eq!($expected_rest, rest, "actual unparsed input"); panic!("parse unexpectedly succeeded: {ifn}({iarg}) rest: '{rest}', value: '{value:?}'", ifn = stringify!($fn_name), iarg = stringify!($input), rest = rest, value = value, ); } Err(err) => err, } } } macro_rules! assert_parse_ok { { $fn_name:ident($input:literal) } => { match $fn_name::<VerboseError<&str>>($input) { Err(Err::Incomplete(i)) => panic!("incomplete input: '{}' {:?}", $input, i), Err(Err::Error(e)) | Err(Err::Failure(e)) => { panic!("failed to parse: {ifn}({iarg})\n{converted}", ifn = stringify!($fn_name), iarg = stringify!($input), converted = convert_error($input, e)); }, Ok(ret) => ret, } } } #[test] fn parse_hostname() { assert_parse_err!(hostname("123"), rest == ""); assert_parse_ok!(hostname("a123")); assert_parse_ok!(hostname("abc")); assert_parse_ok!(hostname("abc.def")); assert_parse_ok!(hostname("abc-def")); assert_parse_ok!(hostname("abc-def.ghi")); } #[test] fn parse_check_hostname() { assert_parse_err!(check_hostname("123"), rest == ""); assert_parse_ok!(check_hostname("a123")); assert_parse_ok!(check_hostname("abc")); assert_parse_ok!(check_hostname("abc-def")); assert_parse_ok!(hostname("abc.def")); assert_parse_ok!(hostname("abc-def.ghi")); } #[test] fn parse_aliases() { assert_parse_ok!(aliases("123")); assert_parse_ok!(aliases("a123")); assert_parse_ok!(aliases("abc")); } #[test] fn parse_comment() { assert_parse_err!(comment("123"), rest == "123"); assert_parse_err!(comment(""), rest == ""); assert_parse_ok!(comment("#")); assert_parse_ok!(comment("#abc 123!@# {}()[]")); assert_parse_ok!(comment("#abc123!@#\nfoobar")); } }
fn comment<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, &'a str, E> { preceded(tag("#"), rest)(input) }
random_line_split
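// A minimal usage sketch for the hosts-file parsing in the row above: `parse_line`
// keeps lines it cannot parse verbatim and lets structured lines round-trip through
// `Display`. The sample line is illustrative; it relies only on the `Line`,
// `HostsLine`, and `parse_line` items defined in that file.
fn example_round_trip() {
    match parse_line("127.0.0.1\tlocalhost my-alias") {
        Line::Structured(hosts_line) => {
            // Structured entries can be rewritten in the canonical column layout.
            println!("{}", hosts_line);
        }
        Line::Unstructured(raw) => {
            // Comments and blank lines are written back exactly as they were read.
            println!("{}", raw);
        }
    }
}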
attr.rs
//! Parameters for modifying the appearance or behavior of [`ContentItem`]s. use { std::{ convert::{ TryFrom, TryInto, }, fmt, str::FromStr, }, css_color_parser::ColorParseError, url::Url, crate::{ ContentItem, Menu, }, }; #[cfg(feature = "url1")] use url1::Url as Url1; #[cfg(all(feature = "base64", feature = "image"))] use { std::io::Cursor, image::{ DynamicImage, ImageError, ImageOutputFormat::Png, ImageResult, }, }; /// Used in [`ContentItem::color`](ContentItem::color()). /// /// Construct via [`Into`] or [`TryInto`](std::convert::TryInto) implementations. #[derive(Debug, Clone, Copy, PartialEq)] pub struct Color { pub(crate) light: css_color_parser::Color, /// SwiftBar only: separate color for dark system theme. If `None`, use `light`. pub(crate) dark: Option<css_color_parser::Color>, } impl From<css_color_parser::Color> for Color { fn from(light: css_color_parser::Color) -> Color { Color { light, dark: None } } } impl FromStr for Color { type Err = ColorParseError; fn from_str(s: &str) -> Result<Color, ColorParseError> { Ok(Color { light: s.parse()?, dark: None, }) } } impl<'a> TryFrom<&'a str> for Color { type Error = ColorParseError; fn try_from(s: &str) -> Result<Color, ColorParseError> { s.parse() } } #[cfg(feature = "css-colors")] macro_rules! css_color_try_into_color { ($t:ty) => { #[cfg_attr(docsrs, doc(cfg(feature = "css-colors")))] impl TryFrom<$t> for Color { type Error = ColorParseError; fn try_from(color: $t) -> Result<Color, ColorParseError> { Ok(Color { light: color.to_string().parse()?, dark: None, }) } } }; } #[cfg(feature = "css-colors")] css_color_try_into_color!(css_colors::RGB); #[cfg(feature = "css-colors")] css_color_try_into_color!(css_colors::RGBA); #[cfg(feature = "css-colors")] css_color_try_into_color!(css_colors::HSL); #[cfg(feature = "css-colors")] css_color_try_into_color!(css_colors::HSLA); #[cfg(feature = "serenity")] #[cfg_attr(docsrs, doc(cfg(feature = "serenity")))] impl From<serenity::utils::Colour> for Color { fn from(c: serenity::utils::Colour) -> Color { Color { light: css_color_parser::Color { r: c.r(), g: c.g(), b: c.b(), a: 1.0, }, dark: None, } } } impl fmt::Display for Color { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "#{:02x}{:02x}{:02x}", self.light.r, self.light.g, self.light.b)?; if let Some(dark) = self.dark { write!(f, ",#{:02x}{:02x}{:02x}", dark.r, dark.g, dark.b)?; } Ok(()) } } #[derive(Debug)] /// A menu item's alternate mode or submenu. pub enum Extra { /// A menu item's alternate mode, shown when <key>⌥</key> is held. Alternate(Box<ContentItem>), //TODO make sure alts don't have submenus /// A submenu. Submenu(Menu), } /// Used by [`ContentItem::href`](ContentItem::href()). pub trait IntoUrl { /// Converts `self` into a [`Url`]. fn into_url(self) -> Result<Url, url::ParseError>; } impl IntoUrl for Url { fn into_url(self) -> Result<Url, url::ParseError> { Ok(self) } } impl IntoUrl for String { fn into_url(self) -> Result<Url, url::ParseError> { Url::parse(&self) } } impl<'a> IntoUrl for &'a str { fn into_url(self) -> Result<Url, url::ParseError> { Url::parse(self) } } #[cfg(feature = "url1")] #[cfg_attr(docsrs, doc(cfg(feature = "url1")))] impl IntoUrl for Url1 { fn into_url(self) -> Result<Url, url::ParseError> { Url::parse(self.as_str()) } } /// BitBar only supports up to five parameters for `bash=` commands (see <https://github.com/matryer/bitbar/issues/490>). 
#[derive(Debug)] pub struct Params { pub(crate) cmd: String, pub(crate) params: Vec<String>, } impl Params { #[doc(hidden)] // used in proc macro pub fn new(cmd: String, params: Vec<String>) -> Self { Self { cmd, params } } } macro_rules! params_from { ($n:literal$(, $elt:ident: $t:ident)*) => { impl<T: ToString> From<[T; $n]> for Params { fn from([cmd, $($elt),*]: [T; $n]) -> Params { Params { cmd: cmd.to_string(), params: vec![$($elt.to_string()),*], } } } impl<Cmd: ToString, $($t: ToString),*> From<(Cmd, $($t),*)> for Params { fn from((cmd, $($elt),*): (Cmd, $($t),*)) -> Params { Params { cmd: cmd.to_string(), params: vec![$($elt.to_string()),*], } } } }; } params_from!(1); params_from!(2, param1: A); params_from!(3, param1: A, param2: B); params_from!(4, param1: A, param2: B, param3: C); params_from!(5, param1: A, param2: B, param3: C, param4: D); params_from!(6, param1: A, param2: B, param3: C, param4: D, param5: E); impl<'a, T: ToString> TryFrom<&'a [T]> for Params { type Error = &'a [T]; fn try_from(slice: &[T]) -> Result<Params, &[T]> { match slice { [cmd] => Ok(Params { cmd: cmd.to_string(), params: Vec::default() }), [cmd, param1] => Ok(Params { cmd: cmd.to_string(), params: vec![param1.to_string()] }), [cmd, param1, param2] => Ok(Params { cmd: cmd.to_string(), params: vec![param1.to_string(), param2.to_string()] }), [cmd, param1, param2, param3] => Ok(Params { cmd: cmd.to_string(), params: vec![param1.to_string(), param2.to_string(), param3.to_string()] }), [cmd, param1, param2, param3, param4] => Ok(Params { cmd: cmd.to_string(), params: vec![param1.to_string(), param2.to_string(), param3.to_string(), param4.to_string()] }), [cmd, param1, param2, param3, param4, param5] => Ok(Params { cmd: cmd.to_string(), params: vec![param1.to_string(), param2.to_string(), param3.to_string(), param4.to_string(), param5.to_string()] }), slice => Err(slice), } } } impl<T: ToString> TryFrom<Vec<T>> for Params { type Error = Vec<T>; fn try_from(mut v: Vec<T>) -> Result<Params, Vec<T>> { match v.len() { 1..=6 => Ok(Params { cmd: v.remove(0).to_string(), params: v.into_iter().map(|x| x.to_string()).collect(), }), _ => Err(v), } } } /// Used by [`ContentItem::command`](ContentItem::command()). /// /// A `Command` contains the [`Params`], which includes the actual command (called `bash=` by BitBar) and its parameters, and the value of `terminal=`. /// /// It is usually constructed via conversion, unless `terminal=true` is required. /// /// **Note:** Unlike BitBar's default of `true`, `Command` assumes a default of `terminal=false`. #[derive(Debug)] pub struct Command { pub(crate) params: Params, pub(crate) terminal: bool, } impl Command { /// Creates a `Command` with the `terminal=` value set to `true`. pub fn terminal(args: impl Into<Params>) -> Command { Command { params: args.into(), terminal: true, } } /// Attempts to construct a `Command` with `terminal=` set to `false` from the given arguments. /// /// This is not a `TryFrom` implementation due to a limitation in Rust. pub fn try_from<P: TryInto<Params>>(args: P) -> Result<Command, P::Error> { Ok(Command { params: args.try_into()?, terminal: false, }) } /// Same as `Command::terminal` but for types that might not convert to `Params`. pub fn try_terminal<P: TryInto<Params>>(args: P) -> Result<Command, P::Error> { Ok(Command { params: args.try_into()?, terminal: true, }) } } /// Converts an array containing a command string and 0–5 parameters to a command argument vector. The `terminal=` value will be `false`. 
impl<P: Into<Params>> From<P> for Command { fn from
s: P) -> Command { Command { params: args.into(), terminal: false, } } } /// Used by `ContentItem::image` and `ContentItem::template_image`. #[derive(Debug, Clone)] pub struct Image { /// The base64-encoded image data. pub base64_data: String, /// If this is `true`, the image will be used with BitBar's `templateImage=` instead of `image=`. pub is_template: bool, } impl Image { /// Constructs a template image, even if the `TryInto` implementation would otherwise construct a non-template image. pub fn template<T: TryInto<Image>>(img: T) -> Result<Image, T::Error> { let mut result = img.try_into()?; result.is_template = true; Ok(result) } } /// Converts already-encoded base64 data to a non-template image. impl From<String> for Image { fn from(base64_data: String) -> Image { Image { base64_data, is_template: false, } } } /// Converts a PNG file to a non-template image. #[cfg(feature = "base64")] #[cfg_attr(docsrs, doc(cfg(feature = "base64")))] impl From<Vec<u8>> for Image { fn from(input: Vec<u8>) -> Image { Image { base64_data: base64::encode(&input), is_template: false, } } } /// Converts a PNG file to a non-template image. #[cfg(feature = "base64")] #[cfg_attr(docsrs, doc(cfg(feature = "base64")))] impl<T:?Sized + AsRef<[u8]>> From<&T> for Image { fn from(input: &T) -> Image { Image { base64_data: base64::encode(input), is_template: false, } } } #[cfg(all(feature = "base64", feature = "image"))] #[cfg_attr(docsrs, doc(cfg(all(feature = "base64", feature = "image"))))] impl TryFrom<DynamicImage> for Image { type Error = ImageError; fn try_from(img: DynamicImage) -> ImageResult<Image> { let mut buf = Cursor::<Vec<_>>::default(); img.write_to(&mut buf, Png)?; Ok(Image::from(buf.into_inner())) } }
(arg
identifier_name
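// Sketch of the conversions defined in the row above: arrays and tuples of 1–6
// `ToString` values convert into `Params`, and anything `Into<Params>` converts
// into a `Command` with `terminal=false`. The concrete command strings here are
// purely illustrative.
fn example_commands() {
    // Command plus two parameters, run without a terminal window.
    let cmd = Command::from(("/usr/bin/open", "-a", "Safari"));
    // Same idea, but with `terminal=true`.
    let in_terminal = Command::terminal(["/bin/sh", "-c", "echo hello"]);
    // Slices of unknown length fall back to the fallible conversion.
    let maybe: Result<Params, _> = Params::try_from(&["only-cmd"][..]);
    let _ = (cmd, in_terminal, maybe);
}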
attr.rs
//! Parameters for modifying the appearance or behavior of [`ContentItem`]s. use { std::{ convert::{ TryFrom, TryInto, }, fmt, str::FromStr, }, css_color_parser::ColorParseError, url::Url, crate::{ ContentItem, Menu, }, }; #[cfg(feature = "url1")] use url1::Url as Url1; #[cfg(all(feature = "base64", feature = "image"))] use { std::io::Cursor, image::{ DynamicImage, ImageError, ImageOutputFormat::Png, ImageResult, }, }; /// Used in [`ContentItem::color`](ContentItem::color()). /// /// Construct via [`Into`] or [`TryInto`](std::convert::TryInto) implementations. #[derive(Debug, Clone, Copy, PartialEq)] pub struct Color { pub(crate) light: css_color_parser::Color, /// SwiftBar only: separate color for dark system theme. If `None`, use `light`. pub(crate) dark: Option<css_color_parser::Color>, } impl From<css_color_parser::Color> for Color { fn from(light: css_color_parser::Color) -> Color { Color { light, dark: None } } } impl FromStr for Color { type Err = ColorParseError; fn from_str(s: &str) -> Result<Color, ColorParseError> { Ok(Color { light: s.parse()?, dark: None, }) } } impl<'a> TryFrom<&'a str> for Color { type Error = ColorParseError; fn try_from(s: &str) -> Result<Color, ColorParseError> { s.parse() } } #[cfg(feature = "css-colors")] macro_rules! css_color_try_into_color { ($t:ty) => { #[cfg_attr(docsrs, doc(cfg(feature = "css-colors")))] impl TryFrom<$t> for Color { type Error = ColorParseError; fn try_from(color: $t) -> Result<Color, ColorParseError> { Ok(Color { light: color.to_string().parse()?, dark: None, }) } } }; } #[cfg(feature = "css-colors")] css_color_try_into_color!(css_colors::RGB); #[cfg(feature = "css-colors")] css_color_try_into_color!(css_colors::RGBA); #[cfg(feature = "css-colors")] css_color_try_into_color!(css_colors::HSL); #[cfg(feature = "css-colors")] css_color_try_into_color!(css_colors::HSLA); #[cfg(feature = "serenity")] #[cfg_attr(docsrs, doc(cfg(feature = "serenity")))] impl From<serenity::utils::Colour> for Color { fn from(c: serenity::utils::Colour) -> Color { Color { light: css_color_parser::Color { r: c.r(), g: c.g(), b: c.b(), a: 1.0, }, dark: None, } } } impl fmt::Display for Color { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "#{:02x}{:02x}{:02x}", self.light.r, self.light.g, self.light.b)?; if let Some(dark) = self.dark { write!(f, ",#{:02x}{:02x}{:02x}", dark.r, dark.g, dark.b)?; } Ok(()) } } #[derive(Debug)] /// A menu item's alternate mode or submenu. pub enum Extra { /// A menu item's alternate mode, shown when <key>⌥</key> is held. Alternate(Box<ContentItem>), //TODO make sure alts don't have submenus /// A submenu. Submenu(Menu), } /// Used by [`ContentItem::href`](ContentItem::href()). pub trait IntoUrl { /// Converts `self` into a [`Url`]. fn into_url(self) -> Result<Url, url::ParseError>; } impl IntoUrl for Url { fn into_url(self) -> Result<Url, url::ParseError> { Ok(self) } } impl IntoUrl for String { fn into_url(self) -> Result<Url, url::ParseError> { Url::parse(&self) } } impl<'a> IntoUrl for &'a str { fn into_url(self) -> Result<Url, url::ParseError> { Url::parse(self) } } #[cfg(feature = "url1")] #[cfg_attr(docsrs, doc(cfg(feature = "url1")))] impl IntoUrl for Url1 { fn into_url(self) -> Result<Url, url::ParseError> { Url::parse(self.as_str()) } } /// BitBar only supports up to five parameters for `bash=` commands (see <https://github.com/matryer/bitbar/issues/490>). 
#[derive(Debug)] pub struct Params { pub(crate) cmd: String, pub(crate) params: Vec<String>, } impl Params { #[doc(hidden)] // used in proc macro pub fn new(cmd: String, params: Vec<String>) -> Self { Self { cmd, params } } } macro_rules! params_from { ($n:literal$(, $elt:ident: $t:ident)*) => { impl<T: ToString> From<[T; $n]> for Params { fn from([cmd, $($elt),*]: [T; $n]) -> Params { Params { cmd: cmd.to_string(), params: vec![$($elt.to_string()),*], } } } impl<Cmd: ToString, $($t: ToString),*> From<(Cmd, $($t),*)> for Params { fn from((cmd, $($elt),*): (Cmd, $($t),*)) -> Params { Params { cmd: cmd.to_string(), params: vec![$($elt.to_string()),*], } } } }; } params_from!(1); params_from!(2, param1: A); params_from!(3, param1: A, param2: B); params_from!(4, param1: A, param2: B, param3: C); params_from!(5, param1: A, param2: B, param3: C, param4: D); params_from!(6, param1: A, param2: B, param3: C, param4: D, param5: E); impl<'a, T: ToString> TryFrom<&'a [T]> for Params { type Error = &'a [T]; fn try_from(slice: &[T]) -> Result<Params, &[T]> { match slice { [cmd] => Ok(Params { cmd: cmd.to_string(), params: Vec::default() }), [cmd, param1] => Ok(Params { cmd: cmd.to_string(), params: vec![param1.to_string()] }), [cmd, param1, param2] => Ok(Params { cmd: cmd.to_string(), params: vec![param1.to_string(), param2.to_string()] }), [cmd, param1, param2, param3] => Ok(Params { cmd: cmd.to_string(), params: vec![param1.to_string(), param2.to_string(), param3.to_string()] }), [cmd, param1, param2, param3, param4] => Ok(Params { cmd: cmd.to_string(), params: vec![param1.to_string(), param2.to_string(), param3.to_string(), param4.to_string()] }), [cmd, param1, param2, param3, param4, param5] => Ok(Params { cmd: cmd.to_string(), params: vec![param1.to_string(), param2.to_string(), param3.to_string(), param4.to_string(), param5.to_string()] }), slice => Err(slice), }
type Error = Vec<T>; fn try_from(mut v: Vec<T>) -> Result<Params, Vec<T>> { match v.len() { 1..=6 => Ok(Params { cmd: v.remove(0).to_string(), params: v.into_iter().map(|x| x.to_string()).collect(), }), _ => Err(v), } } } /// Used by [`ContentItem::command`](ContentItem::command()). /// /// A `Command` contains the [`Params`], which includes the actual command (called `bash=` by BitBar) and its parameters, and the value of `terminal=`. /// /// It is usually constructed via conversion, unless `terminal=true` is required. /// /// **Note:** Unlike BitBar's default of `true`, `Command` assumes a default of `terminal=false`. #[derive(Debug)] pub struct Command { pub(crate) params: Params, pub(crate) terminal: bool, } impl Command { /// Creates a `Command` with the `terminal=` value set to `true`. pub fn terminal(args: impl Into<Params>) -> Command { Command { params: args.into(), terminal: true, } } /// Attempts to construct a `Command` with `terminal=` set to `false` from the given arguments. /// /// This is not a `TryFrom` implementation due to a limitation in Rust. pub fn try_from<P: TryInto<Params>>(args: P) -> Result<Command, P::Error> { Ok(Command { params: args.try_into()?, terminal: false, }) } /// Same as `Command::terminal` but for types that might not convert to `Params`. pub fn try_terminal<P: TryInto<Params>>(args: P) -> Result<Command, P::Error> { Ok(Command { params: args.try_into()?, terminal: true, }) } } /// Converts an array containing a command string and 0–5 parameters to a command argument vector. The `terminal=` value will be `false`. impl<P: Into<Params>> From<P> for Command { fn from(args: P) -> Command { Command { params: args.into(), terminal: false, } } } /// Used by `ContentItem::image` and `ContentItem::template_image`. #[derive(Debug, Clone)] pub struct Image { /// The base64-encoded image data. pub base64_data: String, /// If this is `true`, the image will be used with BitBar's `templateImage=` instead of `image=`. pub is_template: bool, } impl Image { /// Constructs a template image, even if the `TryInto` implementation would otherwise construct a non-template image. pub fn template<T: TryInto<Image>>(img: T) -> Result<Image, T::Error> { let mut result = img.try_into()?; result.is_template = true; Ok(result) } } /// Converts already-encoded base64 data to a non-template image. impl From<String> for Image { fn from(base64_data: String) -> Image { Image { base64_data, is_template: false, } } } /// Converts a PNG file to a non-template image. #[cfg(feature = "base64")] #[cfg_attr(docsrs, doc(cfg(feature = "base64")))] impl From<Vec<u8>> for Image { fn from(input: Vec<u8>) -> Image { Image { base64_data: base64::encode(&input), is_template: false, } } } /// Converts a PNG file to a non-template image. #[cfg(feature = "base64")] #[cfg_attr(docsrs, doc(cfg(feature = "base64")))] impl<T:?Sized + AsRef<[u8]>> From<&T> for Image { fn from(input: &T) -> Image { Image { base64_data: base64::encode(input), is_template: false, } } } #[cfg(all(feature = "base64", feature = "image"))] #[cfg_attr(docsrs, doc(cfg(all(feature = "base64", feature = "image"))))] impl TryFrom<DynamicImage> for Image { type Error = ImageError; fn try_from(img: DynamicImage) -> ImageResult<Image> { let mut buf = Cursor::<Vec<_>>::default(); img.write_to(&mut buf, Png)?; Ok(Image::from(buf.into_inner())) } }
} } impl<T: ToString> TryFrom<Vec<T>> for Params {
random_line_split
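// Sketch of the `Color` conversions defined in the row above: CSS color strings
// parse via `FromStr`/`TryFrom<&str>`, and `Display` emits the
// `#rrggbb[,#rrggbb]` form. This assumes the underlying css color parser accepts
// the named color used here.
fn example_color() -> Result<(), ColorParseError> {
    let named: Color = "tomato".parse()?;
    let hex = Color::try_from("#ff6347")?;
    // Both spellings should describe the same light-theme color.
    assert_eq!(named, hex);
    assert_eq!(named.to_string(), "#ff6347");
    Ok(())
}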
state.rs
use std::sync::Mutex; use wasm_bindgen::prelude::*; use web_sys::CanvasRenderingContext2d; #[wasm_bindgen] extern "C" { pub type TimeoutId; #[wasm_bindgen(js_name = "setTimeout")] pub fn set_timeout_inner(cb: &JsValue, timeout: f64) -> TimeoutId; #[wasm_bindgen(js_name = "clearTimeout")] pub fn clear_timeout(id: &TimeoutId); } pub fn set_timeout<T: FnOnce() +'static>(cb: T, timeout: f64) -> TimeoutId { set_timeout_inner(&Closure::once_into_js(cb), timeout) } pub struct State { pub hide_timeout: Option<TimeoutId>, pub render_id: usize, pub last_rendered: usize, pub ctx: CanvasRenderingContext2d, pub image_data: web_sys::ImageData, pub config: shared::Config, pub history: Vec<shared::Config>, pub history_index: usize, pub last_rendered_config: Option<shared::Config>, pub buffer: Vec<u32>, pub ui: crate::ui::UiState, pub hist_canvas: Option<web_sys::HtmlCanvasElement>, pub on_change: js_sys::Function, pub workers: Vec<(web_sys::Worker, bool, Option<shared::messaging::Message>)>, } // umm I dunno if this is cheating or something // I mean bad things could happen if I accessed the ctx // from different threads // but given that wasm doesn't yet have threads, it's probably fine. unsafe impl Send for State {} impl State { pub fn new(config: shared::Config, on_change: js_sys::Function) -> Self { State { hide_timeout: None, render_id: 0, hist_canvas: None, last_rendered: 0, ctx: crate::ui::init(&config).expect("Unable to setup canvas"), image_data: web_sys::ImageData::new_with_sw( config.rendering.width as u32, config.rendering.height as u32, ) .expect("Can't make an imagedata"), buffer: vec![0_u32; config.rendering.width * config.rendering.height], workers: vec![], ui: Default::default(), history: vec![config.clone()], history_index: 0, last_rendered_config: None, on_change, config, } } } pub fn make_image_data( config: &shared::Config, bright: &[u32], ) -> Result<web_sys::ImageData, JsValue> { let colored = shared::colorize(config, bright); let mut clamped = wasm_bindgen::Clamped(colored.clone()); // let mut clamped = Clamped(state.buffer.clone()); let data = web_sys::ImageData::new_with_u8_clamped_array_and_sh( wasm_bindgen::Clamped(clamped.as_mut_slice()), config.rendering.width as u32, config.rendering.height as u32, )?; Ok(data) } impl State { pub fn reset_buffer(&mut self) { self.buffer = vec![0_u32; self.config.rendering.width * self.config.rendering.height]; self.invalidate_past_renders(); } pub fn add_worker(&mut self, worker: web_sys::Worker) { self.workers.push((worker, false, None)) } pub fn invalidate_past_renders(&mut self) { self.render_id += 1; self.last_rendered = self.render_id; } pub fn undo(&mut self) -> Result<(), JsValue> { log!("Undo {} {}", self.history.len(), self.history_index); if self.history_index == 0 { if Some(&self.config)!= self.history.last() { self.history.push(self.config.clone()); } } self.history_index = (self.history_index + 1).min(self.history.len() - 1); if let Some(config) = self .history .get(self.history.len() - self.history_index - 1) { self.config = config.clone(); self.async_render(false)?; } Ok(()) } pub fn redo(&mut self) -> Result<(), JsValue> { if self.history_index == 0 { log!("nothing to redo"); return Ok(()); } log!("redo"); self.history_index = (self.history_index - 1).max(0); if let Some(config) = self .history .get(self.history.len() - self.history_index - 1) { self.config = config.clone(); self.async_render(false)?; } Ok(()) } pub fn maybe_save_history(&mut self) { log!("saving history"); // If the lastest is the same if self.history_index 
== 0 && self .history .last() .map_or(false, |last| *last == self.config) { return; } if self.history_index!= 0 && self .history .get(self.history.len() - self.history_index - 1) .map_or(false, |last| *last == self.config) { return; } // snip undone stuff if self.history_index!= 0 { self.history = self.history[0..self.history.len() - self.history_index].to_vec(); self.history_index = 0; } // if self.history.last().map_or(true, |last| *last!= self.config) { self.history.push(self.config.clone()); if self.history.len() > 500 { // trim to 500 len self.history = self.history[self.history.len() - 500..].to_vec(); } // } } pub fn handle_render( &mut self, worker: usize, id: usize, array: js_sys::Uint32Array, ) -> Result<(), JsValue> { if id < self.last_rendered
if id > self.last_rendered { self.reset_buffer(); self.last_rendered = id; } let mut bright = vec![0_u32; self.config.rendering.width * self.config.rendering.height]; array.copy_to(&mut bright); for i in 0..bright.len() { self.buffer[i] += bright[i]; } self.image_data = make_image_data(&self.config, &self.buffer)?; // crate::ui::use_ui(|ui| { // crate::ui::draw(ui, &self) // }); // self.ctx.put_image_data(&self.image_data, 0.0, 0.0)?; let (worker, busy, queued) = &mut self.workers[worker]; match queued { None => { // log!("Finished a thread"); *busy = false } Some(message) => { // log!("Sending a new config to render"); worker.post_message(&JsValue::from_serde(message).unwrap())?; *queued = None } } Ok(()) } pub fn debug_render(&mut self) -> Result<(), JsValue> { let brightness = shared::calculate::deterministic_calc(&self.config); self.image_data = make_image_data(&self.config, &brightness)?; self.ctx.put_image_data(&self.image_data, 0.0, 0.0)?; Ok(()) } pub fn clear(&mut self) { self.ctx.clear_rect( 0.0, 0.0, self.config.rendering.width as f64, self.config.rendering.height as f64, ) } pub fn reexpose(&mut self) -> Result<(), JsValue> { self.image_data = make_image_data(&self.config, &self.buffer)?; // self.ctx.put_image_data(&self.image_data, 0.0, 0.0)?; // crate::ui::use_ui(|ui| { crate::ui::draw(&self); // }); Ok(()) } pub fn send_on_change(&self) { let _res = self.on_change.call2( &JsValue::null(), &JsValue::from_serde(&self.config).unwrap(), &JsValue::from_serde(&self.ui).unwrap(), ); } pub fn async_render(&mut self, small: bool) -> Result<(), JsValue> { // log!("Async nreder folks"); match &self.last_rendered_config { Some(config) => { if *config == self.config { return Ok(()); } let mut old_config_with_new_exposure = config.clone(); old_config_with_new_exposure.rendering.exposure = self.config.rendering.exposure.clone(); old_config_with_new_exposure.rendering.coloration = self.config.rendering.coloration.clone(); // We've only changed settings that don't require recalculation if old_config_with_new_exposure == self.config { self.last_rendered_config = Some(self.config.clone()); self.send_on_change(); self.reexpose(); return Ok(()); } else { log!("Not the same") // log!("Not the same! {} vs {}", old_json, json) } } _ => (), } // log!("Render new config"); // web_sys::console::log_1(&JsValue::from_serde(&self.config).unwrap()); self.send_on_change(); self.last_rendered_config = Some(self.config.clone()); self.render_id += 1; let message = shared::messaging::Message { config: self.config.clone(), id: self.render_id, // count: if small { 10_000 } else { 500_000 }, count: 200_000, }; if self.workers.is_empty() { return self.debug_render(); } for (worker, busy, queued) in self.workers.iter_mut() { if *busy { // log!("Queueing up for a worker"); *queued = Some(message.clone()) } else { *busy = true; // log!("Sending a new config to render"); worker.post_message(&JsValue::from_serde(&message).unwrap())?; } } Ok(()) } } lazy_static! { static ref STATE: Mutex<Option<State>> = Mutex::new(None); } pub fn with_opt_state<F: FnOnce(&mut Option<State>)>(f: F) { f(&mut STATE.lock().unwrap()) } pub fn set_state(state: State) { with_opt_state(|wrapper| *wrapper = Some(state)) } pub fn has_state() -> bool { match STATE.lock().unwrap().as_mut() { Some(_) => true, None => false, } } pub fn with<R, F: FnOnce(&mut State) -> R>(f: F) -> R { match STATE.lock().unwrap().as_mut() { Some(mut state) => f(&mut state), None => { log!("!!! 
Error: tried to handle state, but no state found"); panic!("No state found, must set state first") } } } pub fn maybe_with<F: FnOnce(&mut State)>(f: F) { match STATE.lock().unwrap().as_mut() { Some(mut state) => f(&mut state), None => (), } } pub fn try_with<F: FnOnce(&mut State) -> Result<(), wasm_bindgen::prelude::JsValue>>(f: F) { with(|state| crate::utils::try_log(|| f(state))) }
{ let (worker, busy, queued) = &mut self.workers[worker]; match queued { None => { // log!("Finished a thread"); *busy = false } Some(message) => { // log!("Sending a new config to render"); worker.post_message(&JsValue::from_serde(message).unwrap())?; *queued = None } } // this is old data, disregard return Ok(()); }
conditional_block
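// Sketch of the undo/redo indexing in the row above: `history_index == 0` means
// the live config is at the tip of `history`; `undo` grows the index (pushing the
// current config first if it is new) and `redo` shrinks it. This helper only
// restates the slot arithmetic used by `undo`/`redo` for illustration.
fn history_slot(history_len: usize, history_index: usize) -> Option<usize> {
    // The element that undo/redo will restore: history[len - index - 1].
    history_len.checked_sub(history_index + 1)
}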
state.rs
use std::sync::Mutex; use wasm_bindgen::prelude::*; use web_sys::CanvasRenderingContext2d; #[wasm_bindgen] extern "C" { pub type TimeoutId; #[wasm_bindgen(js_name = "setTimeout")] pub fn set_timeout_inner(cb: &JsValue, timeout: f64) -> TimeoutId; #[wasm_bindgen(js_name = "clearTimeout")] pub fn clear_timeout(id: &TimeoutId); } pub fn set_timeout<T: FnOnce() +'static>(cb: T, timeout: f64) -> TimeoutId { set_timeout_inner(&Closure::once_into_js(cb), timeout) } pub struct State { pub hide_timeout: Option<TimeoutId>, pub render_id: usize, pub last_rendered: usize, pub ctx: CanvasRenderingContext2d, pub image_data: web_sys::ImageData, pub config: shared::Config, pub history: Vec<shared::Config>, pub history_index: usize, pub last_rendered_config: Option<shared::Config>, pub buffer: Vec<u32>, pub ui: crate::ui::UiState, pub hist_canvas: Option<web_sys::HtmlCanvasElement>, pub on_change: js_sys::Function, pub workers: Vec<(web_sys::Worker, bool, Option<shared::messaging::Message>)>, } // umm I dunno if this is cheating or something // I mean bad things could happen if I accessed the ctx // from different threads // but given that wasm doesn't yet have threads, it's probably fine. unsafe impl Send for State {} impl State { pub fn new(config: shared::Config, on_change: js_sys::Function) -> Self { State { hide_timeout: None, render_id: 0, hist_canvas: None, last_rendered: 0, ctx: crate::ui::init(&config).expect("Unable to setup canvas"), image_data: web_sys::ImageData::new_with_sw( config.rendering.width as u32, config.rendering.height as u32, ) .expect("Can't make an imagedata"), buffer: vec![0_u32; config.rendering.width * config.rendering.height], workers: vec![], ui: Default::default(), history: vec![config.clone()], history_index: 0, last_rendered_config: None, on_change, config, } } } pub fn make_image_data( config: &shared::Config, bright: &[u32], ) -> Result<web_sys::ImageData, JsValue> { let colored = shared::colorize(config, bright); let mut clamped = wasm_bindgen::Clamped(colored.clone()); // let mut clamped = Clamped(state.buffer.clone()); let data = web_sys::ImageData::new_with_u8_clamped_array_and_sh( wasm_bindgen::Clamped(clamped.as_mut_slice()), config.rendering.width as u32, config.rendering.height as u32, )?; Ok(data) } impl State { pub fn reset_buffer(&mut self) { self.buffer = vec![0_u32; self.config.rendering.width * self.config.rendering.height]; self.invalidate_past_renders(); } pub fn add_worker(&mut self, worker: web_sys::Worker) { self.workers.push((worker, false, None)) } pub fn invalidate_past_renders(&mut self) { self.render_id += 1; self.last_rendered = self.render_id; } pub fn undo(&mut self) -> Result<(), JsValue> { log!("Undo {} {}", self.history.len(), self.history_index); if self.history_index == 0 { if Some(&self.config)!= self.history.last() { self.history.push(self.config.clone()); } } self.history_index = (self.history_index + 1).min(self.history.len() - 1); if let Some(config) = self .history .get(self.history.len() - self.history_index - 1) { self.config = config.clone(); self.async_render(false)?; } Ok(()) } pub fn redo(&mut self) -> Result<(), JsValue> { if self.history_index == 0 { log!("nothing to redo"); return Ok(()); } log!("redo"); self.history_index = (self.history_index - 1).max(0); if let Some(config) = self .history .get(self.history.len() - self.history_index - 1) { self.config = config.clone(); self.async_render(false)?; } Ok(()) } pub fn maybe_save_history(&mut self) { log!("saving history"); // If the lastest is the same if self.history_index 
== 0 && self .history .last() .map_or(false, |last| *last == self.config) { return; } if self.history_index!= 0 && self .history .get(self.history.len() - self.history_index - 1) .map_or(false, |last| *last == self.config) { return; } // snip undone stuff if self.history_index!= 0 { self.history = self.history[0..self.history.len() - self.history_index].to_vec(); self.history_index = 0; } // if self.history.last().map_or(true, |last| *last!= self.config) { self.history.push(self.config.clone()); if self.history.len() > 500 { // trim to 500 len self.history = self.history[self.history.len() - 500..].to_vec(); } // } } pub fn handle_render( &mut self, worker: usize, id: usize, array: js_sys::Uint32Array, ) -> Result<(), JsValue> { if id < self.last_rendered { let (worker, busy, queued) = &mut self.workers[worker]; match queued { None => { // log!("Finished a thread"); *busy = false } Some(message) => { // log!("Sending a new config to render"); worker.post_message(&JsValue::from_serde(message).unwrap())?; *queued = None } } // this is old data, disregard return Ok(()); } if id > self.last_rendered { self.reset_buffer(); self.last_rendered = id; } let mut bright = vec![0_u32; self.config.rendering.width * self.config.rendering.height]; array.copy_to(&mut bright); for i in 0..bright.len() { self.buffer[i] += bright[i]; } self.image_data = make_image_data(&self.config, &self.buffer)?; // crate::ui::use_ui(|ui| { // crate::ui::draw(ui, &self) // }); // self.ctx.put_image_data(&self.image_data, 0.0, 0.0)?; let (worker, busy, queued) = &mut self.workers[worker]; match queued { None => { // log!("Finished a thread"); *busy = false } Some(message) => { // log!("Sending a new config to render"); worker.post_message(&JsValue::from_serde(message).unwrap())?; *queued = None } } Ok(()) } pub fn debug_render(&mut self) -> Result<(), JsValue> { let brightness = shared::calculate::deterministic_calc(&self.config); self.image_data = make_image_data(&self.config, &brightness)?; self.ctx.put_image_data(&self.image_data, 0.0, 0.0)?; Ok(()) } pub fn clear(&mut self) { self.ctx.clear_rect( 0.0, 0.0, self.config.rendering.width as f64, self.config.rendering.height as f64, ) } pub fn reexpose(&mut self) -> Result<(), JsValue> { self.image_data = make_image_data(&self.config, &self.buffer)?; // self.ctx.put_image_data(&self.image_data, 0.0, 0.0)?; // crate::ui::use_ui(|ui| { crate::ui::draw(&self); // }); Ok(()) } pub fn send_on_change(&self) { let _res = self.on_change.call2( &JsValue::null(), &JsValue::from_serde(&self.config).unwrap(), &JsValue::from_serde(&self.ui).unwrap(), ); } pub fn async_render(&mut self, small: bool) -> Result<(), JsValue> { // log!("Async nreder folks"); match &self.last_rendered_config { Some(config) => { if *config == self.config { return Ok(()); } let mut old_config_with_new_exposure = config.clone(); old_config_with_new_exposure.rendering.exposure = self.config.rendering.exposure.clone(); old_config_with_new_exposure.rendering.coloration = self.config.rendering.coloration.clone(); // We've only changed settings that don't require recalculation if old_config_with_new_exposure == self.config { self.last_rendered_config = Some(self.config.clone()); self.send_on_change(); self.reexpose(); return Ok(()); } else { log!("Not the same") // log!("Not the same! 
{} vs {}", old_json, json) } } _ => (), } // log!("Render new config"); // web_sys::console::log_1(&JsValue::from_serde(&self.config).unwrap()); self.send_on_change(); self.last_rendered_config = Some(self.config.clone()); self.render_id += 1; let message = shared::messaging::Message { config: self.config.clone(), id: self.render_id, // count: if small { 10_000 } else { 500_000 }, count: 200_000, }; if self.workers.is_empty() { return self.debug_render(); } for (worker, busy, queued) in self.workers.iter_mut() { if *busy { // log!("Queueing up for a worker"); *queued = Some(message.clone()) } else { *busy = true; // log!("Sending a new config to render"); worker.post_message(&JsValue::from_serde(&message).unwrap())?; } } Ok(()) } } lazy_static! { static ref STATE: Mutex<Option<State>> = Mutex::new(None); } pub fn with_opt_state<F: FnOnce(&mut Option<State>)>(f: F) { f(&mut STATE.lock().unwrap()) } pub fn set_state(state: State) { with_opt_state(|wrapper| *wrapper = Some(state)) } pub fn
() -> bool { match STATE.lock().unwrap().as_mut() { Some(_) => true, None => false, } } pub fn with<R, F: FnOnce(&mut State) -> R>(f: F) -> R { match STATE.lock().unwrap().as_mut() { Some(mut state) => f(&mut state), None => { log!("!!! Error: tried to handle state, but no state found"); panic!("No state found, must set state first") } } } pub fn maybe_with<F: FnOnce(&mut State)>(f: F) { match STATE.lock().unwrap().as_mut() { Some(mut state) => f(&mut state), None => (), } } pub fn try_with<F: FnOnce(&mut State) -> Result<(), wasm_bindgen::prelude::JsValue>>(f: F) { with(|state| crate::utils::try_log(|| f(state))) }
has_state
identifier_name
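// Usage sketch for the global-state helpers in the row above: the `State` lives
// in a `Mutex<Option<State>>`, so callers borrow it through `with`/`maybe_with`
// instead of holding references across calls. Bumping `render_id` is just an
// example mutation.
fn bump_render_id_if_initialized() {
    maybe_with(|state| {
        state.render_id += 1;
    });
}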
state.rs
use std::sync::Mutex; use wasm_bindgen::prelude::*; use web_sys::CanvasRenderingContext2d; #[wasm_bindgen] extern "C" { pub type TimeoutId; #[wasm_bindgen(js_name = "setTimeout")] pub fn set_timeout_inner(cb: &JsValue, timeout: f64) -> TimeoutId; #[wasm_bindgen(js_name = "clearTimeout")] pub fn clear_timeout(id: &TimeoutId); } pub fn set_timeout<T: FnOnce() +'static>(cb: T, timeout: f64) -> TimeoutId { set_timeout_inner(&Closure::once_into_js(cb), timeout) } pub struct State { pub hide_timeout: Option<TimeoutId>, pub render_id: usize, pub last_rendered: usize, pub ctx: CanvasRenderingContext2d, pub image_data: web_sys::ImageData, pub config: shared::Config, pub history: Vec<shared::Config>, pub history_index: usize, pub last_rendered_config: Option<shared::Config>, pub buffer: Vec<u32>, pub ui: crate::ui::UiState, pub hist_canvas: Option<web_sys::HtmlCanvasElement>, pub on_change: js_sys::Function, pub workers: Vec<(web_sys::Worker, bool, Option<shared::messaging::Message>)>, } // umm I dunno if this is cheating or something // I mean bad things could happen if I accessed the ctx // from different threads // but given that wasm doesn't yet have threads, it's probably fine. unsafe impl Send for State {} impl State { pub fn new(config: shared::Config, on_change: js_sys::Function) -> Self { State { hide_timeout: None, render_id: 0, hist_canvas: None, last_rendered: 0, ctx: crate::ui::init(&config).expect("Unable to setup canvas"), image_data: web_sys::ImageData::new_with_sw( config.rendering.width as u32, config.rendering.height as u32, ) .expect("Can't make an imagedata"), buffer: vec![0_u32; config.rendering.width * config.rendering.height], workers: vec![], ui: Default::default(), history: vec![config.clone()], history_index: 0, last_rendered_config: None, on_change, config, } } } pub fn make_image_data( config: &shared::Config, bright: &[u32], ) -> Result<web_sys::ImageData, JsValue> { let colored = shared::colorize(config, bright); let mut clamped = wasm_bindgen::Clamped(colored.clone()); // let mut clamped = Clamped(state.buffer.clone()); let data = web_sys::ImageData::new_with_u8_clamped_array_and_sh( wasm_bindgen::Clamped(clamped.as_mut_slice()), config.rendering.width as u32, config.rendering.height as u32, )?; Ok(data) } impl State { pub fn reset_buffer(&mut self)
pub fn add_worker(&mut self, worker: web_sys::Worker) { self.workers.push((worker, false, None)) } pub fn invalidate_past_renders(&mut self) { self.render_id += 1; self.last_rendered = self.render_id; } pub fn undo(&mut self) -> Result<(), JsValue> { log!("Undo {} {}", self.history.len(), self.history_index); if self.history_index == 0 { if Some(&self.config)!= self.history.last() { self.history.push(self.config.clone()); } } self.history_index = (self.history_index + 1).min(self.history.len() - 1); if let Some(config) = self .history .get(self.history.len() - self.history_index - 1) { self.config = config.clone(); self.async_render(false)?; } Ok(()) } pub fn redo(&mut self) -> Result<(), JsValue> { if self.history_index == 0 { log!("nothing to redo"); return Ok(()); } log!("redo"); self.history_index = (self.history_index - 1).max(0); if let Some(config) = self .history .get(self.history.len() - self.history_index - 1) { self.config = config.clone(); self.async_render(false)?; } Ok(()) } pub fn maybe_save_history(&mut self) { log!("saving history"); // If the lastest is the same if self.history_index == 0 && self .history .last() .map_or(false, |last| *last == self.config) { return; } if self.history_index!= 0 && self .history .get(self.history.len() - self.history_index - 1) .map_or(false, |last| *last == self.config) { return; } // snip undone stuff if self.history_index!= 0 { self.history = self.history[0..self.history.len() - self.history_index].to_vec(); self.history_index = 0; } // if self.history.last().map_or(true, |last| *last!= self.config) { self.history.push(self.config.clone()); if self.history.len() > 500 { // trim to 500 len self.history = self.history[self.history.len() - 500..].to_vec(); } // } } pub fn handle_render( &mut self, worker: usize, id: usize, array: js_sys::Uint32Array, ) -> Result<(), JsValue> { if id < self.last_rendered { let (worker, busy, queued) = &mut self.workers[worker]; match queued { None => { // log!("Finished a thread"); *busy = false } Some(message) => { // log!("Sending a new config to render"); worker.post_message(&JsValue::from_serde(message).unwrap())?; *queued = None } } // this is old data, disregard return Ok(()); } if id > self.last_rendered { self.reset_buffer(); self.last_rendered = id; } let mut bright = vec![0_u32; self.config.rendering.width * self.config.rendering.height]; array.copy_to(&mut bright); for i in 0..bright.len() { self.buffer[i] += bright[i]; } self.image_data = make_image_data(&self.config, &self.buffer)?; // crate::ui::use_ui(|ui| { // crate::ui::draw(ui, &self) // }); // self.ctx.put_image_data(&self.image_data, 0.0, 0.0)?; let (worker, busy, queued) = &mut self.workers[worker]; match queued { None => { // log!("Finished a thread"); *busy = false } Some(message) => { // log!("Sending a new config to render"); worker.post_message(&JsValue::from_serde(message).unwrap())?; *queued = None } } Ok(()) } pub fn debug_render(&mut self) -> Result<(), JsValue> { let brightness = shared::calculate::deterministic_calc(&self.config); self.image_data = make_image_data(&self.config, &brightness)?; self.ctx.put_image_data(&self.image_data, 0.0, 0.0)?; Ok(()) } pub fn clear(&mut self) { self.ctx.clear_rect( 0.0, 0.0, self.config.rendering.width as f64, self.config.rendering.height as f64, ) } pub fn reexpose(&mut self) -> Result<(), JsValue> { self.image_data = make_image_data(&self.config, &self.buffer)?; // self.ctx.put_image_data(&self.image_data, 0.0, 0.0)?; // crate::ui::use_ui(|ui| { crate::ui::draw(&self); // }); Ok(()) } pub 
fn send_on_change(&self) { let _res = self.on_change.call2( &JsValue::null(), &JsValue::from_serde(&self.config).unwrap(), &JsValue::from_serde(&self.ui).unwrap(), ); } pub fn async_render(&mut self, small: bool) -> Result<(), JsValue> { // log!("Async nreder folks"); match &self.last_rendered_config { Some(config) => { if *config == self.config { return Ok(()); } let mut old_config_with_new_exposure = config.clone(); old_config_with_new_exposure.rendering.exposure = self.config.rendering.exposure.clone(); old_config_with_new_exposure.rendering.coloration = self.config.rendering.coloration.clone(); // We've only changed settings that don't require recalculation if old_config_with_new_exposure == self.config { self.last_rendered_config = Some(self.config.clone()); self.send_on_change(); self.reexpose(); return Ok(()); } else { log!("Not the same") // log!("Not the same! {} vs {}", old_json, json) } } _ => (), } // log!("Render new config"); // web_sys::console::log_1(&JsValue::from_serde(&self.config).unwrap()); self.send_on_change(); self.last_rendered_config = Some(self.config.clone()); self.render_id += 1; let message = shared::messaging::Message { config: self.config.clone(), id: self.render_id, // count: if small { 10_000 } else { 500_000 }, count: 200_000, }; if self.workers.is_empty() { return self.debug_render(); } for (worker, busy, queued) in self.workers.iter_mut() { if *busy { // log!("Queueing up for a worker"); *queued = Some(message.clone()) } else { *busy = true; // log!("Sending a new config to render"); worker.post_message(&JsValue::from_serde(&message).unwrap())?; } } Ok(()) } } lazy_static! { static ref STATE: Mutex<Option<State>> = Mutex::new(None); } pub fn with_opt_state<F: FnOnce(&mut Option<State>)>(f: F) { f(&mut STATE.lock().unwrap()) } pub fn set_state(state: State) { with_opt_state(|wrapper| *wrapper = Some(state)) } pub fn has_state() -> bool { match STATE.lock().unwrap().as_mut() { Some(_) => true, None => false, } } pub fn with<R, F: FnOnce(&mut State) -> R>(f: F) -> R { match STATE.lock().unwrap().as_mut() { Some(mut state) => f(&mut state), None => { log!("!!! Error: tried to handle state, but no state found"); panic!("No state found, must set state first") } } } pub fn maybe_with<F: FnOnce(&mut State)>(f: F) { match STATE.lock().unwrap().as_mut() { Some(mut state) => f(&mut state), None => (), } } pub fn try_with<F: FnOnce(&mut State) -> Result<(), wasm_bindgen::prelude::JsValue>>(f: F) { with(|state| crate::utils::try_log(|| f(state))) }
{ self.buffer = vec![0_u32; self.config.rendering.width * self.config.rendering.height]; self.invalidate_past_renders(); }
identifier_body
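// Sketch of the fast path inside `async_render` in the row above: when only
// exposure or coloration changed, the cached brightness buffer is re-exposed
// instead of re-rendered. This helper restates that comparison; it assumes
// `shared::Config` is `Clone + PartialEq`, as the surrounding code already requires.
fn only_exposure_changed(old: &shared::Config, new: &shared::Config) -> bool {
    let mut old_with_new_exposure = old.clone();
    old_with_new_exposure.rendering.exposure = new.rendering.exposure.clone();
    old_with_new_exposure.rendering.coloration = new.rendering.coloration.clone();
    old_with_new_exposure == *new
}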
state.rs
use std::sync::Mutex; use wasm_bindgen::prelude::*; use web_sys::CanvasRenderingContext2d; #[wasm_bindgen] extern "C" { pub type TimeoutId; #[wasm_bindgen(js_name = "setTimeout")] pub fn set_timeout_inner(cb: &JsValue, timeout: f64) -> TimeoutId; #[wasm_bindgen(js_name = "clearTimeout")] pub fn clear_timeout(id: &TimeoutId); } pub fn set_timeout<T: FnOnce() +'static>(cb: T, timeout: f64) -> TimeoutId { set_timeout_inner(&Closure::once_into_js(cb), timeout) } pub struct State { pub hide_timeout: Option<TimeoutId>, pub render_id: usize, pub last_rendered: usize, pub ctx: CanvasRenderingContext2d, pub image_data: web_sys::ImageData, pub config: shared::Config, pub history: Vec<shared::Config>, pub history_index: usize, pub last_rendered_config: Option<shared::Config>, pub buffer: Vec<u32>, pub ui: crate::ui::UiState, pub hist_canvas: Option<web_sys::HtmlCanvasElement>, pub on_change: js_sys::Function, pub workers: Vec<(web_sys::Worker, bool, Option<shared::messaging::Message>)>, } // umm I dunno if this is cheating or something // I mean bad things could happen if I accessed the ctx // from different threads // but given that wasm doesn't yet have threads, it's probably fine. unsafe impl Send for State {} impl State { pub fn new(config: shared::Config, on_change: js_sys::Function) -> Self { State { hide_timeout: None, render_id: 0, hist_canvas: None, last_rendered: 0, ctx: crate::ui::init(&config).expect("Unable to setup canvas"), image_data: web_sys::ImageData::new_with_sw( config.rendering.width as u32, config.rendering.height as u32, ) .expect("Can't make an imagedata"), buffer: vec![0_u32; config.rendering.width * config.rendering.height], workers: vec![], ui: Default::default(), history: vec![config.clone()], history_index: 0, last_rendered_config: None, on_change, config, } } } pub fn make_image_data( config: &shared::Config, bright: &[u32], ) -> Result<web_sys::ImageData, JsValue> { let colored = shared::colorize(config, bright); let mut clamped = wasm_bindgen::Clamped(colored.clone()); // let mut clamped = Clamped(state.buffer.clone()); let data = web_sys::ImageData::new_with_u8_clamped_array_and_sh( wasm_bindgen::Clamped(clamped.as_mut_slice()), config.rendering.width as u32, config.rendering.height as u32, )?; Ok(data) } impl State { pub fn reset_buffer(&mut self) { self.buffer = vec![0_u32; self.config.rendering.width * self.config.rendering.height]; self.invalidate_past_renders(); } pub fn add_worker(&mut self, worker: web_sys::Worker) { self.workers.push((worker, false, None)) } pub fn invalidate_past_renders(&mut self) { self.render_id += 1; self.last_rendered = self.render_id; } pub fn undo(&mut self) -> Result<(), JsValue> { log!("Undo {} {}", self.history.len(), self.history_index); if self.history_index == 0 { if Some(&self.config)!= self.history.last() { self.history.push(self.config.clone()); } } self.history_index = (self.history_index + 1).min(self.history.len() - 1); if let Some(config) = self .history .get(self.history.len() - self.history_index - 1) { self.config = config.clone(); self.async_render(false)?; } Ok(()) } pub fn redo(&mut self) -> Result<(), JsValue> { if self.history_index == 0 { log!("nothing to redo"); return Ok(()); } log!("redo"); self.history_index = (self.history_index - 1).max(0); if let Some(config) = self .history .get(self.history.len() - self.history_index - 1) { self.config = config.clone(); self.async_render(false)?; } Ok(()) } pub fn maybe_save_history(&mut self) { log!("saving history"); // If the lastest is the same if self.history_index 
== 0 && self .history .last() .map_or(false, |last| *last == self.config) { return; } if self.history_index!= 0 && self .history .get(self.history.len() - self.history_index - 1) .map_or(false, |last| *last == self.config) { return; } // snip undone stuff if self.history_index!= 0 { self.history = self.history[0..self.history.len() - self.history_index].to_vec(); self.history_index = 0; } // if self.history.last().map_or(true, |last| *last!= self.config) { self.history.push(self.config.clone()); if self.history.len() > 500 { // trim to 500 len self.history = self.history[self.history.len() - 500..].to_vec(); }
pub fn handle_render( &mut self, worker: usize, id: usize, array: js_sys::Uint32Array, ) -> Result<(), JsValue> { if id < self.last_rendered { let (worker, busy, queued) = &mut self.workers[worker]; match queued { None => { // log!("Finished a thread"); *busy = false } Some(message) => { // log!("Sending a new config to render"); worker.post_message(&JsValue::from_serde(message).unwrap())?; *queued = None } } // this is old data, disregard return Ok(()); } if id > self.last_rendered { self.reset_buffer(); self.last_rendered = id; } let mut bright = vec![0_u32; self.config.rendering.width * self.config.rendering.height]; array.copy_to(&mut bright); for i in 0..bright.len() { self.buffer[i] += bright[i]; } self.image_data = make_image_data(&self.config, &self.buffer)?; // crate::ui::use_ui(|ui| { // crate::ui::draw(ui, &self) // }); // self.ctx.put_image_data(&self.image_data, 0.0, 0.0)?; let (worker, busy, queued) = &mut self.workers[worker]; match queued { None => { // log!("Finished a thread"); *busy = false } Some(message) => { // log!("Sending a new config to render"); worker.post_message(&JsValue::from_serde(message).unwrap())?; *queued = None } } Ok(()) } pub fn debug_render(&mut self) -> Result<(), JsValue> { let brightness = shared::calculate::deterministic_calc(&self.config); self.image_data = make_image_data(&self.config, &brightness)?; self.ctx.put_image_data(&self.image_data, 0.0, 0.0)?; Ok(()) } pub fn clear(&mut self) { self.ctx.clear_rect( 0.0, 0.0, self.config.rendering.width as f64, self.config.rendering.height as f64, ) } pub fn reexpose(&mut self) -> Result<(), JsValue> { self.image_data = make_image_data(&self.config, &self.buffer)?; // self.ctx.put_image_data(&self.image_data, 0.0, 0.0)?; // crate::ui::use_ui(|ui| { crate::ui::draw(&self); // }); Ok(()) } pub fn send_on_change(&self) { let _res = self.on_change.call2( &JsValue::null(), &JsValue::from_serde(&self.config).unwrap(), &JsValue::from_serde(&self.ui).unwrap(), ); } pub fn async_render(&mut self, small: bool) -> Result<(), JsValue> { // log!("Async nreder folks"); match &self.last_rendered_config { Some(config) => { if *config == self.config { return Ok(()); } let mut old_config_with_new_exposure = config.clone(); old_config_with_new_exposure.rendering.exposure = self.config.rendering.exposure.clone(); old_config_with_new_exposure.rendering.coloration = self.config.rendering.coloration.clone(); // We've only changed settings that don't require recalculation if old_config_with_new_exposure == self.config { self.last_rendered_config = Some(self.config.clone()); self.send_on_change(); self.reexpose(); return Ok(()); } else { log!("Not the same") // log!("Not the same! {} vs {}", old_json, json) } } _ => (), } // log!("Render new config"); // web_sys::console::log_1(&JsValue::from_serde(&self.config).unwrap()); self.send_on_change(); self.last_rendered_config = Some(self.config.clone()); self.render_id += 1; let message = shared::messaging::Message { config: self.config.clone(), id: self.render_id, // count: if small { 10_000 } else { 500_000 }, count: 200_000, }; if self.workers.is_empty() { return self.debug_render(); } for (worker, busy, queued) in self.workers.iter_mut() { if *busy { // log!("Queueing up for a worker"); *queued = Some(message.clone()) } else { *busy = true; // log!("Sending a new config to render"); worker.post_message(&JsValue::from_serde(&message).unwrap())?; } } Ok(()) } } lazy_static! 
{ static ref STATE: Mutex<Option<State>> = Mutex::new(None); } pub fn with_opt_state<F: FnOnce(&mut Option<State>)>(f: F) { f(&mut STATE.lock().unwrap()) } pub fn set_state(state: State) { with_opt_state(|wrapper| *wrapper = Some(state)) } pub fn has_state() -> bool { match STATE.lock().unwrap().as_mut() { Some(_) => true, None => false, } } pub fn with<R, F: FnOnce(&mut State) -> R>(f: F) -> R { match STATE.lock().unwrap().as_mut() { Some(mut state) => f(&mut state), None => { log!("!!! Error: tried to handle state, but no state found"); panic!("No state found, must set state first") } } } pub fn maybe_with<F: FnOnce(&mut State)>(f: F) { match STATE.lock().unwrap().as_mut() { Some(mut state) => f(&mut state), None => (), } } pub fn try_with<F: FnOnce(&mut State) -> Result<(), wasm_bindgen::prelude::JsValue>>(f: F) { with(|state| crate::utils::try_log(|| f(state))) }
// } }
random_line_split
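// Sketch of the worker dispatch in `async_render` from the row above: idle
// workers get the render message immediately and are marked busy; busy workers
// keep at most one pending message, with newer requests replacing older queued
// ones. Types are simplified to a plain value for illustration.
fn dispatch_simplified(workers: &mut [(bool, Option<usize>)], message: usize) {
    for (busy, queued) in workers.iter_mut() {
        if *busy {
            *queued = Some(message); // replaces any previously queued request
        } else {
            *busy = true;
            // In the real code this is where `worker.post_message(...)` is called.
        }
    }
}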
lib.rs
#![cfg_attr(not(feature = "std"), no_std)] //! Kalman filter and Rauch-Tung-Striebel smoothing implementation //! //! Characteristics: //! - Uses the [nalgebra](https://nalgebra.org) crate for math. //! - Supports `no_std` to facilitate running on embedded microcontrollers. //! - Includes [various methods of computing the covariance matrix on the update //! step](enum.CoverianceUpdateMethod.html). //! - [Examples](https://github.com/strawlab/adskalman-rs/tree/main/examples) //! included. //! - Strong typing used to ensure correct matrix dimensions at compile time. //! //! Throughout the library, the generic type `SS` means "state size" and `OS` is //! "observation size". These refer to the number of dimensions of the state //! vector and observation vector, respectively. // Ideas for improvement: // - See http://mocha-java.uccs.edu/ECE5550/, especially // "5.1: Maintaining symmetry of covariance matrices". // - See http://www.anuncommonlab.com/articles/how-kalman-filters-work/part2.html // - See https://stats.stackexchange.com/questions/67262/non-overlapping-state-and-measurement-covariances-in-kalman-filter/292690 // - https://en.wikipedia.org/wiki/Kalman_filter#Square_root_form #[cfg(debug_assertions)] use approx::assert_relative_eq; #[cfg(feature = "std")] use log::trace; use na::{OMatrix, OVector}; use nalgebra as na; use nalgebra::base::dimension::DimMin; use na::allocator::Allocator; use na::{DefaultAllocator, DimName, RealField}; use num_traits::identities::One; // Without std, create a dummy trace!() macro. #[cfg(not(feature = "std"))] macro_rules! trace { ($e:expr) => {{}}; ($e:expr, $($es:expr),+) => {{}}; } /// perform a runtime check that matrix is symmetric /// /// only compiled in debug mode macro_rules! debug_assert_symmetric { ($mat:expr) => { #[cfg(debug_assertions)] { assert_relative_eq!($mat, &$mat.transpose(), max_relative = na::convert(1e-5)); } }; } /// convert an nalgebra array to a String #[cfg(feature = "std")] macro_rules! pretty_print { ($arr:expr) => {{ let indent = 4; let prefix = String::from_utf8(vec![b' '; indent]).unwrap(); let mut result_els = vec!["".to_string()]; for i in 0..$arr.nrows() { let mut row_els = vec![]; for j in 0..$arr.ncols() { row_els.push(format!("{:12.3}", $arr[(i, j)])); } let row_str = row_els.into_iter().collect::<Vec<_>>().join(" "); let row_str = format!("{}{}", prefix, row_str); result_els.push(row_str); } result_els.into_iter().collect::<Vec<_>>().join("\n") }}; } mod error; pub use error::{Error, ErrorKind}; mod state_and_covariance; pub use state_and_covariance::StateAndCovariance; /// A linear model of process dynamics with no control inputs pub trait TransitionModelLinearNoControl<R, SS> where R: RealField, SS: DimName, DefaultAllocator: Allocator<R, SS, SS>, DefaultAllocator: Allocator<R, SS>, { /// Get the state transition model. fn transition_model(&self) -> &OMatrix<R, SS, SS>; /// Get the transpose of the state transition model. fn transition_model_transpose(&self) -> &OMatrix<R, SS, SS>; /// Get the transition noise covariance. fn transition_noise_covariance(&self) -> &OMatrix<R, SS, SS>; /// Predict new state from old state. 
fn predict(&self, previous_estimate: &StateAndCovariance<R, SS>) -> StateAndCovariance<R, SS> { let state = self.transition_model() * previous_estimate.state(); let covariance = ((self.transition_model() * previous_estimate.covariance()) * self.transition_model_transpose()) + self.transition_noise_covariance(); StateAndCovariance::new(state, covariance) } } /// A linear observation model /// /// Note, to use a non-linear observation model, the non-linear model must /// be linearized (using the prior state estimate) and use this linearization /// as the basis for a `ObservationModelLinear` implementation. pub trait ObservationModelLinear<R, SS, OS> where R: RealField, SS: DimName, OS: DimName + DimMin<OS, Output = OS>, DefaultAllocator: Allocator<R, SS, SS>, DefaultAllocator: Allocator<R, SS>, DefaultAllocator: Allocator<R, OS, SS>, DefaultAllocator: Allocator<R, SS, OS>, DefaultAllocator: Allocator<R, OS, OS>, DefaultAllocator: Allocator<R, OS>, DefaultAllocator: Allocator<(usize, usize), OS>, { /// For a given state, predict the observation. /// /// If an observation is not possible, this returns NaN values. (This /// happens, for example, when a non-linear observation model implements /// this trait and must be evaluated for a state for which no observation is /// possible.) Observations with NaN values are treated as missing /// observations. fn evaluate(&self, state: &OVector<R, SS>) -> OVector<R, OS>; /// Get the observation model fn observation_matrix(&self) -> &OMatrix<R, OS, SS>; /// Get the transpose of the observation model. fn observation_matrix_transpose(&self) -> &OMatrix<R, SS, OS>; /// Get the observation noise covariance. // TODO: ensure this is positive definite? fn observation_noise_covariance(&self) -> &OMatrix<R, OS, OS>; /// Given a prior state and an observation, compute a posterior state estimate. fn
( &self, prior: &StateAndCovariance<R, SS>, observation: &OVector<R, OS>, covariance_method: CoverianceUpdateMethod, ) -> Result<StateAndCovariance<R, SS>, Error> { // Use conventional (e.g. wikipedia) names for these variables let h = self.observation_matrix(); trace!("h {}", pretty_print!(h)); let p = prior.covariance(); trace!("p {}", pretty_print!(p)); debug_assert_symmetric!(p); let ht = self.observation_matrix_transpose(); trace!("ht {}", pretty_print!(ht)); let r = self.observation_noise_covariance(); trace!("r {}", pretty_print!(r)); // Calculate innovation covariance // // Math note: if (h*p*ht) and r are positive definite, s is also // positive definite. If p is positive definite, then (h*p*ht) is at // least positive semi-definite. If h is full rank, it is positive // definite. let s = (h * p * ht) + r; trace!("s {}", pretty_print!(s)); // Calculate kalman gain by inverting. let s_chol = match na::linalg::Cholesky::new(s) { Some(v) => v, None => { // Maybe state covariance is not symmetric or // for from positive definite? Also, observation // noise should be positive definite. return Err(ErrorKind::CovarianceNotPositiveSemiDefinite.into()); } }; let s_inv: OMatrix<R, OS, OS> = s_chol.inverse(); trace!("s_inv {}", pretty_print!(s_inv)); let k_gain: OMatrix<R, SS, OS> = p * ht * s_inv; // let k_gain: OMatrix<R,SS,OS> = solve!( (p*ht), s ); trace!("k_gain {}", pretty_print!(k_gain)); let predicted: OVector<R, OS> = self.evaluate(prior.state()); trace!("predicted {}", pretty_print!(predicted)); trace!("observation {}", pretty_print!(observation)); let innovation: OVector<R, OS> = observation - predicted; trace!("innovation {}", pretty_print!(innovation)); let state: OVector<R, SS> = prior.state() + &k_gain * innovation; trace!("state {}", pretty_print!(state)); trace!( "self.observation_matrix() {}", pretty_print!(self.observation_matrix()) ); let kh: OMatrix<R, SS, SS> = &k_gain * self.observation_matrix(); trace!("kh {}", pretty_print!(kh)); let one_minus_kh = OMatrix::<R, SS, SS>::one() - kh; trace!("one_minus_kh {}", pretty_print!(one_minus_kh)); let covariance: OMatrix<R, SS, SS> = match covariance_method { CoverianceUpdateMethod::JosephForm => { // Joseph form of covariance update keeps covariance matrix symmetric. let left = &one_minus_kh * prior.covariance() * &one_minus_kh.transpose(); let right = &k_gain * r * &k_gain.transpose(); left + right } CoverianceUpdateMethod::OptimalKalman => one_minus_kh * prior.covariance(), CoverianceUpdateMethod::OptimalKalmanForcedSymmetric => { let covariance1 = one_minus_kh * prior.covariance(); trace!("covariance1 {}", pretty_print!(covariance1)); // Hack to force covariance to be symmetric. // See https://math.stackexchange.com/q/2335831 let half: R = na::convert(0.5); (&covariance1 + &covariance1.transpose()) * half } }; trace!("covariance {}", pretty_print!(covariance)); debug_assert_symmetric!(covariance); Ok(StateAndCovariance::new(state, covariance)) } } /// Specifies the approach used for updating the covariance matrix #[derive(Debug, PartialEq, Clone, Copy)] pub enum CoverianceUpdateMethod { /// Assumes optimal Kalman gain. /// /// Due to numerical errors, covariance matrix may not remain symmetric. OptimalKalman, /// Assumes optimal Kalman gain and then forces symmetric covariance matrix. /// /// With original covariance matrix P, returns covariance as (P + P.T)/2 /// to enforce that the covariance matrix remains symmetric. OptimalKalmanForcedSymmetric, /// Joseph form of covariance update keeps covariance matrix symmetric. 
JosephForm, } /// A Kalman filter with no control inputs, a linear process model and linear observation model pub struct KalmanFilterNoControl<'a, R, SS, OS> where R: RealField, SS: DimName, OS: DimName, { transition_model: &'a dyn TransitionModelLinearNoControl<R, SS>, observation_matrix: &'a dyn ObservationModelLinear<R, SS, OS>, } impl<'a, R, SS, OS> KalmanFilterNoControl<'a, R, SS, OS> where R: RealField, SS: DimName, OS: DimName + DimMin<OS, Output = OS>, DefaultAllocator: Allocator<R, SS, SS>, DefaultAllocator: Allocator<R, SS>, DefaultAllocator: Allocator<R, OS, SS>, DefaultAllocator: Allocator<R, SS, OS>, DefaultAllocator: Allocator<R, OS, OS>, DefaultAllocator: Allocator<R, OS>, DefaultAllocator: Allocator<(usize, usize), OS>, { /// Initialize a new `KalmanFilterNoControl` struct. /// /// The first parameter, `transition_model`, specifies the state transition /// model, including the function `F` and the process covariance `Q`. The /// second parameter, `observation_matrix`, specifies the observation model, /// including the measurement function `H` and the measurement covariance /// `R`. pub fn new( transition_model: &'a dyn TransitionModelLinearNoControl<R, SS>, observation_matrix: &'a dyn ObservationModelLinear<R, SS, OS>, ) -> Self { Self { transition_model, observation_matrix, } } /// Perform Kalman prediction and update steps with default values /// /// If any component of the observation is NaN (not a number), the /// observation will not be used but rather the prior will be returned as /// the posterior without performing the update step. /// /// This calls the prediction step of the transition model and then, if /// there is a (non-`nan`) observation, calls the update step of the /// observation model using the /// `CoverianceUpdateMethod::OptimalKalmanForcedSymmetric` covariance update /// method. /// /// This is a convenience method that calls /// [step_with_options](struct.KalmanFilterNoControl.html#method.step_with_options). pub fn step( &self, previous_estimate: &StateAndCovariance<R, SS>, observation: &OVector<R, OS>, ) -> Result<StateAndCovariance<R, SS>, Error> { self.step_with_options( previous_estimate, observation, CoverianceUpdateMethod::OptimalKalmanForcedSymmetric, ) } /// Perform Kalman prediction and update steps with default values /// /// If any component of the observation is NaN (not a number), the /// observation will not be used but rather the prior will be returned as /// the posterior without performing the update step. /// /// This calls the prediction step of the transition model and then, if /// there is a (non-`nan`) observation, calls the update step of the /// observation model using the specified covariance update method. pub fn step_with_options( &self, previous_estimate: &StateAndCovariance<R, SS>, observation: &OVector<R, OS>, covariance_update_method: CoverianceUpdateMethod, ) -> Result<StateAndCovariance<R, SS>, Error> { let prior = self.transition_model.predict(previous_estimate); if observation.iter().any(|x| is_nan(*x)) { Ok(prior) } else { self.observation_matrix .update(&prior, observation, covariance_update_method) } } /// Kalman filter (operates on in-place data without allocating) /// /// Operates on entire time series (by repeatedly calling /// [`step`](struct.KalmanFilterNoControl.html#method.step) for each /// observation) and returns a vector of state estimates. To be /// mathematically correct, the interval between observations must be the /// `dt` specified in the motion model. 
/// /// If any observation has a NaN component, it is treated as missing. pub fn filter_inplace( &self, initial_estimate: &StateAndCovariance<R, SS>, observations: &[OVector<R, OS>], state_estimates: &mut [StateAndCovariance<R, SS>], ) -> Result<(), Error> { let mut previous_estimate = initial_estimate.clone(); assert!(state_estimates.len() >= observations.len()); for (this_observation, state_estimate) in observations.iter().zip(state_estimates.iter_mut()) { let this_estimate = self.step(&previous_estimate, this_observation)?; *state_estimate = this_estimate.clone(); previous_estimate = this_estimate; } Ok(()) } /// Kalman filter /// /// This is a convenience function that calls [`filter_inplace`](struct.KalmanFilterNoControl.html#method.filter_inplace). #[cfg(feature = "std")] pub fn filter( &self, initial_estimate: &StateAndCovariance<R, SS>, observations: &[OVector<R, OS>], ) -> Result<Vec<StateAndCovariance<R, SS>>, Error> { let mut state_estimates = Vec::with_capacity(observations.len()); let empty = StateAndCovariance::new(na::zero(), na::OMatrix::<R, SS, SS>::identity()); for _ in 0..observations.len() { state_estimates.push(empty.clone()); } self.filter_inplace(initial_estimate, observations, &mut state_estimates)?; Ok(state_estimates) } /// Rauch-Tung-Striebel (RTS) smoother /// /// Operates on entire time series (by calling /// [`filter`](struct.KalmanFilterNoControl.html#method.filter) then /// [`smooth_from_filtered`](struct.KalmanFilterNoControl.html#method.smooth_from_filtered)) /// and returns a vector of state estimates. To be mathematically correct, /// the interval between observations must be the `dt` specified in the /// motion model. /// Operates on entire time series in one shot and returns a vector of state /// estimates. To be mathematically correct, the interval between /// observations must be the `dt` specified in the motion model. /// /// If any observation has a NaN component, it is treated as missing. #[cfg(feature = "std")] pub fn smooth( &self, initial_estimate: &StateAndCovariance<R, SS>, observations: &[OVector<R, OS>], ) -> Result<Vec<StateAndCovariance<R, SS>>, Error> { let forward_results = self.filter(initial_estimate, observations)?; self.smooth_from_filtered(forward_results) } /// Rauch-Tung-Striebel (RTS) smoother using already Kalman filtered estimates /// /// Operates on entire time series in one shot and returns a vector of state /// estimates. To be mathematically correct, the interval between /// observations must be the `dt` specified in the motion model. 
#[cfg(feature = "std")] pub fn smooth_from_filtered( &self, mut forward_results: Vec<StateAndCovariance<R, SS>>, ) -> Result<Vec<StateAndCovariance<R, SS>>, Error> { forward_results.reverse(); let mut smoothed_backwards = Vec::with_capacity(forward_results.len()); let mut smooth_future = forward_results[0].clone(); smoothed_backwards.push(smooth_future.clone()); for filt in forward_results.iter().skip(1) { smooth_future = self.smooth_step(&smooth_future, filt)?; smoothed_backwards.push(smooth_future.clone()); } smoothed_backwards.reverse(); Ok(smoothed_backwards) } #[cfg(feature = "std")] fn smooth_step( &self, smooth_future: &StateAndCovariance<R, SS>, filt: &StateAndCovariance<R, SS>, ) -> Result<StateAndCovariance<R, SS>, Error> { let prior = self.transition_model.predict(filt); let v_chol = match na::linalg::Cholesky::new(prior.covariance().clone()) { Some(v) => v, None => { return Err(ErrorKind::CovarianceNotPositiveSemiDefinite.into()); } }; let inv_prior_covariance: OMatrix<R, SS, SS> = v_chol.inverse(); trace!( "inv_prior_covariance {}", pretty_print!(inv_prior_covariance) ); // J = dot(Vfilt, dot(A.T, inv(Vpred))) # smoother gain matrix let j = filt.covariance() * (self.transition_model.transition_model_transpose() * inv_prior_covariance); // xsmooth = xfilt + dot(J, xsmooth_future - xpred) let residuals = smooth_future.state() - prior.state(); let state = filt.state() + &j * residuals; // Vsmooth = Vfilt + dot(J, dot(Vsmooth_future - Vpred, J.T)) let covar_residuals = smooth_future.covariance() - prior.covariance(); let covariance = filt.covariance() + &j * (covar_residuals * j.transpose()); Ok(StateAndCovariance::new(state, covariance)) } } #[inline] fn is_nan<R: RealField>(x: R) -> bool { x.partial_cmp(&R::zero()).is_none() } #[test] fn test_is_nan() { assert_eq!(is_nan::<f64>(-1.0), false); assert_eq!(is_nan::<f64>(0.0), false); assert_eq!(is_nan::<f64>(1.0), false); assert_eq!(is_nan::<f64>(1.0 / 0.0), false); assert_eq!(is_nan::<f64>(-1.0 / 0.0), false); assert_eq!(is_nan::<f64>(std::f64::NAN), true); assert_eq!(is_nan::<f32>(-1.0), false); assert_eq!(is_nan::<f32>(0.0), false); assert_eq!(is_nan::<f32>(1.0), false); assert_eq!(is_nan::<f32>(1.0 / 0.0), false); assert_eq!(is_nan::<f32>(-1.0 / 0.0), false); assert_eq!(is_nan::<f32>(std::f32::NAN), true); }
update
identifier_name
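The masked identifier above is the `update` method, whose body implements the standard Kalman update around the predict step shown earlier. As a sanity check on that algebra, here is a scalar (1-D state, 1-D observation) walk-through using plain f64 instead of nalgebra, so each line maps onto one matrix expression in the library code; the numeric values are made up purely for illustration.

fn main() {
    // Model: x_k = f * x_{k-1} + process noise (variance q)
    //        z_k = h * x_k     + observation noise (variance r)
    let (f, q, h, r) = (1.0_f64, 0.01, 1.0, 0.25);

    // Prior estimate carried over from the previous step.
    let (mut x, mut p) = (0.0_f64, 1.0_f64);

    // Predict step (TransitionModelLinearNoControl::predict):
    x = f * x;             // state      = F * x
    p = f * p * f + q;     // covariance = F * P * F' + Q

    // Update step (ObservationModelLinear::update) with an observation z:
    let z = 0.9;
    let s = h * p * h + r;      // innovation covariance S = H P H' + R
    let k_gain = p * h / s;     // Kalman gain K = P H' S^-1
    let innovation = z - h * x; // z - H x
    x = x + k_gain * innovation;

    // CoverianceUpdateMethod::OptimalKalman: P = (I - K H) P
    p = (1.0 - k_gain * h) * p;

    println!("posterior state = {x:.4}, covariance = {p:.4}");
}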
lib.rs
#![cfg_attr(not(feature = "std"), no_std)] //! Kalman filter and Rauch-Tung-Striebel smoothing implementation //! //! Characteristics: //! - Uses the [nalgebra](https://nalgebra.org) crate for math. //! - Supports `no_std` to facilitate running on embedded microcontrollers. //! - Includes [various methods of computing the covariance matrix on the update //! step](enum.CoverianceUpdateMethod.html). //! - [Examples](https://github.com/strawlab/adskalman-rs/tree/main/examples) //! included. //! - Strong typing used to ensure correct matrix dimensions at compile time. //! //! Throughout the library, the generic type `SS` means "state size" and `OS` is //! "observation size". These refer to the number of dimensions of the state //! vector and observation vector, respectively. // Ideas for improvement: // - See http://mocha-java.uccs.edu/ECE5550/, especially // "5.1: Maintaining symmetry of covariance matrices". // - See http://www.anuncommonlab.com/articles/how-kalman-filters-work/part2.html // - See https://stats.stackexchange.com/questions/67262/non-overlapping-state-and-measurement-covariances-in-kalman-filter/292690 // - https://en.wikipedia.org/wiki/Kalman_filter#Square_root_form #[cfg(debug_assertions)] use approx::assert_relative_eq; #[cfg(feature = "std")] use log::trace; use na::{OMatrix, OVector}; use nalgebra as na; use nalgebra::base::dimension::DimMin; use na::allocator::Allocator; use na::{DefaultAllocator, DimName, RealField}; use num_traits::identities::One; // Without std, create a dummy trace!() macro. #[cfg(not(feature = "std"))] macro_rules! trace { ($e:expr) => {{}}; ($e:expr, $($es:expr),+) => {{}}; } /// perform a runtime check that matrix is symmetric /// /// only compiled in debug mode macro_rules! debug_assert_symmetric { ($mat:expr) => { #[cfg(debug_assertions)] { assert_relative_eq!($mat, &$mat.transpose(), max_relative = na::convert(1e-5)); } }; } /// convert an nalgebra array to a String #[cfg(feature = "std")] macro_rules! pretty_print { ($arr:expr) => {{ let indent = 4; let prefix = String::from_utf8(vec![b' '; indent]).unwrap(); let mut result_els = vec!["".to_string()]; for i in 0..$arr.nrows() { let mut row_els = vec![]; for j in 0..$arr.ncols() { row_els.push(format!("{:12.3}", $arr[(i, j)])); } let row_str = row_els.into_iter().collect::<Vec<_>>().join(" "); let row_str = format!("{}{}", prefix, row_str); result_els.push(row_str); } result_els.into_iter().collect::<Vec<_>>().join("\n") }}; } mod error; pub use error::{Error, ErrorKind}; mod state_and_covariance; pub use state_and_covariance::StateAndCovariance; /// A linear model of process dynamics with no control inputs pub trait TransitionModelLinearNoControl<R, SS> where R: RealField, SS: DimName, DefaultAllocator: Allocator<R, SS, SS>, DefaultAllocator: Allocator<R, SS>, { /// Get the state transition model. fn transition_model(&self) -> &OMatrix<R, SS, SS>; /// Get the transpose of the state transition model. fn transition_model_transpose(&self) -> &OMatrix<R, SS, SS>; /// Get the transition noise covariance. fn transition_noise_covariance(&self) -> &OMatrix<R, SS, SS>; /// Predict new state from old state. 
fn predict(&self, previous_estimate: &StateAndCovariance<R, SS>) -> StateAndCovariance<R, SS> { let state = self.transition_model() * previous_estimate.state(); let covariance = ((self.transition_model() * previous_estimate.covariance()) * self.transition_model_transpose()) + self.transition_noise_covariance(); StateAndCovariance::new(state, covariance) } } /// A linear observation model /// /// Note, to use a non-linear observation model, the non-linear model must /// be linearized (using the prior state estimate) and use this linearization /// as the basis for a `ObservationModelLinear` implementation. pub trait ObservationModelLinear<R, SS, OS> where R: RealField, SS: DimName, OS: DimName + DimMin<OS, Output = OS>, DefaultAllocator: Allocator<R, SS, SS>, DefaultAllocator: Allocator<R, SS>, DefaultAllocator: Allocator<R, OS, SS>, DefaultAllocator: Allocator<R, SS, OS>, DefaultAllocator: Allocator<R, OS, OS>, DefaultAllocator: Allocator<R, OS>, DefaultAllocator: Allocator<(usize, usize), OS>, { /// For a given state, predict the observation. /// /// If an observation is not possible, this returns NaN values. (This /// happens, for example, when a non-linear observation model implements /// this trait and must be evaluated for a state for which no observation is /// possible.) Observations with NaN values are treated as missing /// observations. fn evaluate(&self, state: &OVector<R, SS>) -> OVector<R, OS>; /// Get the observation model fn observation_matrix(&self) -> &OMatrix<R, OS, SS>; /// Get the transpose of the observation model. fn observation_matrix_transpose(&self) -> &OMatrix<R, SS, OS>; /// Get the observation noise covariance. // TODO: ensure this is positive definite? fn observation_noise_covariance(&self) -> &OMatrix<R, OS, OS>; /// Given a prior state and an observation, compute a posterior state estimate. fn update( &self, prior: &StateAndCovariance<R, SS>, observation: &OVector<R, OS>, covariance_method: CoverianceUpdateMethod, ) -> Result<StateAndCovariance<R, SS>, Error> { // Use conventional (e.g. wikipedia) names for these variables let h = self.observation_matrix(); trace!("h {}", pretty_print!(h)); let p = prior.covariance(); trace!("p {}", pretty_print!(p)); debug_assert_symmetric!(p); let ht = self.observation_matrix_transpose(); trace!("ht {}", pretty_print!(ht)); let r = self.observation_noise_covariance(); trace!("r {}", pretty_print!(r)); // Calculate innovation covariance // // Math note: if (h*p*ht) and r are positive definite, s is also // positive definite. If p is positive definite, then (h*p*ht) is at // least positive semi-definite. If h is full rank, it is positive // definite. let s = (h * p * ht) + r; trace!("s {}", pretty_print!(s)); // Calculate kalman gain by inverting. let s_chol = match na::linalg::Cholesky::new(s) { Some(v) => v, None => { // Maybe state covariance is not symmetric or // for from positive definite? Also, observation // noise should be positive definite. 
return Err(ErrorKind::CovarianceNotPositiveSemiDefinite.into()); } }; let s_inv: OMatrix<R, OS, OS> = s_chol.inverse(); trace!("s_inv {}", pretty_print!(s_inv)); let k_gain: OMatrix<R, SS, OS> = p * ht * s_inv; // let k_gain: OMatrix<R,SS,OS> = solve!( (p*ht), s ); trace!("k_gain {}", pretty_print!(k_gain)); let predicted: OVector<R, OS> = self.evaluate(prior.state()); trace!("predicted {}", pretty_print!(predicted)); trace!("observation {}", pretty_print!(observation)); let innovation: OVector<R, OS> = observation - predicted; trace!("innovation {}", pretty_print!(innovation)); let state: OVector<R, SS> = prior.state() + &k_gain * innovation; trace!("state {}", pretty_print!(state)); trace!( "self.observation_matrix() {}", pretty_print!(self.observation_matrix()) ); let kh: OMatrix<R, SS, SS> = &k_gain * self.observation_matrix(); trace!("kh {}", pretty_print!(kh)); let one_minus_kh = OMatrix::<R, SS, SS>::one() - kh; trace!("one_minus_kh {}", pretty_print!(one_minus_kh)); let covariance: OMatrix<R, SS, SS> = match covariance_method { CoverianceUpdateMethod::JosephForm => { // Joseph form of covariance update keeps covariance matrix symmetric. let left = &one_minus_kh * prior.covariance() * &one_minus_kh.transpose(); let right = &k_gain * r * &k_gain.transpose(); left + right } CoverianceUpdateMethod::OptimalKalman => one_minus_kh * prior.covariance(), CoverianceUpdateMethod::OptimalKalmanForcedSymmetric => { let covariance1 = one_minus_kh * prior.covariance(); trace!("covariance1 {}", pretty_print!(covariance1)); // Hack to force covariance to be symmetric. // See https://math.stackexchange.com/q/2335831 let half: R = na::convert(0.5); (&covariance1 + &covariance1.transpose()) * half } }; trace!("covariance {}", pretty_print!(covariance)); debug_assert_symmetric!(covariance); Ok(StateAndCovariance::new(state, covariance)) } } /// Specifies the approach used for updating the covariance matrix #[derive(Debug, PartialEq, Clone, Copy)] pub enum CoverianceUpdateMethod { /// Assumes optimal Kalman gain. /// /// Due to numerical errors, covariance matrix may not remain symmetric. OptimalKalman, /// Assumes optimal Kalman gain and then forces symmetric covariance matrix. /// /// With original covariance matrix P, returns covariance as (P + P.T)/2 /// to enforce that the covariance matrix remains symmetric. OptimalKalmanForcedSymmetric, /// Joseph form of covariance update keeps covariance matrix symmetric. JosephForm, } /// A Kalman filter with no control inputs, a linear process model and linear observation model pub struct KalmanFilterNoControl<'a, R, SS, OS> where R: RealField, SS: DimName, OS: DimName, { transition_model: &'a dyn TransitionModelLinearNoControl<R, SS>, observation_matrix: &'a dyn ObservationModelLinear<R, SS, OS>, } impl<'a, R, SS, OS> KalmanFilterNoControl<'a, R, SS, OS> where R: RealField, SS: DimName, OS: DimName + DimMin<OS, Output = OS>, DefaultAllocator: Allocator<R, SS, SS>, DefaultAllocator: Allocator<R, SS>, DefaultAllocator: Allocator<R, OS, SS>, DefaultAllocator: Allocator<R, SS, OS>, DefaultAllocator: Allocator<R, OS, OS>, DefaultAllocator: Allocator<R, OS>, DefaultAllocator: Allocator<(usize, usize), OS>, { /// Initialize a new `KalmanFilterNoControl` struct. /// /// The first parameter, `transition_model`, specifies the state transition /// model, including the function `F` and the process covariance `Q`. 
The /// second parameter, `observation_matrix`, specifies the observation model, /// including the measurement function `H` and the measurement covariance /// `R`. pub fn new( transition_model: &'a dyn TransitionModelLinearNoControl<R, SS>, observation_matrix: &'a dyn ObservationModelLinear<R, SS, OS>, ) -> Self { Self { transition_model, observation_matrix, } } /// Perform Kalman prediction and update steps with default values /// /// If any component of the observation is NaN (not a number), the /// observation will not be used but rather the prior will be returned as /// the posterior without performing the update step. /// /// This calls the prediction step of the transition model and then, if /// there is a (non-`nan`) observation, calls the update step of the /// observation model using the /// `CoverianceUpdateMethod::OptimalKalmanForcedSymmetric` covariance update /// method. /// /// This is a convenience method that calls /// [step_with_options](struct.KalmanFilterNoControl.html#method.step_with_options). pub fn step( &self, previous_estimate: &StateAndCovariance<R, SS>, observation: &OVector<R, OS>, ) -> Result<StateAndCovariance<R, SS>, Error> { self.step_with_options( previous_estimate, observation, CoverianceUpdateMethod::OptimalKalmanForcedSymmetric, ) } /// Perform Kalman prediction and update steps with default values /// /// If any component of the observation is NaN (not a number), the /// observation will not be used but rather the prior will be returned as /// the posterior without performing the update step. /// /// This calls the prediction step of the transition model and then, if /// there is a (non-`nan`) observation, calls the update step of the /// observation model using the specified covariance update method. pub fn step_with_options( &self, previous_estimate: &StateAndCovariance<R, SS>, observation: &OVector<R, OS>, covariance_update_method: CoverianceUpdateMethod, ) -> Result<StateAndCovariance<R, SS>, Error> { let prior = self.transition_model.predict(previous_estimate); if observation.iter().any(|x| is_nan(*x)) { Ok(prior) } else { self.observation_matrix .update(&prior, observation, covariance_update_method) } } /// Kalman filter (operates on in-place data without allocating) /// /// Operates on entire time series (by repeatedly calling /// [`step`](struct.KalmanFilterNoControl.html#method.step) for each /// observation) and returns a vector of state estimates. To be /// mathematically correct, the interval between observations must be the /// `dt` specified in the motion model. /// /// If any observation has a NaN component, it is treated as missing. pub fn filter_inplace( &self, initial_estimate: &StateAndCovariance<R, SS>, observations: &[OVector<R, OS>], state_estimates: &mut [StateAndCovariance<R, SS>], ) -> Result<(), Error> { let mut previous_estimate = initial_estimate.clone(); assert!(state_estimates.len() >= observations.len()); for (this_observation, state_estimate) in observations.iter().zip(state_estimates.iter_mut()) { let this_estimate = self.step(&previous_estimate, this_observation)?; *state_estimate = this_estimate.clone(); previous_estimate = this_estimate; } Ok(()) } /// Kalman filter /// /// This is a convenience function that calls [`filter_inplace`](struct.KalmanFilterNoControl.html#method.filter_inplace). 
#[cfg(feature = "std")] pub fn filter( &self, initial_estimate: &StateAndCovariance<R, SS>, observations: &[OVector<R, OS>], ) -> Result<Vec<StateAndCovariance<R, SS>>, Error> { let mut state_estimates = Vec::with_capacity(observations.len()); let empty = StateAndCovariance::new(na::zero(), na::OMatrix::<R, SS, SS>::identity()); for _ in 0..observations.len() { state_estimates.push(empty.clone()); } self.filter_inplace(initial_estimate, observations, &mut state_estimates)?; Ok(state_estimates) } /// Rauch-Tung-Striebel (RTS) smoother /// /// Operates on entire time series (by calling /// [`filter`](struct.KalmanFilterNoControl.html#method.filter) then /// [`smooth_from_filtered`](struct.KalmanFilterNoControl.html#method.smooth_from_filtered)) /// and returns a vector of state estimates. To be mathematically correct, /// the interval between observations must be the `dt` specified in the /// motion model. /// Operates on entire time series in one shot and returns a vector of state /// estimates. To be mathematically correct, the interval between /// observations must be the `dt` specified in the motion model. /// /// If any observation has a NaN component, it is treated as missing. #[cfg(feature = "std")] pub fn smooth( &self, initial_estimate: &StateAndCovariance<R, SS>, observations: &[OVector<R, OS>], ) -> Result<Vec<StateAndCovariance<R, SS>>, Error> { let forward_results = self.filter(initial_estimate, observations)?; self.smooth_from_filtered(forward_results) } /// Rauch-Tung-Striebel (RTS) smoother using already Kalman filtered estimates /// /// Operates on entire time series in one shot and returns a vector of state /// estimates. To be mathematically correct, the interval between /// observations must be the `dt` specified in the motion model. #[cfg(feature = "std")] pub fn smooth_from_filtered( &self, mut forward_results: Vec<StateAndCovariance<R, SS>>, ) -> Result<Vec<StateAndCovariance<R, SS>>, Error> { forward_results.reverse(); let mut smoothed_backwards = Vec::with_capacity(forward_results.len()); let mut smooth_future = forward_results[0].clone(); smoothed_backwards.push(smooth_future.clone()); for filt in forward_results.iter().skip(1) { smooth_future = self.smooth_step(&smooth_future, filt)?; smoothed_backwards.push(smooth_future.clone()); } smoothed_backwards.reverse(); Ok(smoothed_backwards) } #[cfg(feature = "std")] fn smooth_step( &self, smooth_future: &StateAndCovariance<R, SS>, filt: &StateAndCovariance<R, SS>, ) -> Result<StateAndCovariance<R, SS>, Error> { let prior = self.transition_model.predict(filt); let v_chol = match na::linalg::Cholesky::new(prior.covariance().clone()) { Some(v) => v, None => { return Err(ErrorKind::CovarianceNotPositiveSemiDefinite.into()); } }; let inv_prior_covariance: OMatrix<R, SS, SS> = v_chol.inverse(); trace!( "inv_prior_covariance {}", pretty_print!(inv_prior_covariance) ); // J = dot(Vfilt, dot(A.T, inv(Vpred))) # smoother gain matrix let j = filt.covariance() * (self.transition_model.transition_model_transpose() * inv_prior_covariance); // xsmooth = xfilt + dot(J, xsmooth_future - xpred) let residuals = smooth_future.state() - prior.state(); let state = filt.state() + &j * residuals; // Vsmooth = Vfilt + dot(J, dot(Vsmooth_future - Vpred, J.T)) let covar_residuals = smooth_future.covariance() - prior.covariance(); let covariance = filt.covariance() + &j * (covar_residuals * j.transpose());
#[inline] fn is_nan<R: RealField>(x: R) -> bool { x.partial_cmp(&R::zero()).is_none() } #[test] fn test_is_nan() { assert_eq!(is_nan::<f64>(-1.0), false); assert_eq!(is_nan::<f64>(0.0), false); assert_eq!(is_nan::<f64>(1.0), false); assert_eq!(is_nan::<f64>(1.0 / 0.0), false); assert_eq!(is_nan::<f64>(-1.0 / 0.0), false); assert_eq!(is_nan::<f64>(std::f64::NAN), true); assert_eq!(is_nan::<f32>(-1.0), false); assert_eq!(is_nan::<f32>(0.0), false); assert_eq!(is_nan::<f32>(1.0), false); assert_eq!(is_nan::<f32>(1.0 / 0.0), false); assert_eq!(is_nan::<f32>(-1.0 / 0.0), false); assert_eq!(is_nan::<f32>(std::f32::NAN), true); }
Ok(StateAndCovariance::new(state, covariance)) } }
random_line_split
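The second lib.rs row above includes the Rauch-Tung-Striebel backward pass (`smooth_step`). The same recursion, reduced to scalars so the smoother-gain formula is visible at a glance, looks like the sketch below; as before this is a didactic f64 rewrite with illustrative numbers, not the crate's nalgebra-based implementation.

fn main() {
    let (f, q) = (1.0_f64, 0.01); // transition model A and process noise Q

    // Filtered estimate at time k and the already-smoothed estimate at k+1.
    let (x_filt, p_filt) = (0.72_f64, 0.20_f64);
    let (x_smooth_next, p_smooth_next) = (0.80_f64, 0.15_f64);

    // Re-run the prediction from the filtered estimate (the "prior" in smooth_step).
    let x_pred = f * x_filt;
    let p_pred = f * p_filt * f + q;

    // J = Vfilt * A' * inv(Vpred)   (smoother gain)
    let j = p_filt * f / p_pred;

    // xsmooth = xfilt + J * (xsmooth_future - xpred)
    let x_smooth = x_filt + j * (x_smooth_next - x_pred);
    // Vsmooth = Vfilt + J * (Vsmooth_future - Vpred) * J'
    let p_smooth = p_filt + j * (p_smooth_next - p_pred) * j;

    println!("smoothed state = {x_smooth:.4}, covariance = {p_smooth:.4}");
}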
wallpaper.rs
use std::ffi::{OsStr, OsString}; use std::iter::once; use std::os::windows::ffi::{OsStrExt, OsStringExt}; use std::ptr::null_mut; use std::process::Command; use serde::{Serialize, Deserialize}; use winapi::shared::windef::{HWND, RECT, PPOINT, POINT}; use winapi::shared::minwindef::LPARAM; use winapi::um::errhandlingapi::{GetLastError, SetLastError}; use winapi::um::winuser::{ GetClassNameW, IsChild, ShowWindow, GetWindowRect, MapWindowPoints, MonitorFromPoint, GetMonitorInfoW, SetWindowPos, SW_SHOW, MONITOR_DEFAULTTONEAREST, MONITORINFO }; fn find_window_by_class(class: &str) -> HWND { use winapi::um::winuser::FindWindowW; unsafe { FindWindowW(to_wide(class).as_ptr(), null_mut()) } } fn to_wide(s: &str) -> Vec<u16> { OsStr::new(s).encode_wide().chain(once(0)).collect() } pub fn get_window_name(hwnd: HWND) -> String { use winapi::um::winuser::{GetWindowTextLengthW, GetWindowTextW}; if hwnd.is_null() { panic!("Invalid HWND"); } let text = unsafe { let text_length = GetWindowTextLengthW(hwnd); let mut text: Vec<u16> = vec![0; text_length as usize + 1]; GetWindowTextW(hwnd, text.as_mut_ptr(), text_length + 1); OsString::from_wide(&text[..text.iter().position(|&c| c == 0).unwrap()]) }; text.into_string().expect("Failed to convert string to UTF-8") } /** * Spawn a wallpaper window if it doesn't already exists and return handle to it. * * `progman` - a valid handle to the `Progman`. * * This function is unsafe, because user is responsible for providing valid progman handle. */ unsafe fn find_or_spawn_worker(progman: HWND) -> HWND { use winapi::um::winuser::{SendMessageW, EnumWindows}; extern "system" fn find_worker(hwnd: HWND, data: LPARAM) -> i32 { use winapi::um::winuser::FindWindowExW; let data = data as *mut UserData; unsafe { if FindWindowExW(hwnd, null_mut(), (*data).shell_class.as_ptr(), null_mut()).is_null() { return 1; } let worker = FindWindowExW(null_mut(), hwnd, (*data).worker_class.as_ptr(), null_mut()); if worker.is_null() { return 1; } (*data).worker = worker; (*data).parent = hwnd; } return 0; } struct UserData { shell_class: Vec<u16>, worker_class: Vec<u16>, worker: HWND, parent: HWND, } let mut user_data = UserData { shell_class: to_wide("SHELLDLL_DefView"), worker_class: to_wide("WorkerW"), worker: null_mut(), parent: null_mut(), }; SetLastError(0); EnumWindows(Some(find_worker), &mut user_data as *mut UserData as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } if user_data.worker.is_null() { // this is basically all the magic. 
it's an undocumented window message that // forces windows to spawn a window with class "WorkerW" behind deskicons SendMessageW(progman, 0x052C, 0xD, 0); SendMessageW(progman, 0x052C, 0xD, 1); SetLastError(0); EnumWindows(Some(find_worker), &mut user_data as *mut UserData as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } if user_data.worker.is_null() { eprintln!("W: couldn't spawn WorkerW window, trying old method"); SendMessageW(progman, 0x052C, 0, 0); SetLastError(0); EnumWindows(Some(find_worker), &mut user_data as *mut UserData as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } } } user_data.worker } unsafe fn get_window_style(hwnd: HWND) -> (i32, i32) { use winapi::um::winuser::{GetWindowLongW, GWL_STYLE, GWL_EXSTYLE}; SetLastError(0); let style = GetWindowLongW(hwnd, GWL_STYLE); let ex_style = GetWindowLongW(hwnd, GWL_EXSTYLE); if (style == 0 || ex_style == 0) && GetLastError()!= 0 { panic!("GetWindowLongW failed, GetLastError says: '{}'", GetLastError()); } (style, ex_style) } unsafe fn update_window_styles(wnd: HWND, and: i32, ex_and: i32, or: i32, ex_or: i32) -> bool { use winapi::um::winuser::{SetWindowLongW, GWL_STYLE, GWL_EXSTYLE}; let (mut style, mut ex_style) = get_window_style(wnd); style &= and; ex_style &= ex_and; style |= or; ex_style |= ex_or; SetLastError(0); let style = SetWindowLongW(wnd, GWL_STYLE, style); let ex_style = SetWindowLongW(wnd, GWL_EXSTYLE, ex_style); if (style == 0 || ex_style == 0) && GetLastError()!= 0 { panic!("SetWindowLongW failed, GetLastError says: '{}'", GetLastError()); } return true; } unsafe fn get_window_rect(wnd: HWND) -> Option<RECT> { let rect: RECT = Default::default(); let failed = GetWindowRect(wnd, &rect as *const RECT as *mut RECT) == 0; if failed { eprintln!("GetWindowRect failed, GetLastError says: '{}'", GetLastError()); return None; } return Some(rect); } unsafe fn map_window_rect(wallpaper: HWND, wnd: HWND) -> Option<RECT> { if let Some(rect) = get_window_rect(wnd) { MapWindowPoints(null_mut(), wallpaper, &rect as *const RECT as PPOINT, 2); return Some(rect); } return None; } unsafe fn move_window(wnd: HWND, rect: RECT) -> bool { let success = SetWindowPos( wnd, null_mut(), rect.left, rect.top, rect.right - rect.left, rect.bottom - rect.top, 0 ); if success == 0 { eprintln!("SetWindowPos failed, GetLastError says: '{}'", GetLastError()); return false; } return true; } unsafe fn add_window_as_wallpaper(wallpaper: HWND, wnd: HWND) -> bool { use winapi::um::winuser::{ SetParent, WS_CHILD, WS_CAPTION, WS_THICKFRAME, WS_SYSMENU, WS_MAXIMIZEBOX, WS_MINIMIZEBOX, WS_EX_DLGMODALFRAME, WS_EX_COMPOSITED, WS_EX_WINDOWEDGE, WS_EX_CLIENTEDGE, WS_EX_LAYERED, WS_EX_STATICEDGE, WS_EX_TOOLWINDOW, WS_EX_APPWINDOW, }; let wnd_class = { let wnd_class: &mut [u16] = &mut [0; 512]; GetClassNameW(wnd, wnd_class.as_mut_ptr(), wnd_class.len() as i32 - 1); OsString::from_wide(&wnd_class[..wnd_class.iter().position(|&c| c == 0).unwrap()]) }; if wallpaper == wnd || wnd_class == "Shell_TrayWnd" { eprintln!("can't add this window"); return false; } let is_child = IsChild(wallpaper, wnd)!= 0; if is_child { eprintln!("already added"); return false; } /* * styles blacklist taken from https://github.com/Codeusa/Borderless-Gaming/ * blob/2fef4ccc121412f215cd7f185c4351fd634cab8b/BorderlessGaming.Logic/ * Windows/Manipulation.cs#L70 */ /* TODO: somehow save old styles so we can restore them */ let and: i32 =!( WS_CAPTION | WS_THICKFRAME | WS_SYSMENU | 
WS_MAXIMIZEBOX | WS_MINIMIZEBOX ) as i32; let ex_and: i32 =!( WS_EX_DLGMODALFRAME | WS_EX_COMPOSITED | WS_EX_WINDOWEDGE | WS_EX_CLIENTEDGE | WS_EX_LAYERED | WS_EX_STATICEDGE | WS_EX_TOOLWINDOW | WS_EX_APPWINDOW ) as i32; if!update_window_styles(wnd, and, ex_and, WS_CHILD as i32, 0) { return false; } /* window retains screen coordinates so we need to adjust them */ map_window_rect(wallpaper, wnd).unwrap(); let prev_parent = SetParent(wnd, wallpaper); if prev_parent.is_null() { panic!("SetParent failed, GetLastError says: '{}'", GetLastError()); } ShowWindow(wnd, SW_SHOW); return true; } unsafe fn remove_window_from_wallpaper(wallpaper: HWND, wnd: HWND) -> bool { use winapi::um::winuser::{ SetParent, GetDesktopWindow, InvalidateRect, WS_EX_APPWINDOW, WS_OVERLAPPEDWINDOW, SWP_FRAMECHANGED, SWP_NOMOVE, SWP_NOSIZE, SWP_NOZORDER, SWP_NOOWNERZORDER }; if SetParent(wnd, GetDesktopWindow()).is_null() { eprintln!("SetParent failed, GetLastError says: '{}'", GetLastError()); return false; } let or = WS_OVERLAPPEDWINDOW as i32; let ex_or = WS_EX_APPWINDOW as i32; if!update_window_styles(wnd, -1, -1, or, ex_or) { return false; } SetWindowPos( wnd, null_mut(), 0, 0, 0, 0, SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE | SWP_NOZORDER | SWP_NOOWNERZORDER ); InvalidateRect(wallpaper, null_mut(), 1); // wp_id(); /* can sometimes fix leftover unrefreshed portions */ true } unsafe fn set_fullscreen(wallpaper: HWND, wnd: HWND) -> bool { if let Some(current_rect) = get_window_rect(wnd) { let monitor = MonitorFromPoint(POINT {x: current_rect.left, y: current_rect.top}, MONITOR_DEFAULTTONEAREST); if monitor.is_null() { eprintln!("MonitorFromWindow failed, GetLastError says: '{}'", GetLastError()); return false; } let mut mi: MONITORINFO = Default::default(); mi.cbSize = std::mem::size_of::<MONITORINFO>() as u32; let success = GetMonitorInfoW(monitor, &mi as *const MONITORINFO as *mut MONITORINFO); if success == 0 { eprintln!("GetMonitorInfoW failed, GetLastError says: '{}'", GetLastError()); return false; } MapWindowPoints(null_mut(), wallpaper, &mi.rcMonitor as *const RECT as PPOINT, 2); move_window(wnd, mi.rcMonitor); return true; } return false; } unsafe fn list_immediate_children(parent: HWND) -> Vec<HWND> { use winapi::um::winuser::EnumChildWindows; #[repr(C)] struct WindowState { parent: HWND, handles: Vec<HWND>, } let mut s = WindowState { parent, handles: Vec::new() }; extern "system" fn enum_windows(wnd: HWND, lp: LPARAM) -> i32 { use winapi::um::winuser::{GetAncestor, GA_PARENT}; let s: *mut WindowState = lp as *mut WindowState; unsafe { if GetAncestor(wnd, GA_PARENT) == (*s).parent { (*s).handles.push(wnd); } } return 1; } SetLastError(0); EnumChildWindows(parent, Some(enum_windows), &mut s as *mut WindowState as LPARAM); if GetLastError()!= 0 { panic!("EnumChildWindows failed, GetLastError says: {}", GetLastError()); } s.handles.sort_unstable(); return s.handles; } unsafe fn find_window_by_pid(pid: u32) -> HWND { use winapi::um::winuser::{EnumWindows, GetWindowThreadProcessId}; use winapi::shared::minwindef::{DWORD, LPDWORD}; #[repr(C)] #[derive(Debug)] struct Data { handle: HWND, pid: u32, } extern "system" fn enum_windows(wnd: HWND, data: LPARAM) -> i32 { let mut data = data as *mut Data; unsafe { let mut this_pid: DWORD = 0; GetWindowThreadProcessId(wnd, &mut this_pid as LPDWORD); if this_pid == (*data).pid { (*data).handle = wnd; return 0; } } return 1; } let mut data = Data {handle: null_mut(), pid}; SetLastError(0); EnumWindows(Some(enum_windows), &mut data as *mut Data as LPARAM); if 
GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: {}", GetLastError()); } data.handle } pub fn list_windows() -> Vec<HWND> { use winapi::um::winuser::{ EnumWindows, IsWindowVisible, GetLastActivePopup, GetAncestor, GetWindowTextLengthW, GA_ROOTOWNER, WS_EX_NOREDIRECTIONBITMAP, WS_EX_TOOLWINDOW }; // https://stackoverflow.com/questions/210504/enumerate-windows-like-alt-tab-does unsafe fn should_list(hwnd: HWND) -> bool { // Start at the root owner let mut hwnd_walk = GetAncestor(hwnd, GA_ROOTOWNER); // See if we are the last active visible popup let mut hwnd_try = null_mut(); loop { let hwnd_try_next = GetLastActivePopup(hwnd_walk); if hwnd_try_next == hwnd_try || IsWindowVisible(hwnd_try_next) == 1 { break; } hwnd_try = hwnd_try_next; hwnd_walk = hwnd_try; } return hwnd_walk == hwnd; } extern "system" fn list_windows_callback(hwnd: HWND, lp: LPARAM) -> i32 { let data = lp as *mut Vec<HWND>; unsafe { if IsWindowVisible(hwnd) == 1 && GetWindowTextLengthW(hwnd) > 0 && should_list(hwnd) { let (_, ex_style) = get_window_style(hwnd); if (ex_style as u32 & WS_EX_NOREDIRECTIONBITMAP) == 0 && (ex_style as u32 & WS_EX_TOOLWINDOW) == 0 { (*data).push(hwnd); } } } 1 } let mut data: Vec<HWND> = Vec::new(); unsafe { SetLastError(0); EnumWindows(Some(list_windows_callback), &mut data as *mut Vec<HWND> as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } } data } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum WindowSelector<'a> { WindowTitle(&'a str), None, } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct WallpaperProperties { pub fullscreen: bool } #[derive(Debug)] pub enum EngineError { ProgmanNotFound, UnableToSpawnWorker, } #[derive(Debug)] pub struct Engine { progman: HWND, worker: HWND, } impl Engine { pub fn new() -> Result<Engine, EngineError> { let progman_handle = find_window_by_class("Progman"); if progman_handle.is_null() { return Err(EngineError::ProgmanNotFound); } let worker_handle = unsafe { find_or_spawn_worker(progman_handle) }; if worker_handle.is_null() { return Err(EngineError::UnableToSpawnWorker); } Ok(Engine {progman: progman_handle, worker: worker_handle}) } pub fn list_active(&self) -> Vec<HWND> { unsafe { // TODO this is not safe until we add a check for worker validity here. list_immediate_children(self.worker) } } pub fn add_window_by_handle(&self, handle: HWND, properties: WallpaperProperties) -> bool { if!unsafe { add_window_as_wallpaper(self.worker, handle) } { eprintln!("Cannot add window to wallpaper"); return false; } if properties.fullscreen &&!unsafe { set_fullscreen(self.worker, handle) } { return false } true } pub fn add_window(&self, command: Option<&mut Command>, selector: WindowSelector, properties: WallpaperProperties, wait_for: u64, attempts: u64 ) -> bool { let process_id = match command { Some(command) => command.spawn().expect("command failed to start").id(), None => { if let WindowSelector::None = selector { eprintln!("One or both of selector and command should be specified"); return false; } 0 } }; let mut handle = null_mut(); for _attempt in 1..=attempts { handle = match selector { WindowSelector::None => unsafe { find_window_by_pid(process_id) }, WindowSelector::WindowTitle(title) => { let windows = list_windows(); *windows.iter().find(|&&hwnd| get_window_name(hwnd) == title).unwrap_or(&null_mut()) }, }; if handle.is_null() { std::thread::sleep(std::time::Duration::from_millis(wait_for)); } else
} if handle.is_null() { eprintln!("Cannot find handle using selector: {:?}", selector); return false; } self.add_window_by_handle(handle, properties) } pub fn remove_wallpaper(&self, hwnd: HWND) { // TODO ensure that provided handle is actually attached to wallpaper window use winapi::um::winuser::{InvalidateRect, SendMessageW, WM_CLOSE}; unsafe { remove_window_from_wallpaper(self.worker, hwnd); std::thread::sleep(std::time::Duration::from_millis(32)); SendMessageW(hwnd, WM_CLOSE, 0, 0); std::thread::sleep(std::time::Duration::from_millis(32)); InvalidateRect(null_mut(), null_mut(), 1); } } }
{ break; }
conditional_block
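The masked conditional block above is the `else { break; }` arm of `add_window`'s polling loop: the code repeatedly looks up a window handle, sleeps `wait_for` milliseconds on failure, and breaks as soon as a handle appears. A platform-independent sketch of that retry pattern is shown below, with a closure standing in for the Win32 lookup; the closure and its names are hypothetical and exist only for this example.

use std::thread::sleep;
use std::time::Duration;

// Call `lookup` up to `attempts` times, sleeping between failed tries.
fn find_with_retries<T>(
    mut lookup: impl FnMut() -> Option<T>,
    wait_for_ms: u64,
    attempts: u64,
) -> Option<T> {
    for _attempt in 1..=attempts {
        if let Some(found) = lookup() {
            return Some(found); // corresponds to the `else { break; }` arm above
        }
        sleep(Duration::from_millis(wait_for_ms));
    }
    None
}

fn main() {
    // Hypothetical lookup that only succeeds on the third call.
    let mut calls = 0;
    let result = find_with_retries(
        || {
            calls += 1;
            if calls >= 3 { Some("window-handle") } else { None }
        },
        10,
        5,
    );
    assert_eq!(result, Some("window-handle"));
}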
wallpaper.rs
use std::ffi::{OsStr, OsString}; use std::iter::once; use std::os::windows::ffi::{OsStrExt, OsStringExt}; use std::ptr::null_mut; use std::process::Command; use serde::{Serialize, Deserialize}; use winapi::shared::windef::{HWND, RECT, PPOINT, POINT}; use winapi::shared::minwindef::LPARAM; use winapi::um::errhandlingapi::{GetLastError, SetLastError}; use winapi::um::winuser::{ GetClassNameW, IsChild, ShowWindow, GetWindowRect, MapWindowPoints, MonitorFromPoint, GetMonitorInfoW, SetWindowPos, SW_SHOW, MONITOR_DEFAULTTONEAREST, MONITORINFO }; fn find_window_by_class(class: &str) -> HWND
fn to_wide(s: &str) -> Vec<u16> { OsStr::new(s).encode_wide().chain(once(0)).collect() } pub fn get_window_name(hwnd: HWND) -> String { use winapi::um::winuser::{GetWindowTextLengthW, GetWindowTextW}; if hwnd.is_null() { panic!("Invalid HWND"); } let text = unsafe { let text_length = GetWindowTextLengthW(hwnd); let mut text: Vec<u16> = vec![0; text_length as usize + 1]; GetWindowTextW(hwnd, text.as_mut_ptr(), text_length + 1); OsString::from_wide(&text[..text.iter().position(|&c| c == 0).unwrap()]) }; text.into_string().expect("Failed to convert string to UTF-8") } /** * Spawn a wallpaper window if it doesn't already exists and return handle to it. * * `progman` - a valid handle to the `Progman`. * * This function is unsafe, because user is responsible for providing valid progman handle. */ unsafe fn find_or_spawn_worker(progman: HWND) -> HWND { use winapi::um::winuser::{SendMessageW, EnumWindows}; extern "system" fn find_worker(hwnd: HWND, data: LPARAM) -> i32 { use winapi::um::winuser::FindWindowExW; let data = data as *mut UserData; unsafe { if FindWindowExW(hwnd, null_mut(), (*data).shell_class.as_ptr(), null_mut()).is_null() { return 1; } let worker = FindWindowExW(null_mut(), hwnd, (*data).worker_class.as_ptr(), null_mut()); if worker.is_null() { return 1; } (*data).worker = worker; (*data).parent = hwnd; } return 0; } struct UserData { shell_class: Vec<u16>, worker_class: Vec<u16>, worker: HWND, parent: HWND, } let mut user_data = UserData { shell_class: to_wide("SHELLDLL_DefView"), worker_class: to_wide("WorkerW"), worker: null_mut(), parent: null_mut(), }; SetLastError(0); EnumWindows(Some(find_worker), &mut user_data as *mut UserData as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } if user_data.worker.is_null() { // this is basically all the magic. 
it's an undocumented window message that // forces windows to spawn a window with class "WorkerW" behind deskicons SendMessageW(progman, 0x052C, 0xD, 0); SendMessageW(progman, 0x052C, 0xD, 1); SetLastError(0); EnumWindows(Some(find_worker), &mut user_data as *mut UserData as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } if user_data.worker.is_null() { eprintln!("W: couldn't spawn WorkerW window, trying old method"); SendMessageW(progman, 0x052C, 0, 0); SetLastError(0); EnumWindows(Some(find_worker), &mut user_data as *mut UserData as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } } } user_data.worker } unsafe fn get_window_style(hwnd: HWND) -> (i32, i32) { use winapi::um::winuser::{GetWindowLongW, GWL_STYLE, GWL_EXSTYLE}; SetLastError(0); let style = GetWindowLongW(hwnd, GWL_STYLE); let ex_style = GetWindowLongW(hwnd, GWL_EXSTYLE); if (style == 0 || ex_style == 0) && GetLastError()!= 0 { panic!("GetWindowLongW failed, GetLastError says: '{}'", GetLastError()); } (style, ex_style) } unsafe fn update_window_styles(wnd: HWND, and: i32, ex_and: i32, or: i32, ex_or: i32) -> bool { use winapi::um::winuser::{SetWindowLongW, GWL_STYLE, GWL_EXSTYLE}; let (mut style, mut ex_style) = get_window_style(wnd); style &= and; ex_style &= ex_and; style |= or; ex_style |= ex_or; SetLastError(0); let style = SetWindowLongW(wnd, GWL_STYLE, style); let ex_style = SetWindowLongW(wnd, GWL_EXSTYLE, ex_style); if (style == 0 || ex_style == 0) && GetLastError()!= 0 { panic!("SetWindowLongW failed, GetLastError says: '{}'", GetLastError()); } return true; } unsafe fn get_window_rect(wnd: HWND) -> Option<RECT> { let rect: RECT = Default::default(); let failed = GetWindowRect(wnd, &rect as *const RECT as *mut RECT) == 0; if failed { eprintln!("GetWindowRect failed, GetLastError says: '{}'", GetLastError()); return None; } return Some(rect); } unsafe fn map_window_rect(wallpaper: HWND, wnd: HWND) -> Option<RECT> { if let Some(rect) = get_window_rect(wnd) { MapWindowPoints(null_mut(), wallpaper, &rect as *const RECT as PPOINT, 2); return Some(rect); } return None; } unsafe fn move_window(wnd: HWND, rect: RECT) -> bool { let success = SetWindowPos( wnd, null_mut(), rect.left, rect.top, rect.right - rect.left, rect.bottom - rect.top, 0 ); if success == 0 { eprintln!("SetWindowPos failed, GetLastError says: '{}'", GetLastError()); return false; } return true; } unsafe fn add_window_as_wallpaper(wallpaper: HWND, wnd: HWND) -> bool { use winapi::um::winuser::{ SetParent, WS_CHILD, WS_CAPTION, WS_THICKFRAME, WS_SYSMENU, WS_MAXIMIZEBOX, WS_MINIMIZEBOX, WS_EX_DLGMODALFRAME, WS_EX_COMPOSITED, WS_EX_WINDOWEDGE, WS_EX_CLIENTEDGE, WS_EX_LAYERED, WS_EX_STATICEDGE, WS_EX_TOOLWINDOW, WS_EX_APPWINDOW, }; let wnd_class = { let wnd_class: &mut [u16] = &mut [0; 512]; GetClassNameW(wnd, wnd_class.as_mut_ptr(), wnd_class.len() as i32 - 1); OsString::from_wide(&wnd_class[..wnd_class.iter().position(|&c| c == 0).unwrap()]) }; if wallpaper == wnd || wnd_class == "Shell_TrayWnd" { eprintln!("can't add this window"); return false; } let is_child = IsChild(wallpaper, wnd)!= 0; if is_child { eprintln!("already added"); return false; } /* * styles blacklist taken from https://github.com/Codeusa/Borderless-Gaming/ * blob/2fef4ccc121412f215cd7f185c4351fd634cab8b/BorderlessGaming.Logic/ * Windows/Manipulation.cs#L70 */ /* TODO: somehow save old styles so we can restore them */ let and: i32 =!( WS_CAPTION | WS_THICKFRAME | WS_SYSMENU | 
WS_MAXIMIZEBOX | WS_MINIMIZEBOX ) as i32; let ex_and: i32 =!( WS_EX_DLGMODALFRAME | WS_EX_COMPOSITED | WS_EX_WINDOWEDGE | WS_EX_CLIENTEDGE | WS_EX_LAYERED | WS_EX_STATICEDGE | WS_EX_TOOLWINDOW | WS_EX_APPWINDOW ) as i32; if!update_window_styles(wnd, and, ex_and, WS_CHILD as i32, 0) { return false; } /* window retains screen coordinates so we need to adjust them */ map_window_rect(wallpaper, wnd).unwrap(); let prev_parent = SetParent(wnd, wallpaper); if prev_parent.is_null() { panic!("SetParent failed, GetLastError says: '{}'", GetLastError()); } ShowWindow(wnd, SW_SHOW); return true; } unsafe fn remove_window_from_wallpaper(wallpaper: HWND, wnd: HWND) -> bool { use winapi::um::winuser::{ SetParent, GetDesktopWindow, InvalidateRect, WS_EX_APPWINDOW, WS_OVERLAPPEDWINDOW, SWP_FRAMECHANGED, SWP_NOMOVE, SWP_NOSIZE, SWP_NOZORDER, SWP_NOOWNERZORDER }; if SetParent(wnd, GetDesktopWindow()).is_null() { eprintln!("SetParent failed, GetLastError says: '{}'", GetLastError()); return false; } let or = WS_OVERLAPPEDWINDOW as i32; let ex_or = WS_EX_APPWINDOW as i32; if!update_window_styles(wnd, -1, -1, or, ex_or) { return false; } SetWindowPos( wnd, null_mut(), 0, 0, 0, 0, SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE | SWP_NOZORDER | SWP_NOOWNERZORDER ); InvalidateRect(wallpaper, null_mut(), 1); // wp_id(); /* can sometimes fix leftover unrefreshed portions */ true } unsafe fn set_fullscreen(wallpaper: HWND, wnd: HWND) -> bool { if let Some(current_rect) = get_window_rect(wnd) { let monitor = MonitorFromPoint(POINT {x: current_rect.left, y: current_rect.top}, MONITOR_DEFAULTTONEAREST); if monitor.is_null() { eprintln!("MonitorFromWindow failed, GetLastError says: '{}'", GetLastError()); return false; } let mut mi: MONITORINFO = Default::default(); mi.cbSize = std::mem::size_of::<MONITORINFO>() as u32; let success = GetMonitorInfoW(monitor, &mi as *const MONITORINFO as *mut MONITORINFO); if success == 0 { eprintln!("GetMonitorInfoW failed, GetLastError says: '{}'", GetLastError()); return false; } MapWindowPoints(null_mut(), wallpaper, &mi.rcMonitor as *const RECT as PPOINT, 2); move_window(wnd, mi.rcMonitor); return true; } return false; } unsafe fn list_immediate_children(parent: HWND) -> Vec<HWND> { use winapi::um::winuser::EnumChildWindows; #[repr(C)] struct WindowState { parent: HWND, handles: Vec<HWND>, } let mut s = WindowState { parent, handles: Vec::new() }; extern "system" fn enum_windows(wnd: HWND, lp: LPARAM) -> i32 { use winapi::um::winuser::{GetAncestor, GA_PARENT}; let s: *mut WindowState = lp as *mut WindowState; unsafe { if GetAncestor(wnd, GA_PARENT) == (*s).parent { (*s).handles.push(wnd); } } return 1; } SetLastError(0); EnumChildWindows(parent, Some(enum_windows), &mut s as *mut WindowState as LPARAM); if GetLastError()!= 0 { panic!("EnumChildWindows failed, GetLastError says: {}", GetLastError()); } s.handles.sort_unstable(); return s.handles; } unsafe fn find_window_by_pid(pid: u32) -> HWND { use winapi::um::winuser::{EnumWindows, GetWindowThreadProcessId}; use winapi::shared::minwindef::{DWORD, LPDWORD}; #[repr(C)] #[derive(Debug)] struct Data { handle: HWND, pid: u32, } extern "system" fn enum_windows(wnd: HWND, data: LPARAM) -> i32 { let mut data = data as *mut Data; unsafe { let mut this_pid: DWORD = 0; GetWindowThreadProcessId(wnd, &mut this_pid as LPDWORD); if this_pid == (*data).pid { (*data).handle = wnd; return 0; } } return 1; } let mut data = Data {handle: null_mut(), pid}; SetLastError(0); EnumWindows(Some(enum_windows), &mut data as *mut Data as LPARAM); if 
GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: {}", GetLastError()); } data.handle } pub fn list_windows() -> Vec<HWND> { use winapi::um::winuser::{ EnumWindows, IsWindowVisible, GetLastActivePopup, GetAncestor, GetWindowTextLengthW, GA_ROOTOWNER, WS_EX_NOREDIRECTIONBITMAP, WS_EX_TOOLWINDOW }; // https://stackoverflow.com/questions/210504/enumerate-windows-like-alt-tab-does unsafe fn should_list(hwnd: HWND) -> bool { // Start at the root owner let mut hwnd_walk = GetAncestor(hwnd, GA_ROOTOWNER); // See if we are the last active visible popup let mut hwnd_try = null_mut(); loop { let hwnd_try_next = GetLastActivePopup(hwnd_walk); if hwnd_try_next == hwnd_try || IsWindowVisible(hwnd_try_next) == 1 { break; } hwnd_try = hwnd_try_next; hwnd_walk = hwnd_try; } return hwnd_walk == hwnd; } extern "system" fn list_windows_callback(hwnd: HWND, lp: LPARAM) -> i32 { let data = lp as *mut Vec<HWND>; unsafe { if IsWindowVisible(hwnd) == 1 && GetWindowTextLengthW(hwnd) > 0 && should_list(hwnd) { let (_, ex_style) = get_window_style(hwnd); if (ex_style as u32 & WS_EX_NOREDIRECTIONBITMAP) == 0 && (ex_style as u32 & WS_EX_TOOLWINDOW) == 0 { (*data).push(hwnd); } } } 1 } let mut data: Vec<HWND> = Vec::new(); unsafe { SetLastError(0); EnumWindows(Some(list_windows_callback), &mut data as *mut Vec<HWND> as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } } data } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum WindowSelector<'a> { WindowTitle(&'a str), None, } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct WallpaperProperties { pub fullscreen: bool } #[derive(Debug)] pub enum EngineError { ProgmanNotFound, UnableToSpawnWorker, } #[derive(Debug)] pub struct Engine { progman: HWND, worker: HWND, } impl Engine { pub fn new() -> Result<Engine, EngineError> { let progman_handle = find_window_by_class("Progman"); if progman_handle.is_null() { return Err(EngineError::ProgmanNotFound); } let worker_handle = unsafe { find_or_spawn_worker(progman_handle) }; if worker_handle.is_null() { return Err(EngineError::UnableToSpawnWorker); } Ok(Engine {progman: progman_handle, worker: worker_handle}) } pub fn list_active(&self) -> Vec<HWND> { unsafe { // TODO this is not safe until we add a check for worker validity here. 
list_immediate_children(self.worker) } } pub fn add_window_by_handle(&self, handle: HWND, properties: WallpaperProperties) -> bool { if!unsafe { add_window_as_wallpaper(self.worker, handle) } { eprintln!("Cannot add window to wallpaper"); return false; } if properties.fullscreen &&!unsafe { set_fullscreen(self.worker, handle) } { return false } true } pub fn add_window(&self, command: Option<&mut Command>, selector: WindowSelector, properties: WallpaperProperties, wait_for: u64, attempts: u64 ) -> bool { let process_id = match command { Some(command) => command.spawn().expect("command failed to start").id(), None => { if let WindowSelector::None = selector { eprintln!("One or both of selector and command should be specified"); return false; } 0 } }; let mut handle = null_mut(); for _attempt in 1..=attempts { handle = match selector { WindowSelector::None => unsafe { find_window_by_pid(process_id) }, WindowSelector::WindowTitle(title) => { let windows = list_windows(); *windows.iter().find(|&&hwnd| get_window_name(hwnd) == title).unwrap_or(&null_mut()) }, }; if handle.is_null() { std::thread::sleep(std::time::Duration::from_millis(wait_for)); } else { break; } } if handle.is_null() { eprintln!("Cannot find handle using selector: {:?}", selector); return false; } self.add_window_by_handle(handle, properties) } pub fn remove_wallpaper(&self, hwnd: HWND) { // TODO ensure that provided handle is actually attached to wallpaper window use winapi::um::winuser::{InvalidateRect, SendMessageW, WM_CLOSE}; unsafe { remove_window_from_wallpaper(self.worker, hwnd); std::thread::sleep(std::time::Duration::from_millis(32)); SendMessageW(hwnd, WM_CLOSE, 0, 0); std::thread::sleep(std::time::Duration::from_millis(32)); InvalidateRect(null_mut(), null_mut(), 1); } } }
{ use winapi::um::winuser::FindWindowW; unsafe { FindWindowW(to_wide(class).as_ptr(), null_mut()) } }
identifier_body
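The completed body above passes to_wide(class).as_ptr() into FindWindowW. A minimal, self-contained sketch of that UTF-16 conversion, assuming a Windows target (std::os::windows::ffi::OsStrExt is only available there), is:

use std::ffi::OsStr;
use std::iter::once;
use std::os::windows::ffi::OsStrExt;

// Convert a Rust string slice into a NUL-terminated UTF-16 buffer, which is
// the form wide Win32 entry points such as FindWindowW expect.
fn to_wide(s: &str) -> Vec<u16> {
    OsStr::new(s).encode_wide().chain(once(0)).collect()
}

fn main() {
    let wide = to_wide("Progman");
    // The buffer ends with the UTF-16 NUL terminator required by the C API.
    assert_eq!(*wide.last().unwrap(), 0);
    println!("{} UTF-16 code units, including the terminator", wide.len());
}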
wallpaper.rs
use std::ffi::{OsStr, OsString}; use std::iter::once; use std::os::windows::ffi::{OsStrExt, OsStringExt}; use std::ptr::null_mut; use std::process::Command; use serde::{Serialize, Deserialize}; use winapi::shared::windef::{HWND, RECT, PPOINT, POINT}; use winapi::shared::minwindef::LPARAM; use winapi::um::errhandlingapi::{GetLastError, SetLastError}; use winapi::um::winuser::{ GetClassNameW, IsChild, ShowWindow, GetWindowRect, MapWindowPoints, MonitorFromPoint, GetMonitorInfoW, SetWindowPos, SW_SHOW, MONITOR_DEFAULTTONEAREST, MONITORINFO }; fn find_window_by_class(class: &str) -> HWND { use winapi::um::winuser::FindWindowW; unsafe { FindWindowW(to_wide(class).as_ptr(), null_mut()) } } fn to_wide(s: &str) -> Vec<u16> { OsStr::new(s).encode_wide().chain(once(0)).collect() } pub fn get_window_name(hwnd: HWND) -> String { use winapi::um::winuser::{GetWindowTextLengthW, GetWindowTextW}; if hwnd.is_null() { panic!("Invalid HWND"); } let text = unsafe { let text_length = GetWindowTextLengthW(hwnd); let mut text: Vec<u16> = vec![0; text_length as usize + 1]; GetWindowTextW(hwnd, text.as_mut_ptr(), text_length + 1); OsString::from_wide(&text[..text.iter().position(|&c| c == 0).unwrap()]) }; text.into_string().expect("Failed to convert string to UTF-8") } /** * Spawn a wallpaper window if it doesn't already exists and return handle to it. * * `progman` - a valid handle to the `Progman`. * * This function is unsafe, because user is responsible for providing valid progman handle. */ unsafe fn find_or_spawn_worker(progman: HWND) -> HWND { use winapi::um::winuser::{SendMessageW, EnumWindows}; extern "system" fn find_worker(hwnd: HWND, data: LPARAM) -> i32 { use winapi::um::winuser::FindWindowExW; let data = data as *mut UserData; unsafe { if FindWindowExW(hwnd, null_mut(), (*data).shell_class.as_ptr(), null_mut()).is_null() { return 1; } let worker = FindWindowExW(null_mut(), hwnd, (*data).worker_class.as_ptr(), null_mut()); if worker.is_null() { return 1; } (*data).worker = worker; (*data).parent = hwnd; } return 0; } struct UserData { shell_class: Vec<u16>, worker_class: Vec<u16>, worker: HWND, parent: HWND, } let mut user_data = UserData { shell_class: to_wide("SHELLDLL_DefView"), worker_class: to_wide("WorkerW"), worker: null_mut(), parent: null_mut(), }; SetLastError(0); EnumWindows(Some(find_worker), &mut user_data as *mut UserData as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } if user_data.worker.is_null() { // this is basically all the magic. 
it's an undocumented window message that // forces windows to spawn a window with class "WorkerW" behind deskicons SendMessageW(progman, 0x052C, 0xD, 0); SendMessageW(progman, 0x052C, 0xD, 1); SetLastError(0); EnumWindows(Some(find_worker), &mut user_data as *mut UserData as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } if user_data.worker.is_null() { eprintln!("W: couldn't spawn WorkerW window, trying old method"); SendMessageW(progman, 0x052C, 0, 0); SetLastError(0); EnumWindows(Some(find_worker), &mut user_data as *mut UserData as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } } } user_data.worker } unsafe fn get_window_style(hwnd: HWND) -> (i32, i32) { use winapi::um::winuser::{GetWindowLongW, GWL_STYLE, GWL_EXSTYLE}; SetLastError(0); let style = GetWindowLongW(hwnd, GWL_STYLE); let ex_style = GetWindowLongW(hwnd, GWL_EXSTYLE); if (style == 0 || ex_style == 0) && GetLastError()!= 0 { panic!("GetWindowLongW failed, GetLastError says: '{}'", GetLastError()); } (style, ex_style) } unsafe fn update_window_styles(wnd: HWND, and: i32, ex_and: i32, or: i32, ex_or: i32) -> bool { use winapi::um::winuser::{SetWindowLongW, GWL_STYLE, GWL_EXSTYLE}; let (mut style, mut ex_style) = get_window_style(wnd); style &= and; ex_style &= ex_and; style |= or; ex_style |= ex_or; SetLastError(0); let style = SetWindowLongW(wnd, GWL_STYLE, style); let ex_style = SetWindowLongW(wnd, GWL_EXSTYLE, ex_style); if (style == 0 || ex_style == 0) && GetLastError()!= 0 { panic!("SetWindowLongW failed, GetLastError says: '{}'", GetLastError()); } return true; } unsafe fn get_window_rect(wnd: HWND) -> Option<RECT> { let rect: RECT = Default::default(); let failed = GetWindowRect(wnd, &rect as *const RECT as *mut RECT) == 0; if failed { eprintln!("GetWindowRect failed, GetLastError says: '{}'", GetLastError()); return None; } return Some(rect); } unsafe fn map_window_rect(wallpaper: HWND, wnd: HWND) -> Option<RECT> { if let Some(rect) = get_window_rect(wnd) { MapWindowPoints(null_mut(), wallpaper, &rect as *const RECT as PPOINT, 2); return Some(rect); } return None; } unsafe fn move_window(wnd: HWND, rect: RECT) -> bool { let success = SetWindowPos( wnd, null_mut(), rect.left, rect.top, rect.right - rect.left, rect.bottom - rect.top, 0 ); if success == 0 { eprintln!("SetWindowPos failed, GetLastError says: '{}'", GetLastError()); return false; } return true; } unsafe fn add_window_as_wallpaper(wallpaper: HWND, wnd: HWND) -> bool { use winapi::um::winuser::{ SetParent, WS_CHILD, WS_CAPTION, WS_THICKFRAME, WS_SYSMENU, WS_MAXIMIZEBOX, WS_MINIMIZEBOX,
let wnd_class = { let wnd_class: &mut [u16] = &mut [0; 512]; GetClassNameW(wnd, wnd_class.as_mut_ptr(), wnd_class.len() as i32 - 1); OsString::from_wide(&wnd_class[..wnd_class.iter().position(|&c| c == 0).unwrap()]) }; if wallpaper == wnd || wnd_class == "Shell_TrayWnd" { eprintln!("can't add this window"); return false; } let is_child = IsChild(wallpaper, wnd)!= 0; if is_child { eprintln!("already added"); return false; } /* * styles blacklist taken from https://github.com/Codeusa/Borderless-Gaming/ * blob/2fef4ccc121412f215cd7f185c4351fd634cab8b/BorderlessGaming.Logic/ * Windows/Manipulation.cs#L70 */ /* TODO: somehow save old styles so we can restore them */ let and: i32 =!( WS_CAPTION | WS_THICKFRAME | WS_SYSMENU | WS_MAXIMIZEBOX | WS_MINIMIZEBOX ) as i32; let ex_and: i32 =!( WS_EX_DLGMODALFRAME | WS_EX_COMPOSITED | WS_EX_WINDOWEDGE | WS_EX_CLIENTEDGE | WS_EX_LAYERED | WS_EX_STATICEDGE | WS_EX_TOOLWINDOW | WS_EX_APPWINDOW ) as i32; if!update_window_styles(wnd, and, ex_and, WS_CHILD as i32, 0) { return false; } /* window retains screen coordinates so we need to adjust them */ map_window_rect(wallpaper, wnd).unwrap(); let prev_parent = SetParent(wnd, wallpaper); if prev_parent.is_null() { panic!("SetParent failed, GetLastError says: '{}'", GetLastError()); } ShowWindow(wnd, SW_SHOW); return true; } unsafe fn remove_window_from_wallpaper(wallpaper: HWND, wnd: HWND) -> bool { use winapi::um::winuser::{ SetParent, GetDesktopWindow, InvalidateRect, WS_EX_APPWINDOW, WS_OVERLAPPEDWINDOW, SWP_FRAMECHANGED, SWP_NOMOVE, SWP_NOSIZE, SWP_NOZORDER, SWP_NOOWNERZORDER }; if SetParent(wnd, GetDesktopWindow()).is_null() { eprintln!("SetParent failed, GetLastError says: '{}'", GetLastError()); return false; } let or = WS_OVERLAPPEDWINDOW as i32; let ex_or = WS_EX_APPWINDOW as i32; if!update_window_styles(wnd, -1, -1, or, ex_or) { return false; } SetWindowPos( wnd, null_mut(), 0, 0, 0, 0, SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE | SWP_NOZORDER | SWP_NOOWNERZORDER ); InvalidateRect(wallpaper, null_mut(), 1); // wp_id(); /* can sometimes fix leftover unrefreshed portions */ true } unsafe fn set_fullscreen(wallpaper: HWND, wnd: HWND) -> bool { if let Some(current_rect) = get_window_rect(wnd) { let monitor = MonitorFromPoint(POINT {x: current_rect.left, y: current_rect.top}, MONITOR_DEFAULTTONEAREST); if monitor.is_null() { eprintln!("MonitorFromWindow failed, GetLastError says: '{}'", GetLastError()); return false; } let mut mi: MONITORINFO = Default::default(); mi.cbSize = std::mem::size_of::<MONITORINFO>() as u32; let success = GetMonitorInfoW(monitor, &mi as *const MONITORINFO as *mut MONITORINFO); if success == 0 { eprintln!("GetMonitorInfoW failed, GetLastError says: '{}'", GetLastError()); return false; } MapWindowPoints(null_mut(), wallpaper, &mi.rcMonitor as *const RECT as PPOINT, 2); move_window(wnd, mi.rcMonitor); return true; } return false; } unsafe fn list_immediate_children(parent: HWND) -> Vec<HWND> { use winapi::um::winuser::EnumChildWindows; #[repr(C)] struct WindowState { parent: HWND, handles: Vec<HWND>, } let mut s = WindowState { parent, handles: Vec::new() }; extern "system" fn enum_windows(wnd: HWND, lp: LPARAM) -> i32 { use winapi::um::winuser::{GetAncestor, GA_PARENT}; let s: *mut WindowState = lp as *mut WindowState; unsafe { if GetAncestor(wnd, GA_PARENT) == (*s).parent { (*s).handles.push(wnd); } } return 1; } SetLastError(0); EnumChildWindows(parent, Some(enum_windows), &mut s as *mut WindowState as LPARAM); if GetLastError()!= 0 { panic!("EnumChildWindows failed, GetLastError 
says: {}", GetLastError()); } s.handles.sort_unstable(); return s.handles; } unsafe fn find_window_by_pid(pid: u32) -> HWND { use winapi::um::winuser::{EnumWindows, GetWindowThreadProcessId}; use winapi::shared::minwindef::{DWORD, LPDWORD}; #[repr(C)] #[derive(Debug)] struct Data { handle: HWND, pid: u32, } extern "system" fn enum_windows(wnd: HWND, data: LPARAM) -> i32 { let mut data = data as *mut Data; unsafe { let mut this_pid: DWORD = 0; GetWindowThreadProcessId(wnd, &mut this_pid as LPDWORD); if this_pid == (*data).pid { (*data).handle = wnd; return 0; } } return 1; } let mut data = Data {handle: null_mut(), pid}; SetLastError(0); EnumWindows(Some(enum_windows), &mut data as *mut Data as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: {}", GetLastError()); } data.handle } pub fn list_windows() -> Vec<HWND> { use winapi::um::winuser::{ EnumWindows, IsWindowVisible, GetLastActivePopup, GetAncestor, GetWindowTextLengthW, GA_ROOTOWNER, WS_EX_NOREDIRECTIONBITMAP, WS_EX_TOOLWINDOW }; // https://stackoverflow.com/questions/210504/enumerate-windows-like-alt-tab-does unsafe fn should_list(hwnd: HWND) -> bool { // Start at the root owner let mut hwnd_walk = GetAncestor(hwnd, GA_ROOTOWNER); // See if we are the last active visible popup let mut hwnd_try = null_mut(); loop { let hwnd_try_next = GetLastActivePopup(hwnd_walk); if hwnd_try_next == hwnd_try || IsWindowVisible(hwnd_try_next) == 1 { break; } hwnd_try = hwnd_try_next; hwnd_walk = hwnd_try; } return hwnd_walk == hwnd; } extern "system" fn list_windows_callback(hwnd: HWND, lp: LPARAM) -> i32 { let data = lp as *mut Vec<HWND>; unsafe { if IsWindowVisible(hwnd) == 1 && GetWindowTextLengthW(hwnd) > 0 && should_list(hwnd) { let (_, ex_style) = get_window_style(hwnd); if (ex_style as u32 & WS_EX_NOREDIRECTIONBITMAP) == 0 && (ex_style as u32 & WS_EX_TOOLWINDOW) == 0 { (*data).push(hwnd); } } } 1 } let mut data: Vec<HWND> = Vec::new(); unsafe { SetLastError(0); EnumWindows(Some(list_windows_callback), &mut data as *mut Vec<HWND> as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } } data } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum WindowSelector<'a> { WindowTitle(&'a str), None, } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct WallpaperProperties { pub fullscreen: bool } #[derive(Debug)] pub enum EngineError { ProgmanNotFound, UnableToSpawnWorker, } #[derive(Debug)] pub struct Engine { progman: HWND, worker: HWND, } impl Engine { pub fn new() -> Result<Engine, EngineError> { let progman_handle = find_window_by_class("Progman"); if progman_handle.is_null() { return Err(EngineError::ProgmanNotFound); } let worker_handle = unsafe { find_or_spawn_worker(progman_handle) }; if worker_handle.is_null() { return Err(EngineError::UnableToSpawnWorker); } Ok(Engine {progman: progman_handle, worker: worker_handle}) } pub fn list_active(&self) -> Vec<HWND> { unsafe { // TODO this is not safe until we add a check for worker validity here. 
list_immediate_children(self.worker) } } pub fn add_window_by_handle(&self, handle: HWND, properties: WallpaperProperties) -> bool { if!unsafe { add_window_as_wallpaper(self.worker, handle) } { eprintln!("Cannot add window to wallpaper"); return false; } if properties.fullscreen &&!unsafe { set_fullscreen(self.worker, handle) } { return false } true } pub fn add_window(&self, command: Option<&mut Command>, selector: WindowSelector, properties: WallpaperProperties, wait_for: u64, attempts: u64 ) -> bool { let process_id = match command { Some(command) => command.spawn().expect("command failed to start").id(), None => { if let WindowSelector::None = selector { eprintln!("One or both of selector and command should be specified"); return false; } 0 } }; let mut handle = null_mut(); for _attempt in 1..=attempts { handle = match selector { WindowSelector::None => unsafe { find_window_by_pid(process_id) }, WindowSelector::WindowTitle(title) => { let windows = list_windows(); *windows.iter().find(|&&hwnd| get_window_name(hwnd) == title).unwrap_or(&null_mut()) }, }; if handle.is_null() { std::thread::sleep(std::time::Duration::from_millis(wait_for)); } else { break; } } if handle.is_null() { eprintln!("Cannot find handle using selector: {:?}", selector); return false; } self.add_window_by_handle(handle, properties) } pub fn remove_wallpaper(&self, hwnd: HWND) { // TODO ensure that provided handle is actually attached to wallpaper window use winapi::um::winuser::{InvalidateRect, SendMessageW, WM_CLOSE}; unsafe { remove_window_from_wallpaper(self.worker, hwnd); std::thread::sleep(std::time::Duration::from_millis(32)); SendMessageW(hwnd, WM_CLOSE, 0, 0); std::thread::sleep(std::time::Duration::from_millis(32)); InvalidateRect(null_mut(), null_mut(), 1); } } }
WS_EX_DLGMODALFRAME, WS_EX_COMPOSITED, WS_EX_WINDOWEDGE, WS_EX_CLIENTEDGE, WS_EX_LAYERED, WS_EX_STATICEDGE, WS_EX_TOOLWINDOW, WS_EX_APPWINDOW, };
random_line_split
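The line filled in above is the list of WS_EX_* constants that add_window_as_wallpaper masks out before re-parenting a window under the WorkerW host. A dependency-free sketch of that clear-then-set bitmask update, using stand-in values for the Win32 style constants (the authoritative ones come from winapi::um::winuser), is:

// Stand-in values for the Win32 style bits; the real code imports the
// authoritative constants from winapi::um::winuser.
const WS_CAPTION: u32 = 0x00C0_0000;
const WS_THICKFRAME: u32 = 0x0004_0000;
const WS_CHILD: u32 = 0x4000_0000;

// Mirror update_window_styles(): keep only the bits allowed by `and_mask`,
// then force the bits in `or_bits` on.
fn update_style(style: u32, and_mask: u32, or_bits: u32) -> u32 {
    (style & and_mask) | or_bits
}

fn main() {
    // A typical decorated top-level window.
    let style = WS_CAPTION | WS_THICKFRAME;
    // Strip the decorations and mark the window as a child of the wallpaper
    // worker, just like the style blacklist in add_window_as_wallpaper.
    let and_mask = !(WS_CAPTION | WS_THICKFRAME);
    let new_style = update_style(style, and_mask, WS_CHILD);
    assert_eq!(new_style, WS_CHILD);
    println!("{:#010x} -> {:#010x}", style, new_style);
}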
wallpaper.rs
use std::ffi::{OsStr, OsString}; use std::iter::once; use std::os::windows::ffi::{OsStrExt, OsStringExt}; use std::ptr::null_mut; use std::process::Command; use serde::{Serialize, Deserialize}; use winapi::shared::windef::{HWND, RECT, PPOINT, POINT}; use winapi::shared::minwindef::LPARAM; use winapi::um::errhandlingapi::{GetLastError, SetLastError}; use winapi::um::winuser::{ GetClassNameW, IsChild, ShowWindow, GetWindowRect, MapWindowPoints, MonitorFromPoint, GetMonitorInfoW, SetWindowPos, SW_SHOW, MONITOR_DEFAULTTONEAREST, MONITORINFO }; fn find_window_by_class(class: &str) -> HWND { use winapi::um::winuser::FindWindowW; unsafe { FindWindowW(to_wide(class).as_ptr(), null_mut()) } } fn to_wide(s: &str) -> Vec<u16> { OsStr::new(s).encode_wide().chain(once(0)).collect() } pub fn get_window_name(hwnd: HWND) -> String { use winapi::um::winuser::{GetWindowTextLengthW, GetWindowTextW}; if hwnd.is_null() { panic!("Invalid HWND"); } let text = unsafe { let text_length = GetWindowTextLengthW(hwnd); let mut text: Vec<u16> = vec![0; text_length as usize + 1]; GetWindowTextW(hwnd, text.as_mut_ptr(), text_length + 1); OsString::from_wide(&text[..text.iter().position(|&c| c == 0).unwrap()]) }; text.into_string().expect("Failed to convert string to UTF-8") } /** * Spawn a wallpaper window if it doesn't already exists and return handle to it. * * `progman` - a valid handle to the `Progman`. * * This function is unsafe, because user is responsible for providing valid progman handle. */ unsafe fn find_or_spawn_worker(progman: HWND) -> HWND { use winapi::um::winuser::{SendMessageW, EnumWindows}; extern "system" fn find_worker(hwnd: HWND, data: LPARAM) -> i32 { use winapi::um::winuser::FindWindowExW; let data = data as *mut UserData; unsafe { if FindWindowExW(hwnd, null_mut(), (*data).shell_class.as_ptr(), null_mut()).is_null() { return 1; } let worker = FindWindowExW(null_mut(), hwnd, (*data).worker_class.as_ptr(), null_mut()); if worker.is_null() { return 1; } (*data).worker = worker; (*data).parent = hwnd; } return 0; } struct UserData { shell_class: Vec<u16>, worker_class: Vec<u16>, worker: HWND, parent: HWND, } let mut user_data = UserData { shell_class: to_wide("SHELLDLL_DefView"), worker_class: to_wide("WorkerW"), worker: null_mut(), parent: null_mut(), }; SetLastError(0); EnumWindows(Some(find_worker), &mut user_data as *mut UserData as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } if user_data.worker.is_null() { // this is basically all the magic. 
it's an undocumented window message that // forces windows to spawn a window with class "WorkerW" behind deskicons SendMessageW(progman, 0x052C, 0xD, 0); SendMessageW(progman, 0x052C, 0xD, 1); SetLastError(0); EnumWindows(Some(find_worker), &mut user_data as *mut UserData as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } if user_data.worker.is_null() { eprintln!("W: couldn't spawn WorkerW window, trying old method"); SendMessageW(progman, 0x052C, 0, 0); SetLastError(0); EnumWindows(Some(find_worker), &mut user_data as *mut UserData as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } } } user_data.worker } unsafe fn get_window_style(hwnd: HWND) -> (i32, i32) { use winapi::um::winuser::{GetWindowLongW, GWL_STYLE, GWL_EXSTYLE}; SetLastError(0); let style = GetWindowLongW(hwnd, GWL_STYLE); let ex_style = GetWindowLongW(hwnd, GWL_EXSTYLE); if (style == 0 || ex_style == 0) && GetLastError()!= 0 { panic!("GetWindowLongW failed, GetLastError says: '{}'", GetLastError()); } (style, ex_style) } unsafe fn update_window_styles(wnd: HWND, and: i32, ex_and: i32, or: i32, ex_or: i32) -> bool { use winapi::um::winuser::{SetWindowLongW, GWL_STYLE, GWL_EXSTYLE}; let (mut style, mut ex_style) = get_window_style(wnd); style &= and; ex_style &= ex_and; style |= or; ex_style |= ex_or; SetLastError(0); let style = SetWindowLongW(wnd, GWL_STYLE, style); let ex_style = SetWindowLongW(wnd, GWL_EXSTYLE, ex_style); if (style == 0 || ex_style == 0) && GetLastError()!= 0 { panic!("SetWindowLongW failed, GetLastError says: '{}'", GetLastError()); } return true; } unsafe fn get_window_rect(wnd: HWND) -> Option<RECT> { let rect: RECT = Default::default(); let failed = GetWindowRect(wnd, &rect as *const RECT as *mut RECT) == 0; if failed { eprintln!("GetWindowRect failed, GetLastError says: '{}'", GetLastError()); return None; } return Some(rect); } unsafe fn map_window_rect(wallpaper: HWND, wnd: HWND) -> Option<RECT> { if let Some(rect) = get_window_rect(wnd) { MapWindowPoints(null_mut(), wallpaper, &rect as *const RECT as PPOINT, 2); return Some(rect); } return None; } unsafe fn move_window(wnd: HWND, rect: RECT) -> bool { let success = SetWindowPos( wnd, null_mut(), rect.left, rect.top, rect.right - rect.left, rect.bottom - rect.top, 0 ); if success == 0 { eprintln!("SetWindowPos failed, GetLastError says: '{}'", GetLastError()); return false; } return true; } unsafe fn add_window_as_wallpaper(wallpaper: HWND, wnd: HWND) -> bool { use winapi::um::winuser::{ SetParent, WS_CHILD, WS_CAPTION, WS_THICKFRAME, WS_SYSMENU, WS_MAXIMIZEBOX, WS_MINIMIZEBOX, WS_EX_DLGMODALFRAME, WS_EX_COMPOSITED, WS_EX_WINDOWEDGE, WS_EX_CLIENTEDGE, WS_EX_LAYERED, WS_EX_STATICEDGE, WS_EX_TOOLWINDOW, WS_EX_APPWINDOW, }; let wnd_class = { let wnd_class: &mut [u16] = &mut [0; 512]; GetClassNameW(wnd, wnd_class.as_mut_ptr(), wnd_class.len() as i32 - 1); OsString::from_wide(&wnd_class[..wnd_class.iter().position(|&c| c == 0).unwrap()]) }; if wallpaper == wnd || wnd_class == "Shell_TrayWnd" { eprintln!("can't add this window"); return false; } let is_child = IsChild(wallpaper, wnd)!= 0; if is_child { eprintln!("already added"); return false; } /* * styles blacklist taken from https://github.com/Codeusa/Borderless-Gaming/ * blob/2fef4ccc121412f215cd7f185c4351fd634cab8b/BorderlessGaming.Logic/ * Windows/Manipulation.cs#L70 */ /* TODO: somehow save old styles so we can restore them */ let and: i32 =!( WS_CAPTION | WS_THICKFRAME | WS_SYSMENU | 
WS_MAXIMIZEBOX | WS_MINIMIZEBOX ) as i32; let ex_and: i32 =!( WS_EX_DLGMODALFRAME | WS_EX_COMPOSITED | WS_EX_WINDOWEDGE | WS_EX_CLIENTEDGE | WS_EX_LAYERED | WS_EX_STATICEDGE | WS_EX_TOOLWINDOW | WS_EX_APPWINDOW ) as i32; if!update_window_styles(wnd, and, ex_and, WS_CHILD as i32, 0) { return false; } /* window retains screen coordinates so we need to adjust them */ map_window_rect(wallpaper, wnd).unwrap(); let prev_parent = SetParent(wnd, wallpaper); if prev_parent.is_null() { panic!("SetParent failed, GetLastError says: '{}'", GetLastError()); } ShowWindow(wnd, SW_SHOW); return true; } unsafe fn remove_window_from_wallpaper(wallpaper: HWND, wnd: HWND) -> bool { use winapi::um::winuser::{ SetParent, GetDesktopWindow, InvalidateRect, WS_EX_APPWINDOW, WS_OVERLAPPEDWINDOW, SWP_FRAMECHANGED, SWP_NOMOVE, SWP_NOSIZE, SWP_NOZORDER, SWP_NOOWNERZORDER }; if SetParent(wnd, GetDesktopWindow()).is_null() { eprintln!("SetParent failed, GetLastError says: '{}'", GetLastError()); return false; } let or = WS_OVERLAPPEDWINDOW as i32; let ex_or = WS_EX_APPWINDOW as i32; if!update_window_styles(wnd, -1, -1, or, ex_or) { return false; } SetWindowPos( wnd, null_mut(), 0, 0, 0, 0, SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE | SWP_NOZORDER | SWP_NOOWNERZORDER ); InvalidateRect(wallpaper, null_mut(), 1); // wp_id(); /* can sometimes fix leftover unrefreshed portions */ true } unsafe fn set_fullscreen(wallpaper: HWND, wnd: HWND) -> bool { if let Some(current_rect) = get_window_rect(wnd) { let monitor = MonitorFromPoint(POINT {x: current_rect.left, y: current_rect.top}, MONITOR_DEFAULTTONEAREST); if monitor.is_null() { eprintln!("MonitorFromWindow failed, GetLastError says: '{}'", GetLastError()); return false; } let mut mi: MONITORINFO = Default::default(); mi.cbSize = std::mem::size_of::<MONITORINFO>() as u32; let success = GetMonitorInfoW(monitor, &mi as *const MONITORINFO as *mut MONITORINFO); if success == 0 { eprintln!("GetMonitorInfoW failed, GetLastError says: '{}'", GetLastError()); return false; } MapWindowPoints(null_mut(), wallpaper, &mi.rcMonitor as *const RECT as PPOINT, 2); move_window(wnd, mi.rcMonitor); return true; } return false; } unsafe fn list_immediate_children(parent: HWND) -> Vec<HWND> { use winapi::um::winuser::EnumChildWindows; #[repr(C)] struct WindowState { parent: HWND, handles: Vec<HWND>, } let mut s = WindowState { parent, handles: Vec::new() }; extern "system" fn enum_windows(wnd: HWND, lp: LPARAM) -> i32 { use winapi::um::winuser::{GetAncestor, GA_PARENT}; let s: *mut WindowState = lp as *mut WindowState; unsafe { if GetAncestor(wnd, GA_PARENT) == (*s).parent { (*s).handles.push(wnd); } } return 1; } SetLastError(0); EnumChildWindows(parent, Some(enum_windows), &mut s as *mut WindowState as LPARAM); if GetLastError()!= 0 { panic!("EnumChildWindows failed, GetLastError says: {}", GetLastError()); } s.handles.sort_unstable(); return s.handles; } unsafe fn find_window_by_pid(pid: u32) -> HWND { use winapi::um::winuser::{EnumWindows, GetWindowThreadProcessId}; use winapi::shared::minwindef::{DWORD, LPDWORD}; #[repr(C)] #[derive(Debug)] struct Data { handle: HWND, pid: u32, } extern "system" fn
(wnd: HWND, data: LPARAM) -> i32 { let mut data = data as *mut Data; unsafe { let mut this_pid: DWORD = 0; GetWindowThreadProcessId(wnd, &mut this_pid as LPDWORD); if this_pid == (*data).pid { (*data).handle = wnd; return 0; } } return 1; } let mut data = Data {handle: null_mut(), pid}; SetLastError(0); EnumWindows(Some(enum_windows), &mut data as *mut Data as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: {}", GetLastError()); } data.handle } pub fn list_windows() -> Vec<HWND> { use winapi::um::winuser::{ EnumWindows, IsWindowVisible, GetLastActivePopup, GetAncestor, GetWindowTextLengthW, GA_ROOTOWNER, WS_EX_NOREDIRECTIONBITMAP, WS_EX_TOOLWINDOW }; // https://stackoverflow.com/questions/210504/enumerate-windows-like-alt-tab-does unsafe fn should_list(hwnd: HWND) -> bool { // Start at the root owner let mut hwnd_walk = GetAncestor(hwnd, GA_ROOTOWNER); // See if we are the last active visible popup let mut hwnd_try = null_mut(); loop { let hwnd_try_next = GetLastActivePopup(hwnd_walk); if hwnd_try_next == hwnd_try || IsWindowVisible(hwnd_try_next) == 1 { break; } hwnd_try = hwnd_try_next; hwnd_walk = hwnd_try; } return hwnd_walk == hwnd; } extern "system" fn list_windows_callback(hwnd: HWND, lp: LPARAM) -> i32 { let data = lp as *mut Vec<HWND>; unsafe { if IsWindowVisible(hwnd) == 1 && GetWindowTextLengthW(hwnd) > 0 && should_list(hwnd) { let (_, ex_style) = get_window_style(hwnd); if (ex_style as u32 & WS_EX_NOREDIRECTIONBITMAP) == 0 && (ex_style as u32 & WS_EX_TOOLWINDOW) == 0 { (*data).push(hwnd); } } } 1 } let mut data: Vec<HWND> = Vec::new(); unsafe { SetLastError(0); EnumWindows(Some(list_windows_callback), &mut data as *mut Vec<HWND> as LPARAM); if GetLastError()!= 0 { panic!("EnumWindows failed, GetLastError says: '{}'", GetLastError()); } } data } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum WindowSelector<'a> { WindowTitle(&'a str), None, } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct WallpaperProperties { pub fullscreen: bool } #[derive(Debug)] pub enum EngineError { ProgmanNotFound, UnableToSpawnWorker, } #[derive(Debug)] pub struct Engine { progman: HWND, worker: HWND, } impl Engine { pub fn new() -> Result<Engine, EngineError> { let progman_handle = find_window_by_class("Progman"); if progman_handle.is_null() { return Err(EngineError::ProgmanNotFound); } let worker_handle = unsafe { find_or_spawn_worker(progman_handle) }; if worker_handle.is_null() { return Err(EngineError::UnableToSpawnWorker); } Ok(Engine {progman: progman_handle, worker: worker_handle}) } pub fn list_active(&self) -> Vec<HWND> { unsafe { // TODO this is not safe until we add a check for worker validity here. 
list_immediate_children(self.worker) } } pub fn add_window_by_handle(&self, handle: HWND, properties: WallpaperProperties) -> bool { if!unsafe { add_window_as_wallpaper(self.worker, handle) } { eprintln!("Cannot add window to wallpaper"); return false; } if properties.fullscreen &&!unsafe { set_fullscreen(self.worker, handle) } { return false } true } pub fn add_window(&self, command: Option<&mut Command>, selector: WindowSelector, properties: WallpaperProperties, wait_for: u64, attempts: u64 ) -> bool { let process_id = match command { Some(command) => command.spawn().expect("command failed to start").id(), None => { if let WindowSelector::None = selector { eprintln!("One or both of selector and command should be specified"); return false; } 0 } }; let mut handle = null_mut(); for _attempt in 1..=attempts { handle = match selector { WindowSelector::None => unsafe { find_window_by_pid(process_id) }, WindowSelector::WindowTitle(title) => { let windows = list_windows(); *windows.iter().find(|&&hwnd| get_window_name(hwnd) == title).unwrap_or(&null_mut()) }, }; if handle.is_null() { std::thread::sleep(std::time::Duration::from_millis(wait_for)); } else { break; } } if handle.is_null() { eprintln!("Cannot find handle using selector: {:?}", selector); return false; } self.add_window_by_handle(handle, properties) } pub fn remove_wallpaper(&self, hwnd: HWND) { // TODO ensure that provided handle is actually attached to wallpaper window use winapi::um::winuser::{InvalidateRect, SendMessageW, WM_CLOSE}; unsafe { remove_window_from_wallpaper(self.worker, hwnd); std::thread::sleep(std::time::Duration::from_millis(32)); SendMessageW(hwnd, WM_CLOSE, 0, 0); std::thread::sleep(std::time::Duration::from_millis(32)); InvalidateRect(null_mut(), null_mut(), 1); } } }
enum_windows
identifier_name
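The identifier filled in above names the EnumWindows callback inside find_window_by_pid, which smuggles a pointer to caller-owned state through the opaque LPARAM argument. A pure-Rust sketch of that pattern, with a hypothetical fake_enum_windows standing in for the Win32 call so it runs on any platform, is:

// LPARAM is just a pointer-sized integer, so caller state is smuggled
// through it and the callback casts it back.
type Lparam = isize;

struct Data {
    handle: usize, // stands in for HWND
    pid: u32,
}

extern "system" fn enum_windows(wnd: usize, data: Lparam) -> i32 {
    let data = data as *mut Data;
    unsafe {
        // For the sketch, pretend the "window" whose id equals the pid is
        // the one owned by that process.
        if wnd as u32 == (*data).pid {
            (*data).handle = wnd;
            return 0; // stop enumerating
        }
    }
    1 // keep enumerating
}

// Hypothetical stand-in for the real EnumWindows loop.
fn fake_enum_windows(callback: extern "system" fn(usize, Lparam) -> i32, lp: Lparam) {
    for wnd in 1..=10usize {
        if callback(wnd, lp) == 0 {
            break;
        }
    }
}

fn main() {
    let mut data = Data { handle: 0, pid: 7 };
    fake_enum_windows(enum_windows, &mut data as *mut Data as Lparam);
    assert_eq!(data.handle, 7);
    println!("found handle {}", data.handle);
}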
tile.rs
//! Representation of tiles //! //! Items within a hex are usually given in hexagon-space. This is a 3D space //! where the axis are at 60° to each other. An example of the axis is given //! below. Note that the orientation of the axis when the hexagons are oriented //! with horizontal edges differs from when the hexagons are oriented with //! vertical edges. //! //! Instead of using coordinates in hexagon-space there are these position //! codes that can be used as a shortcut. North is the upper edge on a hexagon //! that has horizontal edges, it is the top left edge on hexagons that are //! oriented vertically. //! //! * `N`: north edge //! * `NE`: north east edge //! * `NW`: north west edge //! * `S`: south edge //! * `SE`: south east edge //! * `SW`: south west edge //! * `C`: center of hexagon //! //!![Coordinate system](../../../../axes.svg) extern crate nalgebra as na; extern crate serde_yaml; use std::collections::HashMap; use std::f64::consts::PI; use std::fs; use std::path::PathBuf; use std::fs::File; use std::process; /// Standard colors that can be used pub mod colors { pub struct Color { value: &'static str, } impl Color { pub fn value(&self) -> &str { self.value } } impl Default for Color { fn default() -> Color { GROUND } } pub const GROUND: Color = Color { value: "#FDD9B5" }; // Sandy Tan pub const YELLOW: Color = Color { value: "#FDEE00" }; // Aureolin pub const GREEN: Color = Color { value: "#00A550" }; // Pigment Green pub const RUSSET: Color = Color { value: "#CD7F32" }; // Bronze pub const GREY: Color = Color { value: "#ACACAC" }; // Silver Chalice pub const BROWN: Color = Color { value: "#7B3F00" }; // Chocolate pub const RED: Color = Color { value: "#DC143C" }; // Crimson pub const BLUE: Color = Color { value: "#007FFF" }; // Azure pub const BARRIER: Color = Color { value: "#660000" }; // Blood Red pub const WHITE: Color = Color { value: "#FFFFFF" }; pub fn name_to_color(name: &String) -> Color { match name.to_lowercase().as_str() { "ground" => GROUND, "yellow" => YELLOW, "green" => GREEN, "russet" => RUSSET, "grey" => GREY, "brown" => BROWN, "red" => RED, "blue" => BLUE, "barrier" => BARRIER, "white" => WHITE, _ => Color { value: "#000000" }, } } } /// Converts a position code to hex coordinates /// /// Converts a position code to a hexagon-space coordinate with its origin in /// the hexagon center. 
/// /// # Panics /// /// On invalid position code fn edge_to_coordinate(edge: &str) -> na::Vector3<f64> { match edge { "N" => na::Vector3::new( 0.0, 0.5, 0.5), "NE" => na::Vector3::new( 0.5, 0.5, 0.0), "SE" => na::Vector3::new( 0.5, 0.0, -0.5), "S" => na::Vector3::new( 0.0, -0.5, -0.5), "SW" => na::Vector3::new(-0.5, -0.5, 0.0), "NW" => na::Vector3::new(-0.5, 0.0, 0.5), "C" => na::Vector3::new( 0.0, 0.0, 0.0), c => panic!("Invalid edge code {}", c), } } /// Converts a compass direction to a number of degrees of rotation pub fn direction_to_angle(direction: &str) -> f64 { match direction { "N" => 0.0, "NW" => -PI / 3.0, "SW" => -PI * 2.0 / 3.0, "S" => PI, "SE" => PI * 2.0 / 3.0, "NE" => PI / 3.0, c => panic!("Invalid direction {}", c), } } /// Represents named or hex space coordinate #[derive(Clone, Deserialize, Debug)] #[serde(untagged)] pub enum Coordinate { Named(String), HexSpace((f64, f64, f64)), } impl Coordinate { pub fn as_vector(&self) -> na::Vector3<f64> { match self { &Coordinate::Named(ref name) => edge_to_coordinate(name.as_ref()), &Coordinate::HexSpace(ref pos) => na::Vector3::new(pos.0, pos.1, pos.2), } } } /// Attributes that are common between Tile and TileDefinition pub trait TileSpec { fn color(&self) -> colors::Color; fn set_name(&mut self, name: String); fn name(&self) -> &str; /// The paths on the tile. fn paths(&self) -> Vec<Path>; /// The city revenue locations on the tile. fn cities(&self) -> Vec<City>; /// The stop revenue locations on the tile fn stops(&self) -> Vec<Stop>; /// Whether a tile should be drawn as lawson track fn is_lawson(&self) -> bool; /// Arrows on the edge of a tile fn arrows(&self) -> Vec<Coordinate> { vec![] } /// Revenue track on the tile fn revenue_track(&self) -> Option<RevenueTrack> { None } fn terrain(&self) -> Option<Terrain> { None } fn get_text<'a>(&'a self, &'a str) -> &'a str; fn text_position(&self, usize) -> Option<na::Vector3<f64>>; fn text_spec(&self) -> Vec<Text>; /// Rotation of the tile fn orientation(&self) -> f64 { 0.0 } } /// The specification of a tile to be used in the game #[derive(Deserialize)] pub struct Tile { base_tile: String, color: String, text: HashMap<String, String>, #[serde(skip)] definition: Option<TileDefinition>, } impl Tile { pub fn set_definition(&mut self, definition: &TileDefinition) { self.definition = Some(definition.clone()); } pub fn base_tile(&self) -> String { self.base_tile.clone() } } impl Default for Tile { fn default() -> Tile { Tile { base_tile: String::new(), color: String::new(), text: HashMap::new(), definition: None, } } } impl TileSpec for Tile { fn color(&self) -> colors::Color { colors::name_to_color(&self.color) } /// The number of the tile, should be the first text specified fn name(&self) -> &str { self.text.get("number").unwrap() } fn set_name(&mut self, name: String) { self.text.insert("number".to_string(), name); } fn paths(&self) -> Vec<Path> { self.definition.as_ref() .expect("You must call set_definition() before using paths()") .paths() } fn cities(&self) -> Vec<City> { self.definition.as_ref() .expect("You must call set_definition() before using cities()") .cities() } fn stops(&self) -> Vec<Stop> { self.definition.as_ref() .expect("You must call set_definition() before using stops()") .stops() } fn is_lawson(&self) -> bool { self.definition.as_ref() .expect("You must call set_definition() before using is_lawson()") .is_lawson() } fn get_text(&self, id: &str) -> &str { match self.text.get(id) { Some(s) => s, None => "", } } fn text_position(&self, id: usize) -> 
Option<na::Vector3<f64>> { self.definition.as_ref() .expect("You must call set_definition() before using \ text_position()") .text_position(id) } fn text_spec(&self) -> Vec<Text> { self.definition.as_ref() .expect("You must call set_definition() before using \ text_spec()") .text_spec() } } /// Definition of tile layout, does not include color or name #[derive(Clone, Deserialize, Debug)] #[serde(default)] pub struct TileDefinition { name: String, paths: Vec<Path>, cities: Vec<City>, stops: Vec<Stop>, is_lawson: bool, text: Vec<Text>, } impl Default for TileDefinition { fn default() -> TileDefinition {
} impl TileSpec for TileDefinition { fn paths(&self) -> Vec<Path> { self.paths.clone() } fn cities(&self) -> Vec<City> { self.cities.clone() } fn stops(&self) -> Vec<Stop> { self.stops.clone() } fn is_lawson(&self) -> bool { self.is_lawson } fn color(&self) -> colors::Color { colors::GROUND } fn set_name(&mut self, name: String) { self.name = name; } fn name(&self) -> &str { self.name.as_str() } fn get_text<'a>(&'a self, id: &'a str) -> &'a str { match id { "number" => self.name(), x => x, } } fn text_position(&self, id: usize) -> Option<na::Vector3<f64>> { Some(self.text[id].position()) } fn text_spec(&self) -> Vec<Text> { let tile_number = Text { id: "number".to_string(), position: Coordinate::HexSpace((0.0, 0.0, -0.9)), anchor: TextAnchor::End, size: None, weight: None, }; let mut text = self.text.clone(); text.insert(0, tile_number); text } } /// Path on the tile /// /// A path is a line section that goes between `start point` and `end point`. /// There are two versions of each point `[start|end]` and `[start|end]_pos`, /// the `_pos` variant takes precedence over the non-`_pos` version. The /// non-`_pos` version should always be a position code, while the `_pos` /// version is a 3D position in hexagon-space. #[derive(Deserialize, Debug, Clone)] pub struct Path { start: Coordinate, end: Coordinate, pub start_control: Option<Coordinate>, pub end_control: Option<Coordinate>, #[serde(default)] is_bridge: bool, } impl Path { /// Getter that always returns the start coordinate in hexagon-space. pub fn start(&self) -> na::Vector3<f64> { self.start.as_vector() } /// Getter that always returns the end coordinate in hexagon-space. pub fn end(&self) -> na::Vector3<f64> { self.end.as_vector() } /// Whether the is_bridge flag is set pub fn is_bridge(&self) -> bool { self.is_bridge } /// The radius of the corner made by the path pub fn radius(&self) -> f64 { let gentle_curve = 2.0_f64.sqrt() / 2.0; // Gentle curves have a different radius if let (&Coordinate::Named(ref start), &Coordinate::Named(ref end)) = (&self.start, &self.end) { if start.len() == 2 && end.len() == 2 && start.chars().nth(0) == end.chars().nth(0) { // NW-NE, SW-SE return gentle_curve } else if ((start.len() == 2 && end.len() == 1) || (start.len() == 1 && end.len() == 2)) && start.chars().nth(0)!= end.chars().nth(0) { // N-SE, N-SW, etc. return gentle_curve } } // Everything else has a radius of one 1.0 } } /// City on the tile /// /// A city is a collection of circles where tokens can be put down. A city /// requires the specification of the number of circles (a positive integer) /// and the revenue (a positive integer). An optional position can also be /// given. If omitted then the position is assumed to be the center of the /// tile. The position can be given as the `pos` or `position` fields. The /// `pos` field is a coordinate in hexagon-space. The `position` field is a /// position code. #[derive(Deserialize, Debug, Clone)] pub struct City { pub circles: u32, pub text_id: String, pub revenue_position: Coordinate, position: Coordinate, } impl City { /// The coordinate of the city in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } pub fn revenue_position(&self) -> na::Vector3<f64>{ self.revenue_position.as_vector() } } /// Stop on the tile /// /// A stop is a position with a revenue number. The `position` field is an /// 3D position in hexagon-space. 
#[derive(Deserialize, Debug, Clone)] pub struct Stop { position: Coordinate, pub text_id: String, pub revenue_angle: i32, } impl Stop { /// The coordinate of the stop in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } } /// Text anchor position for text on tile #[derive(Deserialize, Debug, Clone)] pub enum TextAnchor { Start, Middle, End, } /// Text on the tile #[derive(Deserialize, Debug, Clone)] pub struct Text { pub id: String, position: Coordinate, size: Option<String>, pub weight: Option<u32>, pub anchor: TextAnchor, } impl Text { /// The coordinate of the text in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } /// The size of the text pub fn size(&self) -> Option<&str> { match self.size { None => None, Some(ref s) => Some(&s), } } } /// Track which shows revenue for different phases #[derive(Deserialize, Debug, Clone)] pub struct RevenueTrack { position: Coordinate, pub yellow: String, pub green: Option<String>, pub russet: Option<String>, pub grey: Option<String>, } impl RevenueTrack { /// The coordinate of the track in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } } /// Terrain on a tile #[derive(Clone, Deserialize)] pub struct Terrain { position: Coordinate, #[serde(rename="type")] pub terrain_type: TerrainType, pub cost: String, } impl Terrain { /// The coordinate of the terrain in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } } /// Types of terrain that can be present #[derive(Clone, Deserialize)] #[serde(rename_all="lowercase")] pub enum TerrainType { Rough, Hill, Mountain, River, Marsh, } /// Reads and parses all tile definitions in./tiledefs/ pub fn definitions(options: &super::Options) -> HashMap<String, TileDefinition> { println!("Reading tile definitions from file..."); let def_files: Vec<PathBuf> = match fs::read_dir("tiledefs") { Err(err) => { eprintln!("Couldn't open tile definitions directory: {:?}", err.kind()); process::exit(1); } Ok(paths) => { paths.map(|path| path.unwrap().path()).collect() }, }; // Read and parse each file let mut definitions = HashMap::new(); for def in &def_files { // Ignore non.yaml files if def.extension().unwrap()!= "yaml" { continue; } if options.verbose { println!("Parsing definition {}", def.file_stem().unwrap().to_string_lossy()); } // Read yaml file let file = File::open(def).unwrap_or_else(|err| { eprintln!("Couldn't open {}: {:?}", def.to_string_lossy(), err.kind()); process::exit(1); }); let mut tile: TileDefinition = serde_yaml::from_reader(file) .unwrap_or_else(|err| { eprintln!("Error parsing {}: {}", def.to_string_lossy(), err); process::exit(1); }); tile.set_name(String::from(def.file_stem() .unwrap().to_string_lossy())); definitions.insert(String::from(tile.name()), tile); } definitions }
TileDefinition { name: "NoName".to_string(), paths: vec![], cities: vec![], stops: vec![], is_lawson: false, text: vec![], } }
identifier_body
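The completed Default::default() body above is what #[serde(default)] on TileDefinition falls back to for fields missing from a YAML tile definition. A reduced sketch of that interplay, assuming serde (with the derive feature) and serde_yaml as dependencies, is:

// Assumed Cargo.toml dependencies for this sketch:
//   serde = { version = "1", features = ["derive"] }
//   serde_yaml = "0.9"
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(default)]
struct TileDefinition {
    name: String,
    is_lawson: bool,
}

impl Default for TileDefinition {
    fn default() -> TileDefinition {
        TileDefinition {
            name: "NoName".to_string(),
            is_lawson: false,
        }
    }
}

fn main() {
    // `is_lawson` is omitted from the YAML, so serde takes it from the
    // Default impl, just as the full struct does for its missing fields.
    let tile: TileDefinition = serde_yaml::from_str("name: \"8\"").unwrap();
    assert!(!tile.is_lawson);
    println!("{:?}", tile);
}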
tile.rs
//! Representation of tiles //! //! Items within a hex are usually given in hexagon-space. This is a 3D space //! where the axis are at 60° to each other. An example of the axis is given //! below. Note that the orientation of the axis when the hexagons are oriented //! with horizontal edges differs from when the hexagons are oriented with //! vertical edges. //! //! Instead of using coordinates in hexagon-space there are these position //! codes that can be used as a shortcut. North is the upper edge on a hexagon //! that has horizontal edges, it is the top left edge on hexagons that are //! oriented vertically. //! //! * `N`: north edge //! * `NE`: north east edge //! * `NW`: north west edge //! * `S`: south edge //! * `SE`: south east edge //! * `SW`: south west edge //! * `C`: center of hexagon //! //!![Coordinate system](../../../../axes.svg) extern crate nalgebra as na; extern crate serde_yaml; use std::collections::HashMap; use std::f64::consts::PI; use std::fs; use std::path::PathBuf; use std::fs::File; use std::process; /// Standard colors that can be used pub mod colors { pub struct Color { value: &'static str, } impl Color { pub fn value(&self) -> &str { self.value } } impl Default for Color { fn default() -> Color { GROUND } } pub const GROUND: Color = Color { value: "#FDD9B5" }; // Sandy Tan pub const YELLOW: Color = Color { value: "#FDEE00" }; // Aureolin pub const GREEN: Color = Color { value: "#00A550" }; // Pigment Green pub const RUSSET: Color = Color { value: "#CD7F32" }; // Bronze pub const GREY: Color = Color { value: "#ACACAC" }; // Silver Chalice pub const BROWN: Color = Color { value: "#7B3F00" }; // Chocolate pub const RED: Color = Color { value: "#DC143C" }; // Crimson pub const BLUE: Color = Color { value: "#007FFF" }; // Azure pub const BARRIER: Color = Color { value: "#660000" }; // Blood Red pub const WHITE: Color = Color { value: "#FFFFFF" }; pub fn name_to_color(name: &String) -> Color { match name.to_lowercase().as_str() { "ground" => GROUND, "yellow" => YELLOW, "green" => GREEN, "russet" => RUSSET, "grey" => GREY, "brown" => BROWN, "red" => RED, "blue" => BLUE, "barrier" => BARRIER, "white" => WHITE, _ => Color { value: "#000000" }, } } } /// Converts a position code to hex coordinates /// /// Converts a position code to a hexagon-space coordinate with its origin in /// the hexagon center. 
/// /// # Panics /// /// On invalid position code fn edge_to_coordinate(edge: &str) -> na::Vector3<f64> { match edge { "N" => na::Vector3::new( 0.0, 0.5, 0.5), "NE" => na::Vector3::new( 0.5, 0.5, 0.0), "SE" => na::Vector3::new( 0.5, 0.0, -0.5), "S" => na::Vector3::new( 0.0, -0.5, -0.5), "SW" => na::Vector3::new(-0.5, -0.5, 0.0), "NW" => na::Vector3::new(-0.5, 0.0, 0.5), "C" => na::Vector3::new( 0.0, 0.0, 0.0), c => panic!("Invalid edge code {}", c), } } /// Converts a compass direction to a number of degrees of rotation pub fn direction_to_angle(direction: &str) -> f64 { match direction { "N" => 0.0, "NW" => -PI / 3.0, "SW" => -PI * 2.0 / 3.0, "S" => PI, "SE" => PI * 2.0 / 3.0, "NE" => PI / 3.0, c => panic!("Invalid direction {}", c), } } /// Represents named or hex space coordinate #[derive(Clone, Deserialize, Debug)] #[serde(untagged)] pub enum Coordinate { Named(String), HexSpace((f64, f64, f64)), } impl Coordinate { pub fn as_vector(&self) -> na::Vector3<f64> { match self { &Coordinate::Named(ref name) => edge_to_coordinate(name.as_ref()), &Coordinate::HexSpace(ref pos) => na::Vector3::new(pos.0, pos.1, pos.2), } } } /// Attributes that are common between Tile and TileDefinition pub trait TileSpec { fn color(&self) -> colors::Color; fn set_name(&mut self, name: String); fn name(&self) -> &str; /// The paths on the tile. fn paths(&self) -> Vec<Path>; /// The city revenue locations on the tile. fn cities(&self) -> Vec<City>; /// The stop revenue locations on the tile fn stops(&self) -> Vec<Stop>; /// Whether a tile should be drawn as lawson track fn is_lawson(&self) -> bool; /// Arrows on the edge of a tile fn arrows(&self) -> Vec<Coordinate> { vec![] } /// Revenue track on the tile fn revenue_track(&self) -> Option<RevenueTrack> { None } fn terrain(&self) -> Option<Terrain> { None } fn get_text<'a>(&'a self, &'a str) -> &'a str; fn text_position(&self, usize) -> Option<na::Vector3<f64>>; fn text_spec(&self) -> Vec<Text>; /// Rotation of the tile fn orientation(&self) -> f64 { 0.0 } } /// The specification of a tile to be used in the game #[derive(Deserialize)] pub struct Tile { base_tile: String, color: String, text: HashMap<String, String>, #[serde(skip)] definition: Option<TileDefinition>, } impl Tile { pub fn set_definition(&mut self, definition: &TileDefinition) { self.definition = Some(definition.clone()); } pub fn base_tile(&self) -> String { self.base_tile.clone() } } impl Default for Tile { fn default() -> Tile { Tile { base_tile: String::new(), color: String::new(), text: HashMap::new(), definition: None, } } } impl TileSpec for Tile { fn color(&self) -> colors::Color { colors::name_to_color(&self.color) } /// The number of the tile, should be the first text specified fn name(&self) -> &str { self.text.get("number").unwrap() } fn set_name(&mut self, name: String) { self.text.insert("number".to_string(), name); } fn paths(&self) -> Vec<Path> { self.definition.as_ref() .expect("You must call set_definition() before using paths()") .paths() } fn cities(&self) -> Vec<City> { self.definition.as_ref() .expect("You must call set_definition() before using cities()") .cities() } fn stops(&self) -> Vec<Stop> { self.definition.as_ref() .expect("You must call set_definition() before using stops()") .stops() } fn is_lawson(&self) -> bool { self.definition.as_ref() .expect("You must call set_definition() before using is_lawson()") .is_lawson() } fn get_text(&self, id: &str) -> &str { match self.text.get(id) { Some(s) => s, None => "", } } fn text_position(&self, id: usize) -> 
Option<na::Vector3<f64>> { self.definition.as_ref() .expect("You must call set_definition() before using \ text_position()") .text_position(id) } fn text_spec(&self) -> Vec<Text> { self.definition.as_ref() .expect("You must call set_definition() before using \ text_spec()") .text_spec() } } /// Definition of tile layout, does not include color or name #[derive(Clone, Deserialize, Debug)] #[serde(default)] pub struct TileDefinition { name: String, paths: Vec<Path>, cities: Vec<City>, stops: Vec<Stop>, is_lawson: bool, text: Vec<Text>, } impl Default for TileDefinition { fn default() -> TileDefinition { TileDefinition { name: "NoName".to_string(), paths: vec![], cities: vec![], stops: vec![], is_lawson: false, text: vec![], } } } impl TileSpec for TileDefinition { fn paths(&self) -> Vec<Path> { self.paths.clone() } fn cities(&self) -> Vec<City> { self.cities.clone() } fn stops(&self) -> Vec<Stop> { self.stops.clone() } fn is_lawson(&self) -> bool { self.is_lawson } fn color(&self) -> colors::Color { colors::GROUND } fn set_name(&mut self, name: String) { self.name = name; } fn name(&self) -> &str { self.name.as_str() } fn get_text<'a>(&'a self, id: &'a str) -> &'a str { match id { "number" => self.name(), x => x, } } fn text_position(&self, id: usize) -> Option<na::Vector3<f64>> { Some(self.text[id].position()) } fn text_spec(&self) -> Vec<Text> { let tile_number = Text { id: "number".to_string(), position: Coordinate::HexSpace((0.0, 0.0, -0.9)), anchor: TextAnchor::End, size: None, weight: None, }; let mut text = self.text.clone(); text.insert(0, tile_number); text } } /// Path on the tile /// /// A path is a line section that goes between `start point` and `end point`. /// There are two versions of each point `[start|end]` and `[start|end]_pos`, /// the `_pos` variant takes precedence over the non-`_pos` version. The /// non-`_pos` version should always be a position code, while the `_pos` /// version is a 3D position in hexagon-space. #[derive(Deserialize, Debug, Clone)] pub struct Path { start: Coordinate, end: Coordinate, pub start_control: Option<Coordinate>, pub end_control: Option<Coordinate>, #[serde(default)] is_bridge: bool, } impl Path { /// Getter that always returns the start coordinate in hexagon-space. pub fn start(&self) -> na::Vector3<f64> { self.start.as_vector() } /// Getter that always returns the end coordinate in hexagon-space. pub fn end(&self) -> na::Vector3<f64> { self.end.as_vector() } /// Whether the is_bridge flag is set pub fn is_bridge(&self) -> bool { self.is_bridge } /// The radius of the corner made by the path pub fn radius(&self) -> f64 { let gentle_curve = 2.0_f64.sqrt() / 2.0; // Gentle curves have a different radius if let (&Coordinate::Named(ref start), &Coordinate::Named(ref end)) = (&self.start, &self.end) { if start.len() == 2 && end.len() == 2 && start.chars().nth(0) == end.chars().nth(0) { // NW-NE, SW-SE return gentle_curve } else if ((start.len() == 2 && end.len() == 1) || (start.len() == 1 && end.len() == 2)) && start.chars().nth(0)!= end.chars().nth(0) { // N-SE, N-SW, etc. return gentle_curve } } // Everything else has a radius of one 1.0 } } /// City on the tile /// /// A city is a collection of circles where tokens can be put down. A city /// requires the specification of the number of circles (a positive integer) /// and the revenue (a positive integer). An optional position can also be /// given. If omitted then the position is assumed to be the center of the /// tile. The position can be given as the `pos` or `position` fields. 
The /// `pos` field is a coordinate in hexagon-space. The `position` field is a /// position code. #[derive(Deserialize, Debug, Clone)] pub struct City { pub circles: u32, pub text_id: String, pub revenue_position: Coordinate, position: Coordinate, } impl City { /// The coordinate of the city in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } pub fn revenue_position(&self) -> na::Vector3<f64>{ self.revenue_position.as_vector() } } /// Stop on the tile /// /// A stop is a position with a revenue number. The `position` field is an /// 3D position in hexagon-space. #[derive(Deserialize, Debug, Clone)] pub struct Stop { position: Coordinate, pub text_id: String, pub revenue_angle: i32, } impl Stop { /// The coordinate of the stop in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } } /// Text anchor position for text on tile #[derive(Deserialize, Debug, Clone)] pub enum TextAnchor { Start, Middle, End, } /// Text on the tile #[derive(Deserialize, Debug, Clone)] pub struct Text { pub id: String, position: Coordinate, size: Option<String>, pub weight: Option<u32>, pub anchor: TextAnchor, } impl Text { /// The coordinate of the text in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } /// The size of the text pub fn size(&self) -> Option<&str> { match self.size { None => None, Some(ref s) => Some(&s), } } } /// Track which shows revenue for different phases #[derive(Deserialize, Debug, Clone)] pub struct RevenueTrack { position: Coordinate, pub yellow: String, pub green: Option<String>, pub russet: Option<String>, pub grey: Option<String>, } impl RevenueTrack { /// The coordinate of the track in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } } /// Terrain on a tile #[derive(Clone, Deserialize)] pub struct Terrain { position: Coordinate, #[serde(rename="type")] pub terrain_type: TerrainType, pub cost: String, } impl Terrain { /// The coordinate of the terrain in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } } /// Types of terrain that can be present #[derive(Clone, Deserialize)] #[serde(rename_all="lowercase")] pub enum T
{ Rough, Hill, Mountain, River, Marsh, } /// Reads and parses all tile definitions in./tiledefs/ pub fn definitions(options: &super::Options) -> HashMap<String, TileDefinition> { println!("Reading tile definitions from file..."); let def_files: Vec<PathBuf> = match fs::read_dir("tiledefs") { Err(err) => { eprintln!("Couldn't open tile definitions directory: {:?}", err.kind()); process::exit(1); } Ok(paths) => { paths.map(|path| path.unwrap().path()).collect() }, }; // Read and parse each file let mut definitions = HashMap::new(); for def in &def_files { // Ignore non.yaml files if def.extension().unwrap()!= "yaml" { continue; } if options.verbose { println!("Parsing definition {}", def.file_stem().unwrap().to_string_lossy()); } // Read yaml file let file = File::open(def).unwrap_or_else(|err| { eprintln!("Couldn't open {}: {:?}", def.to_string_lossy(), err.kind()); process::exit(1); }); let mut tile: TileDefinition = serde_yaml::from_reader(file) .unwrap_or_else(|err| { eprintln!("Error parsing {}: {}", def.to_string_lossy(), err); process::exit(1); }); tile.set_name(String::from(def.file_stem() .unwrap().to_string_lossy())); definitions.insert(String::from(tile.name()), tile); } definitions }
errainType
identifier_name
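The surrounding tile.rs records document the hexagon-space position codes (N, NE, SE, S, SW, NW, C) and their conversion to coordinates. A dependency-free sketch of that lookup, using a plain [f64; 3] in place of the nalgebra Vector3<f64> the real edge_to_coordinate returns, is:

// The real edge_to_coordinate returns an na::Vector3<f64>; a plain
// [f64; 3] stands in for it here so the sketch has no dependencies.
fn edge_to_coordinate(edge: &str) -> [f64; 3] {
    match edge {
        "N" => [0.0, 0.5, 0.5],
        "NE" => [0.5, 0.5, 0.0],
        "SE" => [0.5, 0.0, -0.5],
        "S" => [0.0, -0.5, -0.5],
        "SW" => [-0.5, -0.5, 0.0],
        "NW" => [-0.5, 0.0, 0.5],
        "C" => [0.0, 0.0, 0.0],
        c => panic!("Invalid edge code {}", c),
    }
}

fn main() {
    // Opposite edges mirror each other around the hexagon center, so their
    // hexagon-space coordinates sum to zero component by component.
    let n = edge_to_coordinate("N");
    let s = edge_to_coordinate("S");
    assert!(n.iter().zip(&s).all(|(a, b)| a + b == 0.0));
    println!("N = {:?}, S = {:?}", n, s);
}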
tile.rs
//! Representation of tiles //! //! Items within a hex are usually given in hexagon-space. This is a 3D space //! where the axis are at 60° to each other. An example of the axis is given //! below. Note that the orientation of the axis when the hexagons are oriented //! with horizontal edges differs from when the hexagons are oriented with //! vertical edges. //! //! Instead of using coordinates in hexagon-space there are these position //! codes that can be used as a shortcut. North is the upper edge on a hexagon //! that has horizontal edges, it is the top left edge on hexagons that are //! oriented vertically. //! //! * `N`: north edge //! * `NE`: north east edge //! * `NW`: north west edge //! * `S`: south edge //! * `SE`: south east edge //! * `SW`: south west edge
//! * `C`: center of hexagon //! //!![Coordinate system](../../../../axes.svg) extern crate nalgebra as na; extern crate serde_yaml; use std::collections::HashMap; use std::f64::consts::PI; use std::fs; use std::path::PathBuf; use std::fs::File; use std::process; /// Standard colors that can be used pub mod colors { pub struct Color { value: &'static str, } impl Color { pub fn value(&self) -> &str { self.value } } impl Default for Color { fn default() -> Color { GROUND } } pub const GROUND: Color = Color { value: "#FDD9B5" }; // Sandy Tan pub const YELLOW: Color = Color { value: "#FDEE00" }; // Aureolin pub const GREEN: Color = Color { value: "#00A550" }; // Pigment Green pub const RUSSET: Color = Color { value: "#CD7F32" }; // Bronze pub const GREY: Color = Color { value: "#ACACAC" }; // Silver Chalice pub const BROWN: Color = Color { value: "#7B3F00" }; // Chocolate pub const RED: Color = Color { value: "#DC143C" }; // Crimson pub const BLUE: Color = Color { value: "#007FFF" }; // Azure pub const BARRIER: Color = Color { value: "#660000" }; // Blood Red pub const WHITE: Color = Color { value: "#FFFFFF" }; pub fn name_to_color(name: &String) -> Color { match name.to_lowercase().as_str() { "ground" => GROUND, "yellow" => YELLOW, "green" => GREEN, "russet" => RUSSET, "grey" => GREY, "brown" => BROWN, "red" => RED, "blue" => BLUE, "barrier" => BARRIER, "white" => WHITE, _ => Color { value: "#000000" }, } } } /// Converts a position code to hex coordinates /// /// Converts a position code to a hexagon-space coordinate with its origin in /// the hexagon center. /// /// # Panics /// /// On invalid position code fn edge_to_coordinate(edge: &str) -> na::Vector3<f64> { match edge { "N" => na::Vector3::new( 0.0, 0.5, 0.5), "NE" => na::Vector3::new( 0.5, 0.5, 0.0), "SE" => na::Vector3::new( 0.5, 0.0, -0.5), "S" => na::Vector3::new( 0.0, -0.5, -0.5), "SW" => na::Vector3::new(-0.5, -0.5, 0.0), "NW" => na::Vector3::new(-0.5, 0.0, 0.5), "C" => na::Vector3::new( 0.0, 0.0, 0.0), c => panic!("Invalid edge code {}", c), } } /// Converts a compass direction to a number of degrees of rotation pub fn direction_to_angle(direction: &str) -> f64 { match direction { "N" => 0.0, "NW" => -PI / 3.0, "SW" => -PI * 2.0 / 3.0, "S" => PI, "SE" => PI * 2.0 / 3.0, "NE" => PI / 3.0, c => panic!("Invalid direction {}", c), } } /// Represents named or hex space coordinate #[derive(Clone, Deserialize, Debug)] #[serde(untagged)] pub enum Coordinate { Named(String), HexSpace((f64, f64, f64)), } impl Coordinate { pub fn as_vector(&self) -> na::Vector3<f64> { match self { &Coordinate::Named(ref name) => edge_to_coordinate(name.as_ref()), &Coordinate::HexSpace(ref pos) => na::Vector3::new(pos.0, pos.1, pos.2), } } } /// Attributes that are common between Tile and TileDefinition pub trait TileSpec { fn color(&self) -> colors::Color; fn set_name(&mut self, name: String); fn name(&self) -> &str; /// The paths on the tile. fn paths(&self) -> Vec<Path>; /// The city revenue locations on the tile. 
fn cities(&self) -> Vec<City>; /// The stop revenue locations on the tile fn stops(&self) -> Vec<Stop>; /// Whether a tile should be drawn as lawson track fn is_lawson(&self) -> bool; /// Arrows on the edge of a tile fn arrows(&self) -> Vec<Coordinate> { vec![] } /// Revenue track on the tile fn revenue_track(&self) -> Option<RevenueTrack> { None } fn terrain(&self) -> Option<Terrain> { None } fn get_text<'a>(&'a self, &'a str) -> &'a str; fn text_position(&self, usize) -> Option<na::Vector3<f64>>; fn text_spec(&self) -> Vec<Text>; /// Rotation of the tile fn orientation(&self) -> f64 { 0.0 } } /// The specification of a tile to be used in the game #[derive(Deserialize)] pub struct Tile { base_tile: String, color: String, text: HashMap<String, String>, #[serde(skip)] definition: Option<TileDefinition>, } impl Tile { pub fn set_definition(&mut self, definition: &TileDefinition) { self.definition = Some(definition.clone()); } pub fn base_tile(&self) -> String { self.base_tile.clone() } } impl Default for Tile { fn default() -> Tile { Tile { base_tile: String::new(), color: String::new(), text: HashMap::new(), definition: None, } } } impl TileSpec for Tile { fn color(&self) -> colors::Color { colors::name_to_color(&self.color) } /// The number of the tile, should be the first text specified fn name(&self) -> &str { self.text.get("number").unwrap() } fn set_name(&mut self, name: String) { self.text.insert("number".to_string(), name); } fn paths(&self) -> Vec<Path> { self.definition.as_ref() .expect("You must call set_definition() before using paths()") .paths() } fn cities(&self) -> Vec<City> { self.definition.as_ref() .expect("You must call set_definition() before using cities()") .cities() } fn stops(&self) -> Vec<Stop> { self.definition.as_ref() .expect("You must call set_definition() before using stops()") .stops() } fn is_lawson(&self) -> bool { self.definition.as_ref() .expect("You must call set_definition() before using is_lawson()") .is_lawson() } fn get_text(&self, id: &str) -> &str { match self.text.get(id) { Some(s) => s, None => "", } } fn text_position(&self, id: usize) -> Option<na::Vector3<f64>> { self.definition.as_ref() .expect("You must call set_definition() before using \ text_position()") .text_position(id) } fn text_spec(&self) -> Vec<Text> { self.definition.as_ref() .expect("You must call set_definition() before using \ text_spec()") .text_spec() } } /// Definition of tile layout, does not include color or name #[derive(Clone, Deserialize, Debug)] #[serde(default)] pub struct TileDefinition { name: String, paths: Vec<Path>, cities: Vec<City>, stops: Vec<Stop>, is_lawson: bool, text: Vec<Text>, } impl Default for TileDefinition { fn default() -> TileDefinition { TileDefinition { name: "NoName".to_string(), paths: vec![], cities: vec![], stops: vec![], is_lawson: false, text: vec![], } } } impl TileSpec for TileDefinition { fn paths(&self) -> Vec<Path> { self.paths.clone() } fn cities(&self) -> Vec<City> { self.cities.clone() } fn stops(&self) -> Vec<Stop> { self.stops.clone() } fn is_lawson(&self) -> bool { self.is_lawson } fn color(&self) -> colors::Color { colors::GROUND } fn set_name(&mut self, name: String) { self.name = name; } fn name(&self) -> &str { self.name.as_str() } fn get_text<'a>(&'a self, id: &'a str) -> &'a str { match id { "number" => self.name(), x => x, } } fn text_position(&self, id: usize) -> Option<na::Vector3<f64>> { Some(self.text[id].position()) } fn text_spec(&self) -> Vec<Text> { let tile_number = Text { id: "number".to_string(), position: 
Coordinate::HexSpace((0.0, 0.0, -0.9)), anchor: TextAnchor::End, size: None, weight: None, }; let mut text = self.text.clone(); text.insert(0, tile_number); text } } /// Path on the tile /// /// A path is a line section that goes between `start point` and `end point`. /// There are two versions of each point `[start|end]` and `[start|end]_pos`, /// the `_pos` variant takes precedence over the non-`_pos` version. The /// non-`_pos` version should always be a position code, while the `_pos` /// version is a 3D position in hexagon-space. #[derive(Deserialize, Debug, Clone)] pub struct Path { start: Coordinate, end: Coordinate, pub start_control: Option<Coordinate>, pub end_control: Option<Coordinate>, #[serde(default)] is_bridge: bool, } impl Path { /// Getter that always returns the start coordinate in hexagon-space. pub fn start(&self) -> na::Vector3<f64> { self.start.as_vector() } /// Getter that always returns the end coordinate in hexagon-space. pub fn end(&self) -> na::Vector3<f64> { self.end.as_vector() } /// Whether the is_bridge flag is set pub fn is_bridge(&self) -> bool { self.is_bridge } /// The radius of the corner made by the path pub fn radius(&self) -> f64 { let gentle_curve = 2.0_f64.sqrt() / 2.0; // Gentle curves have a different radius if let (&Coordinate::Named(ref start), &Coordinate::Named(ref end)) = (&self.start, &self.end) { if start.len() == 2 && end.len() == 2 && start.chars().nth(0) == end.chars().nth(0) { // NW-NE, SW-SE return gentle_curve } else if ((start.len() == 2 && end.len() == 1) || (start.len() == 1 && end.len() == 2)) && start.chars().nth(0)!= end.chars().nth(0) { // N-SE, N-SW, etc. return gentle_curve } } // Everything else has a radius of one 1.0 } } /// City on the tile /// /// A city is a collection of circles where tokens can be put down. A city /// requires the specification of the number of circles (a positive integer) /// and the revenue (a positive integer). An optional position can also be /// given. If omitted then the position is assumed to be the center of the /// tile. The position can be given as the `pos` or `position` fields. The /// `pos` field is a coordinate in hexagon-space. The `position` field is a /// position code. #[derive(Deserialize, Debug, Clone)] pub struct City { pub circles: u32, pub text_id: String, pub revenue_position: Coordinate, position: Coordinate, } impl City { /// The coordinate of the city in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } pub fn revenue_position(&self) -> na::Vector3<f64>{ self.revenue_position.as_vector() } } /// Stop on the tile /// /// A stop is a position with a revenue number. The `position` field is an /// 3D position in hexagon-space. #[derive(Deserialize, Debug, Clone)] pub struct Stop { position: Coordinate, pub text_id: String, pub revenue_angle: i32, } impl Stop { /// The coordinate of the stop in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } } /// Text anchor position for text on tile #[derive(Deserialize, Debug, Clone)] pub enum TextAnchor { Start, Middle, End, } /// Text on the tile #[derive(Deserialize, Debug, Clone)] pub struct Text { pub id: String, position: Coordinate, size: Option<String>, pub weight: Option<u32>, pub anchor: TextAnchor, } impl Text { /// The coordinate of the text in hexagon-space. 
pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } /// The size of the text pub fn size(&self) -> Option<&str> { match self.size { None => None, Some(ref s) => Some(&s), } } } /// Track which shows revenue for different phases #[derive(Deserialize, Debug, Clone)] pub struct RevenueTrack { position: Coordinate, pub yellow: String, pub green: Option<String>, pub russet: Option<String>, pub grey: Option<String>, } impl RevenueTrack { /// The coordinate of the track in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } } /// Terrain on a tile #[derive(Clone, Deserialize)] pub struct Terrain { position: Coordinate, #[serde(rename="type")] pub terrain_type: TerrainType, pub cost: String, } impl Terrain { /// The coordinate of the terrain in hexagon-space. pub fn position(&self) -> na::Vector3<f64> { self.position.as_vector() } } /// Types of terrain that can be present #[derive(Clone, Deserialize)] #[serde(rename_all="lowercase")] pub enum TerrainType { Rough, Hill, Mountain, River, Marsh, } /// Reads and parses all tile definitions in./tiledefs/ pub fn definitions(options: &super::Options) -> HashMap<String, TileDefinition> { println!("Reading tile definitions from file..."); let def_files: Vec<PathBuf> = match fs::read_dir("tiledefs") { Err(err) => { eprintln!("Couldn't open tile definitions directory: {:?}", err.kind()); process::exit(1); } Ok(paths) => { paths.map(|path| path.unwrap().path()).collect() }, }; // Read and parse each file let mut definitions = HashMap::new(); for def in &def_files { // Ignore non.yaml files if def.extension().unwrap()!= "yaml" { continue; } if options.verbose { println!("Parsing definition {}", def.file_stem().unwrap().to_string_lossy()); } // Read yaml file let file = File::open(def).unwrap_or_else(|err| { eprintln!("Couldn't open {}: {:?}", def.to_string_lossy(), err.kind()); process::exit(1); }); let mut tile: TileDefinition = serde_yaml::from_reader(file) .unwrap_or_else(|err| { eprintln!("Error parsing {}: {}", def.to_string_lossy(), err); process::exit(1); }); tile.set_name(String::from(def.file_stem() .unwrap().to_string_lossy())); definitions.insert(String::from(tile.name()), tile); } definitions }
random_line_split
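The tile module in the example above maps position codes (`N`, `NE`, `SE`, `S`, `SW`, `NW`, `C`) to hexagon-space vectors in `edge_to_coordinate`. Below is a minimal standalone sketch of that mapping using plain `[f64; 3]` arrays instead of `nalgebra` vectors, so it compiles without the crate's dependencies; the numeric values are copied from the definition above.

/// Standalone re-statement of the position-code lookup, using plain arrays
/// in place of `na::Vector3<f64>`.
fn edge_to_coordinate(edge: &str) -> [f64; 3] {
    match edge {
        "N"  => [ 0.0,  0.5,  0.5],
        "NE" => [ 0.5,  0.5,  0.0],
        "SE" => [ 0.5,  0.0, -0.5],
        "S"  => [ 0.0, -0.5, -0.5],
        "SW" => [-0.5, -0.5,  0.0],
        "NW" => [-0.5,  0.0,  0.5],
        "C"  => [ 0.0,  0.0,  0.0],
        c => panic!("Invalid edge code {}", c),
    }
}

fn main() {
    // The north edge sits half a unit along the second and third axes.
    assert_eq!(edge_to_coordinate("N"), [0.0, 0.5, 0.5]);
    // The center of the hexagon is the origin of hexagon-space.
    assert_eq!(edge_to_coordinate("C"), [0.0, 0.0, 0.0]);
    println!("NE -> {:?}", edge_to_coordinate("NE"));
}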
account_transform.rs
//! Functionality for modifying accounts according to actions. use std::{collections::BTreeMap, marker::PhantomData}; use anyhow::bail; use async_trait::*; use ed25519_dalek::Signer; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::blockdata::{Action, MainBlock, SendInfo}; use crate::crypto::{hash, sign, verify_sig, Hash, HashCode, Signature}; use crate::hashlookup::HashLookup; use crate::hex_path::{bytes_to_path, HexPath}; use crate::queries::{lookup_account, lookup_data_in_account}; /// An typed account data field. #[derive(Serialize, Deserialize, Debug)] pub struct TypedDataField<T> { /// The path of the field in account data. pub path: HexPath, /// Phantom data for the type `T`. phantom: PhantomData<T>, } impl<T> Clone for TypedDataField<T> { fn clone(&self) -> Self { Self { path: self.path.clone(), phantom: PhantomData, } } } impl<T> TypedDataField<T> { /// Creates a `TypedDataField` given a path. pub fn from_path(path: HexPath) -> TypedDataField<T> { TypedDataField { path, phantom: PhantomData, } } } /// Account balance field. pub fn field_balance() -> TypedDataField<u128> { TypedDataField::from_path(bytes_to_path(b"balance")) } /// Account stake field. pub fn field_stake() -> TypedDataField<u128> { TypedDataField::from_path(bytes_to_path(b"stake")) } /// Account public key field. pub fn field_public_key() -> TypedDataField<ed25519_dalek::PublicKey> { TypedDataField::from_path(bytes_to_path(b"public_key")) } /// Field for a `SendInfo` stored in the sender's data. pub fn field_send(send: Hash<SendInfo>) -> TypedDataField<SendInfo> { let mut path = bytes_to_path(b"send"); path.0.extend(&bytes_to_path(&send.code).0); TypedDataField::from_path(path) } /// Field for tracking whether a `SendInfo` has been received in the receiver's /// data. pub fn field_received(send: Hash<SendInfo>) -> TypedDataField<bool> { let mut path = bytes_to_path(b"received"); path.0.extend(&bytes_to_path(&send.code).0); TypedDataField::from_path(path) } /// A context providing operations related to transforming an account (e.g. /// running actions). pub struct AccountTransform<'a, HL: HashLookup> { /// The `HashLookup` used to look up previous account data. pub hl: &'a HL, /// Whether this account is initializing. pub is_initializing: bool, /// The account being transformed. pub this_account: HashCode, /// The hash code of the last main block. pub last_main: Hash<MainBlock>, /// Which fields have been overwritten so far, and their most recent values. pub fields_set: BTreeMap<HexPath, Vec<u8>>, } #[async_trait] impl<'a, HL: HashLookup> HashLookup for AccountTransform<'a, HL> { async fn lookup_bytes(&self, hash: HashCode) -> Result<Vec<u8>, anyhow::Error> { self.hl.lookup_bytes(hash).await } } impl<'a, HL: HashLookup> AccountTransform<'a, HL> { /// Creates a new `AccountTransform`. pub fn new( hl: &'a HL, is_initializing: bool, this_account: HashCode, last_main: Hash<MainBlock>, ) -> AccountTransform<'a, HL> { AccountTransform { hl, is_initializing, this_account, last_main, fields_set: BTreeMap::new(), } } /// Gets the value of a given data field. async fn get_data_field_bytes( &self, acct: HashCode, field_name: &HexPath, ) -> Result<Option<Vec<u8>>, anyhow::Error> { if acct == self.this_account { match self.fields_set.get(field_name) { Some(x) => { return Ok(Some(x.clone())); } None => {} } } let main = self.lookup(self.last_main).await?; if let Some(acct_node) = lookup_account(self, &main.block.body, self.this_account).await? 
{ lookup_data_in_account(self, &acct_node, field_name).await } else { Ok(None) } } /// Sets the value of a given data field. fn set_data_field_bytes( &mut self, field_name: &HexPath, value: Vec<u8>, ) -> Result<(), anyhow::Error> { self.fields_set.insert(field_name.clone(), value); Ok(()) } /// Gets the value of a given typed data field. async fn get_data_field<T: DeserializeOwned>( &self, acct: HashCode, field: &TypedDataField<T>, ) -> Result<Option<T>, anyhow::Error> { match self.get_data_field_bytes(acct, &field.path).await? { None => Ok(None), Some(bs) => Ok(Some(rmp_serde::from_read(bs.as_slice())?)), } } /// Gets the value of a given typed data field, throwing an error if it is not found. pub async fn get_data_field_or_error<T: DeserializeOwned>( &self, acct: HashCode, field: &TypedDataField<T>, ) -> Result<T, anyhow::Error> { match self.get_data_field(acct, field).await? { None => bail!("data field not found: {:?}", field.path), Some(x) => Ok(x), } } /// Sets the value of a given typed data field. fn set_data_field<T: Serialize>( &mut self, field: &TypedDataField<T>, value: &T, ) -> Result<(), anyhow::Error> { self.set_data_field_bytes(&field.path, rmp_serde::to_vec_named(value)?) } } /// Causes the current account to pay a fee. async fn pay_fee<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, fee: u128, ) -> Result<(), anyhow::Error> { let bal = at .get_data_field_or_error(at.this_account, &field_balance()) .await?; if bal < fee { bail!("not enough balance for fee"); } at.set_data_field(&field_balance(), &(bal - fee)) } /// Causes the current account to send. async fn do_send<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, send: &SendInfo, ) -> Result<(), anyhow::Error> { if send.sender!= at.this_account { bail!("sender must be sent by this account"); } if send.last_main!= at.last_main { bail!("last main of send must be the current last main"); } let bal = at .get_data_field_or_error(at.this_account, &field_balance()) .await?; if bal < send.send_amount { bail!("not enough balance for send"); } let send_df = field_send(hash(send)); if at .get_data_field(at.this_account, &send_df) .await? .is_some() { bail!("that was already sent"); } at.set_data_field(&field_balance(), &(bal - send.send_amount))?; at.set_data_field(&send_df, send)?; Ok(()) } /// Causes the current account to receive. async fn do_receive<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, sender: HashCode, send_hash: Hash<SendInfo>, ) -> Result<SendInfo, anyhow::Error> { let send = at .get_data_field_or_error(sender, &field_send(send_hash)) .await?; if hash(&send)!= send_hash { bail!("send hashes don't match"); } if send.recipient!= at.this_account { bail!("recipient of send doesn't match recipient"); } let received_field = field_received(send_hash); let already_received = at.get_data_field(at.this_account, &received_field).await?; if already_received == Some(true) { bail!("tried to receive the same send twice"); } let bal = at .get_data_field_or_error(at.this_account, &field_balance()) .await?; at.set_data_field(&field_balance(), &(bal + send.send_amount))?; at.set_data_field(&received_field, &true)?; Ok(send) } /// Gets an argument out of action arguments. fn get_arg<T: DeserializeOwned>(args: &Vec<Vec<u8>>, i: usize) -> Result<T, anyhow::Error> { if i >= args.len() { bail!("too few arguments"); } Ok(rmp_serde::from_read(args[i].as_slice())?) 
} /// Verifies that the argument at a given index is a signature of a modified /// version of the action where the signature itself is replaced with /// an empty vector, and also that the signature's account matches the /// given account. fn verify_signature_argument( acct: HashCode, action: &Action, i: usize, ) -> Result<(), anyhow::Error> { let sig: Signature<Action> = get_arg(&action.args, i)?; if sig.account()!= acct { bail!("signature account must equal current account"); } let mut act2 = action.clone(); act2.args[i] = Vec::new(); if!verify_sig(&act2, &sig) { bail!("invalid signature"); } Ok(()) } /// Runs an action in a given `AccountTransform` context. pub async fn run_action<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, action: &Action, ) -> Result<(), anyhow::Error> { if at.last_main!= action.last_main { bail!("action last main must equal current last main"); } if action.command == b"send" { if at.is_initializing
let recipient: HashCode = get_arg(&action.args, 0)?; let send_amount: u128 = get_arg(&action.args, 1)?; let initialize_spec: Option<Hash<Vec<u8>>> = get_arg(&action.args, 2)?; let message: Vec<u8> = get_arg(&action.args, 3)?; verify_signature_argument(at.this_account, action, 4)?; pay_fee(at, action.fee).await?; let send = SendInfo { last_main: action.last_main, sender: at.this_account, recipient, send_amount, initialize_spec, message, }; do_send(at, &send).await?; } else if action.command == b"receive" { let sender: HashCode = get_arg(&action.args, 0)?; let send_hash: Hash<SendInfo> = get_arg(&action.args, 1)?; let sig: Signature<Action> = get_arg(&action.args, 2)?; verify_signature_argument(at.this_account, action, 2)?; if at.is_initializing { at.set_data_field(&field_balance(), &0)?; at.set_data_field(&field_stake(), &0)?; at.set_data_field(&field_public_key(), &sig.key)?; } do_receive(at, sender, send_hash).await?; pay_fee(at, action.fee).await?; } else { bail!("unknown command {:?}", action.command); } Ok(()) } /// Creates a send action. pub fn mk_send( last_main: Hash<MainBlock>, fee: u128, recipient: HashCode, send_amount: u128, initialize_spec: Option<Hash<Vec<u8>>>, message: Vec<u8>, key: &ed25519_dalek::Keypair, ) -> (Action, SendInfo) { let mut act = Action { last_main, fee, command: b"send".to_vec(), args: vec![ rmp_serde::to_vec_named(&recipient).unwrap(), rmp_serde::to_vec_named(&send_amount).unwrap(), rmp_serde::to_vec_named(&initialize_spec).unwrap(), rmp_serde::to_vec_named(&message).unwrap(), vec![], ], }; act.args[4] = rmp_serde::to_vec_named(&sign(&key, &act)).unwrap(); let si = SendInfo { last_main, sender: hash(&key.public).code, recipient, send_amount, initialize_spec, message, }; (act, si) } /// Creates a receive action. pub fn mk_receive( last_main: Hash<MainBlock>, fee: u128, sender: HashCode, send_hash: Hash<SendInfo>, key: ed25519_dalek::Keypair, ) -> Action { let mut act = Action { last_main, fee, command: b"receive".to_vec(), args: vec![ rmp_serde::to_vec_named(&sender).unwrap(), rmp_serde::to_vec_named(&send_hash).unwrap(), vec![], ], }; act.args[2] = rmp_serde::to_vec_named(&sign(&key, &act)).unwrap(); act } #[cfg(test)] mod tests { use super::*; use crate::crypto; #[test] fn verify_send() { let last_main = Hash::<MainBlock> { code: [0; 32], phantom: PhantomData, }; let fee: u128 = 5; let recipient: HashCode = [0; 32]; let send_amount: u128 = 25; let init_spec: Option<Hash<Vec<u8>>> = None; let msg: Vec<u8> = vec![]; let key = crypto::gen_private_key(); let (act, si) = mk_send(last_main, fee, recipient, send_amount, init_spec, msg, &key); let res = verify_signature_argument(si.sender, &act, 4); assert!(res.is_ok(), "got error: {}", res.unwrap_err()); } }
{ bail!("send can't initialize an account"); }
conditional_block
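The `account_transform.rs` example above keys account data by `TypedDataField<T>`, pairing an untyped path with a `PhantomData<T>` so reads and writes stay type-checked while storage remains raw bytes. The sketch below restates only that pattern; the `Field` and `Store` names, the `Vec<u8>` paths, and the `ToString`/`FromStr` encoding are stand-ins invented here to avoid the crate's `HexPath`, `HashLookup`, and MessagePack serialization.

use std::collections::BTreeMap;
use std::marker::PhantomData;
use std::str::FromStr;

/// A typed key into untyped account data (analogue of `TypedDataField<T>`).
struct Field<T> {
    path: Vec<u8>,
    _marker: PhantomData<T>,
}

impl<T> Field<T> {
    fn new(path: &[u8]) -> Self {
        Field { path: path.to_vec(), _marker: PhantomData }
    }
}

/// In-memory stand-in for the account's raw byte store.
#[derive(Default)]
struct Store {
    data: BTreeMap<Vec<u8>, Vec<u8>>,
}

impl Store {
    // Values are encoded with `ToString`/`FromStr` purely to avoid a serde
    // dependency; the real code uses MessagePack via `rmp_serde`.
    fn set<T: ToString>(&mut self, field: &Field<T>, value: &T) {
        self.data.insert(field.path.clone(), value.to_string().into_bytes());
    }

    fn get<T: FromStr>(&self, field: &Field<T>) -> Option<T> {
        let bytes = self.data.get(&field.path)?;
        std::str::from_utf8(bytes).ok()?.parse().ok()
    }
}

fn main() {
    // Analogue of `field_balance()`: a u128 stored under the path b"balance".
    let balance: Field<u128> = Field::new(b"balance");
    let mut store = Store::default();
    store.set(&balance, &100u128);
    assert_eq!(store.get(&balance), Some(100u128));
}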
account_transform.rs
//! Functionality for modifying accounts according to actions. use std::{collections::BTreeMap, marker::PhantomData}; use anyhow::bail; use async_trait::*; use ed25519_dalek::Signer; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::blockdata::{Action, MainBlock, SendInfo}; use crate::crypto::{hash, sign, verify_sig, Hash, HashCode, Signature}; use crate::hashlookup::HashLookup; use crate::hex_path::{bytes_to_path, HexPath}; use crate::queries::{lookup_account, lookup_data_in_account}; /// An typed account data field. #[derive(Serialize, Deserialize, Debug)] pub struct TypedDataField<T> { /// The path of the field in account data. pub path: HexPath, /// Phantom data for the type `T`. phantom: PhantomData<T>, } impl<T> Clone for TypedDataField<T> { fn clone(&self) -> Self { Self { path: self.path.clone(), phantom: PhantomData, } } }
pub fn from_path(path: HexPath) -> TypedDataField<T> { TypedDataField { path, phantom: PhantomData, } } } /// Account balance field. pub fn field_balance() -> TypedDataField<u128> { TypedDataField::from_path(bytes_to_path(b"balance")) } /// Account stake field. pub fn field_stake() -> TypedDataField<u128> { TypedDataField::from_path(bytes_to_path(b"stake")) } /// Account public key field. pub fn field_public_key() -> TypedDataField<ed25519_dalek::PublicKey> { TypedDataField::from_path(bytes_to_path(b"public_key")) } /// Field for a `SendInfo` stored in the sender's data. pub fn field_send(send: Hash<SendInfo>) -> TypedDataField<SendInfo> { let mut path = bytes_to_path(b"send"); path.0.extend(&bytes_to_path(&send.code).0); TypedDataField::from_path(path) } /// Field for tracking whether a `SendInfo` has been received in the receiver's /// data. pub fn field_received(send: Hash<SendInfo>) -> TypedDataField<bool> { let mut path = bytes_to_path(b"received"); path.0.extend(&bytes_to_path(&send.code).0); TypedDataField::from_path(path) } /// A context providing operations related to transforming an account (e.g. /// running actions). pub struct AccountTransform<'a, HL: HashLookup> { /// The `HashLookup` used to look up previous account data. pub hl: &'a HL, /// Whether this account is initializing. pub is_initializing: bool, /// The account being transformed. pub this_account: HashCode, /// The hash code of the last main block. pub last_main: Hash<MainBlock>, /// Which fields have been overwritten so far, and their most recent values. pub fields_set: BTreeMap<HexPath, Vec<u8>>, } #[async_trait] impl<'a, HL: HashLookup> HashLookup for AccountTransform<'a, HL> { async fn lookup_bytes(&self, hash: HashCode) -> Result<Vec<u8>, anyhow::Error> { self.hl.lookup_bytes(hash).await } } impl<'a, HL: HashLookup> AccountTransform<'a, HL> { /// Creates a new `AccountTransform`. pub fn new( hl: &'a HL, is_initializing: bool, this_account: HashCode, last_main: Hash<MainBlock>, ) -> AccountTransform<'a, HL> { AccountTransform { hl, is_initializing, this_account, last_main, fields_set: BTreeMap::new(), } } /// Gets the value of a given data field. async fn get_data_field_bytes( &self, acct: HashCode, field_name: &HexPath, ) -> Result<Option<Vec<u8>>, anyhow::Error> { if acct == self.this_account { match self.fields_set.get(field_name) { Some(x) => { return Ok(Some(x.clone())); } None => {} } } let main = self.lookup(self.last_main).await?; if let Some(acct_node) = lookup_account(self, &main.block.body, self.this_account).await? { lookup_data_in_account(self, &acct_node, field_name).await } else { Ok(None) } } /// Sets the value of a given data field. fn set_data_field_bytes( &mut self, field_name: &HexPath, value: Vec<u8>, ) -> Result<(), anyhow::Error> { self.fields_set.insert(field_name.clone(), value); Ok(()) } /// Gets the value of a given typed data field. async fn get_data_field<T: DeserializeOwned>( &self, acct: HashCode, field: &TypedDataField<T>, ) -> Result<Option<T>, anyhow::Error> { match self.get_data_field_bytes(acct, &field.path).await? { None => Ok(None), Some(bs) => Ok(Some(rmp_serde::from_read(bs.as_slice())?)), } } /// Gets the value of a given typed data field, throwing an error if it is not found. pub async fn get_data_field_or_error<T: DeserializeOwned>( &self, acct: HashCode, field: &TypedDataField<T>, ) -> Result<T, anyhow::Error> { match self.get_data_field(acct, field).await? 
{ None => bail!("data field not found: {:?}", field.path), Some(x) => Ok(x), } } /// Sets the value of a given typed data field. fn set_data_field<T: Serialize>( &mut self, field: &TypedDataField<T>, value: &T, ) -> Result<(), anyhow::Error> { self.set_data_field_bytes(&field.path, rmp_serde::to_vec_named(value)?) } } /// Causes the current account to pay a fee. async fn pay_fee<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, fee: u128, ) -> Result<(), anyhow::Error> { let bal = at .get_data_field_or_error(at.this_account, &field_balance()) .await?; if bal < fee { bail!("not enough balance for fee"); } at.set_data_field(&field_balance(), &(bal - fee)) } /// Causes the current account to send. async fn do_send<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, send: &SendInfo, ) -> Result<(), anyhow::Error> { if send.sender!= at.this_account { bail!("sender must be sent by this account"); } if send.last_main!= at.last_main { bail!("last main of send must be the current last main"); } let bal = at .get_data_field_or_error(at.this_account, &field_balance()) .await?; if bal < send.send_amount { bail!("not enough balance for send"); } let send_df = field_send(hash(send)); if at .get_data_field(at.this_account, &send_df) .await? .is_some() { bail!("that was already sent"); } at.set_data_field(&field_balance(), &(bal - send.send_amount))?; at.set_data_field(&send_df, send)?; Ok(()) } /// Causes the current account to receive. async fn do_receive<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, sender: HashCode, send_hash: Hash<SendInfo>, ) -> Result<SendInfo, anyhow::Error> { let send = at .get_data_field_or_error(sender, &field_send(send_hash)) .await?; if hash(&send)!= send_hash { bail!("send hashes don't match"); } if send.recipient!= at.this_account { bail!("recipient of send doesn't match recipient"); } let received_field = field_received(send_hash); let already_received = at.get_data_field(at.this_account, &received_field).await?; if already_received == Some(true) { bail!("tried to receive the same send twice"); } let bal = at .get_data_field_or_error(at.this_account, &field_balance()) .await?; at.set_data_field(&field_balance(), &(bal + send.send_amount))?; at.set_data_field(&received_field, &true)?; Ok(send) } /// Gets an argument out of action arguments. fn get_arg<T: DeserializeOwned>(args: &Vec<Vec<u8>>, i: usize) -> Result<T, anyhow::Error> { if i >= args.len() { bail!("too few arguments"); } Ok(rmp_serde::from_read(args[i].as_slice())?) } /// Verifies that the argument at a given index is a signature of a modified /// version of the action where the signature itself is replaced with /// an empty vector, and also that the signature's account matches the /// given account. fn verify_signature_argument( acct: HashCode, action: &Action, i: usize, ) -> Result<(), anyhow::Error> { let sig: Signature<Action> = get_arg(&action.args, i)?; if sig.account()!= acct { bail!("signature account must equal current account"); } let mut act2 = action.clone(); act2.args[i] = Vec::new(); if!verify_sig(&act2, &sig) { bail!("invalid signature"); } Ok(()) } /// Runs an action in a given `AccountTransform` context. 
pub async fn run_action<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, action: &Action, ) -> Result<(), anyhow::Error> { if at.last_main!= action.last_main { bail!("action last main must equal current last main"); } if action.command == b"send" { if at.is_initializing { bail!("send can't initialize an account"); } let recipient: HashCode = get_arg(&action.args, 0)?; let send_amount: u128 = get_arg(&action.args, 1)?; let initialize_spec: Option<Hash<Vec<u8>>> = get_arg(&action.args, 2)?; let message: Vec<u8> = get_arg(&action.args, 3)?; verify_signature_argument(at.this_account, action, 4)?; pay_fee(at, action.fee).await?; let send = SendInfo { last_main: action.last_main, sender: at.this_account, recipient, send_amount, initialize_spec, message, }; do_send(at, &send).await?; } else if action.command == b"receive" { let sender: HashCode = get_arg(&action.args, 0)?; let send_hash: Hash<SendInfo> = get_arg(&action.args, 1)?; let sig: Signature<Action> = get_arg(&action.args, 2)?; verify_signature_argument(at.this_account, action, 2)?; if at.is_initializing { at.set_data_field(&field_balance(), &0)?; at.set_data_field(&field_stake(), &0)?; at.set_data_field(&field_public_key(), &sig.key)?; } do_receive(at, sender, send_hash).await?; pay_fee(at, action.fee).await?; } else { bail!("unknown command {:?}", action.command); } Ok(()) } /// Creates a send action. pub fn mk_send( last_main: Hash<MainBlock>, fee: u128, recipient: HashCode, send_amount: u128, initialize_spec: Option<Hash<Vec<u8>>>, message: Vec<u8>, key: &ed25519_dalek::Keypair, ) -> (Action, SendInfo) { let mut act = Action { last_main, fee, command: b"send".to_vec(), args: vec![ rmp_serde::to_vec_named(&recipient).unwrap(), rmp_serde::to_vec_named(&send_amount).unwrap(), rmp_serde::to_vec_named(&initialize_spec).unwrap(), rmp_serde::to_vec_named(&message).unwrap(), vec![], ], }; act.args[4] = rmp_serde::to_vec_named(&sign(&key, &act)).unwrap(); let si = SendInfo { last_main, sender: hash(&key.public).code, recipient, send_amount, initialize_spec, message, }; (act, si) } /// Creates a receive action. pub fn mk_receive( last_main: Hash<MainBlock>, fee: u128, sender: HashCode, send_hash: Hash<SendInfo>, key: ed25519_dalek::Keypair, ) -> Action { let mut act = Action { last_main, fee, command: b"receive".to_vec(), args: vec![ rmp_serde::to_vec_named(&sender).unwrap(), rmp_serde::to_vec_named(&send_hash).unwrap(), vec![], ], }; act.args[2] = rmp_serde::to_vec_named(&sign(&key, &act)).unwrap(); act } #[cfg(test)] mod tests { use super::*; use crate::crypto; #[test] fn verify_send() { let last_main = Hash::<MainBlock> { code: [0; 32], phantom: PhantomData, }; let fee: u128 = 5; let recipient: HashCode = [0; 32]; let send_amount: u128 = 25; let init_spec: Option<Hash<Vec<u8>>> = None; let msg: Vec<u8> = vec![]; let key = crypto::gen_private_key(); let (act, si) = mk_send(last_main, fee, recipient, send_amount, init_spec, msg, &key); let res = verify_signature_argument(si.sender, &act, 4); assert!(res.is_ok(), "got error: {}", res.unwrap_err()); } }
impl<T> TypedDataField<T> { /// Creates a `TypedDataField` given a path.
random_line_split
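One detail worth calling out in the code above: `mk_send` signs the action with the signature slot (`args[4]`) left as an empty vector, and `verify_signature_argument` re-blanks that slot before checking, so the signature can live inside the message it covers. The standalone sketch below shows the same blank-then-sign / blank-then-verify idea over a plain list of byte arguments. It assumes `ed25519-dalek` 1.x (`Keypair`, `Signer`, `Verifier`) and `rand` 0.7; those versions and the simple length-prefixed encoding are assumptions made here, not taken from the file.

// Assumed dependencies (not from this file): ed25519-dalek = "1", rand = "0.7".
use ed25519_dalek::{Keypair, Signer, Verifier};
use rand::rngs::OsRng;

/// Flatten the argument list into one byte string to sign.
/// The real code serializes the whole `Action` with MessagePack instead.
fn encode_args(args: &[Vec<u8>]) -> Vec<u8> {
    let mut out = Vec::new();
    for a in args {
        out.extend_from_slice(&(a.len() as u64).to_le_bytes());
        out.extend_from_slice(a);
    }
    out
}

fn main() {
    let keypair = Keypair::generate(&mut OsRng);

    // Sign the arguments while the signature slot is still empty,
    // mirroring how `mk_send` signs before filling `act.args[4]`.
    let mut args: Vec<Vec<u8>> = vec![b"recipient".to_vec(), b"25".to_vec(), Vec::new()];
    let sig = keypair.sign(&encode_args(&args));
    args[2] = sig.to_bytes().to_vec();

    // To verify, blank the slot again and check against that encoding,
    // mirroring `verify_signature_argument` clearing `act2.args[i]`.
    let mut blanked = args.clone();
    blanked[2] = Vec::new();
    assert!(keypair.public.verify(&encode_args(&blanked), &sig).is_ok());

    // Verifying against the filled-in arguments must fail: the signature
    // covers the action with an empty slot, not with itself embedded.
    assert!(keypair.public.verify(&encode_args(&args), &sig).is_err());
}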
account_transform.rs
//! Functionality for modifying accounts according to actions. use std::{collections::BTreeMap, marker::PhantomData}; use anyhow::bail; use async_trait::*; use ed25519_dalek::Signer; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::blockdata::{Action, MainBlock, SendInfo}; use crate::crypto::{hash, sign, verify_sig, Hash, HashCode, Signature}; use crate::hashlookup::HashLookup; use crate::hex_path::{bytes_to_path, HexPath}; use crate::queries::{lookup_account, lookup_data_in_account}; /// An typed account data field. #[derive(Serialize, Deserialize, Debug)] pub struct TypedDataField<T> { /// The path of the field in account data. pub path: HexPath, /// Phantom data for the type `T`. phantom: PhantomData<T>, } impl<T> Clone for TypedDataField<T> { fn clone(&self) -> Self { Self { path: self.path.clone(), phantom: PhantomData, } } } impl<T> TypedDataField<T> { /// Creates a `TypedDataField` given a path. pub fn from_path(path: HexPath) -> TypedDataField<T> { TypedDataField { path, phantom: PhantomData, } } } /// Account balance field. pub fn field_balance() -> TypedDataField<u128> { TypedDataField::from_path(bytes_to_path(b"balance")) } /// Account stake field. pub fn field_stake() -> TypedDataField<u128> { TypedDataField::from_path(bytes_to_path(b"stake")) } /// Account public key field. pub fn field_public_key() -> TypedDataField<ed25519_dalek::PublicKey> { TypedDataField::from_path(bytes_to_path(b"public_key")) } /// Field for a `SendInfo` stored in the sender's data. pub fn field_send(send: Hash<SendInfo>) -> TypedDataField<SendInfo> { let mut path = bytes_to_path(b"send"); path.0.extend(&bytes_to_path(&send.code).0); TypedDataField::from_path(path) } /// Field for tracking whether a `SendInfo` has been received in the receiver's /// data. pub fn field_received(send: Hash<SendInfo>) -> TypedDataField<bool>
/// A context providing operations related to transforming an account (e.g. /// running actions). pub struct AccountTransform<'a, HL: HashLookup> { /// The `HashLookup` used to look up previous account data. pub hl: &'a HL, /// Whether this account is initializing. pub is_initializing: bool, /// The account being transformed. pub this_account: HashCode, /// The hash code of the last main block. pub last_main: Hash<MainBlock>, /// Which fields have been overwritten so far, and their most recent values. pub fields_set: BTreeMap<HexPath, Vec<u8>>, } #[async_trait] impl<'a, HL: HashLookup> HashLookup for AccountTransform<'a, HL> { async fn lookup_bytes(&self, hash: HashCode) -> Result<Vec<u8>, anyhow::Error> { self.hl.lookup_bytes(hash).await } } impl<'a, HL: HashLookup> AccountTransform<'a, HL> { /// Creates a new `AccountTransform`. pub fn new( hl: &'a HL, is_initializing: bool, this_account: HashCode, last_main: Hash<MainBlock>, ) -> AccountTransform<'a, HL> { AccountTransform { hl, is_initializing, this_account, last_main, fields_set: BTreeMap::new(), } } /// Gets the value of a given data field. async fn get_data_field_bytes( &self, acct: HashCode, field_name: &HexPath, ) -> Result<Option<Vec<u8>>, anyhow::Error> { if acct == self.this_account { match self.fields_set.get(field_name) { Some(x) => { return Ok(Some(x.clone())); } None => {} } } let main = self.lookup(self.last_main).await?; if let Some(acct_node) = lookup_account(self, &main.block.body, self.this_account).await? { lookup_data_in_account(self, &acct_node, field_name).await } else { Ok(None) } } /// Sets the value of a given data field. fn set_data_field_bytes( &mut self, field_name: &HexPath, value: Vec<u8>, ) -> Result<(), anyhow::Error> { self.fields_set.insert(field_name.clone(), value); Ok(()) } /// Gets the value of a given typed data field. async fn get_data_field<T: DeserializeOwned>( &self, acct: HashCode, field: &TypedDataField<T>, ) -> Result<Option<T>, anyhow::Error> { match self.get_data_field_bytes(acct, &field.path).await? { None => Ok(None), Some(bs) => Ok(Some(rmp_serde::from_read(bs.as_slice())?)), } } /// Gets the value of a given typed data field, throwing an error if it is not found. pub async fn get_data_field_or_error<T: DeserializeOwned>( &self, acct: HashCode, field: &TypedDataField<T>, ) -> Result<T, anyhow::Error> { match self.get_data_field(acct, field).await? { None => bail!("data field not found: {:?}", field.path), Some(x) => Ok(x), } } /// Sets the value of a given typed data field. fn set_data_field<T: Serialize>( &mut self, field: &TypedDataField<T>, value: &T, ) -> Result<(), anyhow::Error> { self.set_data_field_bytes(&field.path, rmp_serde::to_vec_named(value)?) } } /// Causes the current account to pay a fee. async fn pay_fee<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, fee: u128, ) -> Result<(), anyhow::Error> { let bal = at .get_data_field_or_error(at.this_account, &field_balance()) .await?; if bal < fee { bail!("not enough balance for fee"); } at.set_data_field(&field_balance(), &(bal - fee)) } /// Causes the current account to send. 
async fn do_send<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, send: &SendInfo, ) -> Result<(), anyhow::Error> { if send.sender!= at.this_account { bail!("sender must be sent by this account"); } if send.last_main!= at.last_main { bail!("last main of send must be the current last main"); } let bal = at .get_data_field_or_error(at.this_account, &field_balance()) .await?; if bal < send.send_amount { bail!("not enough balance for send"); } let send_df = field_send(hash(send)); if at .get_data_field(at.this_account, &send_df) .await? .is_some() { bail!("that was already sent"); } at.set_data_field(&field_balance(), &(bal - send.send_amount))?; at.set_data_field(&send_df, send)?; Ok(()) } /// Causes the current account to receive. async fn do_receive<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, sender: HashCode, send_hash: Hash<SendInfo>, ) -> Result<SendInfo, anyhow::Error> { let send = at .get_data_field_or_error(sender, &field_send(send_hash)) .await?; if hash(&send)!= send_hash { bail!("send hashes don't match"); } if send.recipient!= at.this_account { bail!("recipient of send doesn't match recipient"); } let received_field = field_received(send_hash); let already_received = at.get_data_field(at.this_account, &received_field).await?; if already_received == Some(true) { bail!("tried to receive the same send twice"); } let bal = at .get_data_field_or_error(at.this_account, &field_balance()) .await?; at.set_data_field(&field_balance(), &(bal + send.send_amount))?; at.set_data_field(&received_field, &true)?; Ok(send) } /// Gets an argument out of action arguments. fn get_arg<T: DeserializeOwned>(args: &Vec<Vec<u8>>, i: usize) -> Result<T, anyhow::Error> { if i >= args.len() { bail!("too few arguments"); } Ok(rmp_serde::from_read(args[i].as_slice())?) } /// Verifies that the argument at a given index is a signature of a modified /// version of the action where the signature itself is replaced with /// an empty vector, and also that the signature's account matches the /// given account. fn verify_signature_argument( acct: HashCode, action: &Action, i: usize, ) -> Result<(), anyhow::Error> { let sig: Signature<Action> = get_arg(&action.args, i)?; if sig.account()!= acct { bail!("signature account must equal current account"); } let mut act2 = action.clone(); act2.args[i] = Vec::new(); if!verify_sig(&act2, &sig) { bail!("invalid signature"); } Ok(()) } /// Runs an action in a given `AccountTransform` context. 
pub async fn run_action<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, action: &Action, ) -> Result<(), anyhow::Error> { if at.last_main!= action.last_main { bail!("action last main must equal current last main"); } if action.command == b"send" { if at.is_initializing { bail!("send can't initialize an account"); } let recipient: HashCode = get_arg(&action.args, 0)?; let send_amount: u128 = get_arg(&action.args, 1)?; let initialize_spec: Option<Hash<Vec<u8>>> = get_arg(&action.args, 2)?; let message: Vec<u8> = get_arg(&action.args, 3)?; verify_signature_argument(at.this_account, action, 4)?; pay_fee(at, action.fee).await?; let send = SendInfo { last_main: action.last_main, sender: at.this_account, recipient, send_amount, initialize_spec, message, }; do_send(at, &send).await?; } else if action.command == b"receive" { let sender: HashCode = get_arg(&action.args, 0)?; let send_hash: Hash<SendInfo> = get_arg(&action.args, 1)?; let sig: Signature<Action> = get_arg(&action.args, 2)?; verify_signature_argument(at.this_account, action, 2)?; if at.is_initializing { at.set_data_field(&field_balance(), &0)?; at.set_data_field(&field_stake(), &0)?; at.set_data_field(&field_public_key(), &sig.key)?; } do_receive(at, sender, send_hash).await?; pay_fee(at, action.fee).await?; } else { bail!("unknown command {:?}", action.command); } Ok(()) } /// Creates a send action. pub fn mk_send( last_main: Hash<MainBlock>, fee: u128, recipient: HashCode, send_amount: u128, initialize_spec: Option<Hash<Vec<u8>>>, message: Vec<u8>, key: &ed25519_dalek::Keypair, ) -> (Action, SendInfo) { let mut act = Action { last_main, fee, command: b"send".to_vec(), args: vec![ rmp_serde::to_vec_named(&recipient).unwrap(), rmp_serde::to_vec_named(&send_amount).unwrap(), rmp_serde::to_vec_named(&initialize_spec).unwrap(), rmp_serde::to_vec_named(&message).unwrap(), vec![], ], }; act.args[4] = rmp_serde::to_vec_named(&sign(&key, &act)).unwrap(); let si = SendInfo { last_main, sender: hash(&key.public).code, recipient, send_amount, initialize_spec, message, }; (act, si) } /// Creates a receive action. pub fn mk_receive( last_main: Hash<MainBlock>, fee: u128, sender: HashCode, send_hash: Hash<SendInfo>, key: ed25519_dalek::Keypair, ) -> Action { let mut act = Action { last_main, fee, command: b"receive".to_vec(), args: vec![ rmp_serde::to_vec_named(&sender).unwrap(), rmp_serde::to_vec_named(&send_hash).unwrap(), vec![], ], }; act.args[2] = rmp_serde::to_vec_named(&sign(&key, &act)).unwrap(); act } #[cfg(test)] mod tests { use super::*; use crate::crypto; #[test] fn verify_send() { let last_main = Hash::<MainBlock> { code: [0; 32], phantom: PhantomData, }; let fee: u128 = 5; let recipient: HashCode = [0; 32]; let send_amount: u128 = 25; let init_spec: Option<Hash<Vec<u8>>> = None; let msg: Vec<u8> = vec![]; let key = crypto::gen_private_key(); let (act, si) = mk_send(last_main, fee, recipient, send_amount, init_spec, msg, &key); let res = verify_signature_argument(si.sender, &act, 4); assert!(res.is_ok(), "got error: {}", res.unwrap_err()); } }
{ let mut path = bytes_to_path(b"received"); path.0.extend(&bytes_to_path(&send.code).0); TypedDataField::from_path(path) }
identifier_body
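The `pay_fee`, `do_send`, and `do_receive` helpers above all follow the same check-then-write shape: read the balance field, bail if it cannot cover the amount, then write the updated value. The sketch below restates that flow over a plain in-memory map, with the crate's `anyhow` errors replaced by `Result<(), String>` so it has no dependencies; the account name and amounts are made up for illustration.

use std::collections::BTreeMap;

/// Debit a fee from an account, failing if the balance cannot cover it.
/// This mirrors the check in `pay_fee`: read, compare, then overwrite.
fn pay_fee(balances: &mut BTreeMap<String, u128>, account: &str, fee: u128) -> Result<(), String> {
    let bal = *balances
        .get(account)
        .ok_or_else(|| format!("data field not found: balance of {}", account))?;
    if bal < fee {
        return Err("not enough balance for fee".to_string());
    }
    balances.insert(account.to_string(), bal - fee);
    Ok(())
}

fn main() {
    let mut balances = BTreeMap::new();
    balances.insert("alice".to_string(), 30u128);

    assert!(pay_fee(&mut balances, "alice", 5).is_ok());
    assert_eq!(balances["alice"], 25);

    // A fee larger than the remaining balance is rejected, leaving the map untouched.
    assert!(pay_fee(&mut balances, "alice", 100).is_err());
    assert_eq!(balances["alice"], 25);
}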
account_transform.rs
//! Functionality for modifying accounts according to actions. use std::{collections::BTreeMap, marker::PhantomData}; use anyhow::bail; use async_trait::*; use ed25519_dalek::Signer; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::blockdata::{Action, MainBlock, SendInfo}; use crate::crypto::{hash, sign, verify_sig, Hash, HashCode, Signature}; use crate::hashlookup::HashLookup; use crate::hex_path::{bytes_to_path, HexPath}; use crate::queries::{lookup_account, lookup_data_in_account}; /// An typed account data field. #[derive(Serialize, Deserialize, Debug)] pub struct TypedDataField<T> { /// The path of the field in account data. pub path: HexPath, /// Phantom data for the type `T`. phantom: PhantomData<T>, } impl<T> Clone for TypedDataField<T> { fn clone(&self) -> Self { Self { path: self.path.clone(), phantom: PhantomData, } } } impl<T> TypedDataField<T> { /// Creates a `TypedDataField` given a path. pub fn from_path(path: HexPath) -> TypedDataField<T> { TypedDataField { path, phantom: PhantomData, } } } /// Account balance field. pub fn field_balance() -> TypedDataField<u128> { TypedDataField::from_path(bytes_to_path(b"balance")) } /// Account stake field. pub fn field_stake() -> TypedDataField<u128> { TypedDataField::from_path(bytes_to_path(b"stake")) } /// Account public key field. pub fn field_public_key() -> TypedDataField<ed25519_dalek::PublicKey> { TypedDataField::from_path(bytes_to_path(b"public_key")) } /// Field for a `SendInfo` stored in the sender's data. pub fn field_send(send: Hash<SendInfo>) -> TypedDataField<SendInfo> { let mut path = bytes_to_path(b"send"); path.0.extend(&bytes_to_path(&send.code).0); TypedDataField::from_path(path) } /// Field for tracking whether a `SendInfo` has been received in the receiver's /// data. pub fn field_received(send: Hash<SendInfo>) -> TypedDataField<bool> { let mut path = bytes_to_path(b"received"); path.0.extend(&bytes_to_path(&send.code).0); TypedDataField::from_path(path) } /// A context providing operations related to transforming an account (e.g. /// running actions). pub struct
<'a, HL: HashLookup> { /// The `HashLookup` used to look up previous account data. pub hl: &'a HL, /// Whether this account is initializing. pub is_initializing: bool, /// The account being transformed. pub this_account: HashCode, /// The hash code of the last main block. pub last_main: Hash<MainBlock>, /// Which fields have been overwritten so far, and their most recent values. pub fields_set: BTreeMap<HexPath, Vec<u8>>, } #[async_trait] impl<'a, HL: HashLookup> HashLookup for AccountTransform<'a, HL> { async fn lookup_bytes(&self, hash: HashCode) -> Result<Vec<u8>, anyhow::Error> { self.hl.lookup_bytes(hash).await } } impl<'a, HL: HashLookup> AccountTransform<'a, HL> { /// Creates a new `AccountTransform`. pub fn new( hl: &'a HL, is_initializing: bool, this_account: HashCode, last_main: Hash<MainBlock>, ) -> AccountTransform<'a, HL> { AccountTransform { hl, is_initializing, this_account, last_main, fields_set: BTreeMap::new(), } } /// Gets the value of a given data field. async fn get_data_field_bytes( &self, acct: HashCode, field_name: &HexPath, ) -> Result<Option<Vec<u8>>, anyhow::Error> { if acct == self.this_account { match self.fields_set.get(field_name) { Some(x) => { return Ok(Some(x.clone())); } None => {} } } let main = self.lookup(self.last_main).await?; if let Some(acct_node) = lookup_account(self, &main.block.body, self.this_account).await? { lookup_data_in_account(self, &acct_node, field_name).await } else { Ok(None) } } /// Sets the value of a given data field. fn set_data_field_bytes( &mut self, field_name: &HexPath, value: Vec<u8>, ) -> Result<(), anyhow::Error> { self.fields_set.insert(field_name.clone(), value); Ok(()) } /// Gets the value of a given typed data field. async fn get_data_field<T: DeserializeOwned>( &self, acct: HashCode, field: &TypedDataField<T>, ) -> Result<Option<T>, anyhow::Error> { match self.get_data_field_bytes(acct, &field.path).await? { None => Ok(None), Some(bs) => Ok(Some(rmp_serde::from_read(bs.as_slice())?)), } } /// Gets the value of a given typed data field, throwing an error if it is not found. pub async fn get_data_field_or_error<T: DeserializeOwned>( &self, acct: HashCode, field: &TypedDataField<T>, ) -> Result<T, anyhow::Error> { match self.get_data_field(acct, field).await? { None => bail!("data field not found: {:?}", field.path), Some(x) => Ok(x), } } /// Sets the value of a given typed data field. fn set_data_field<T: Serialize>( &mut self, field: &TypedDataField<T>, value: &T, ) -> Result<(), anyhow::Error> { self.set_data_field_bytes(&field.path, rmp_serde::to_vec_named(value)?) } } /// Causes the current account to pay a fee. async fn pay_fee<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, fee: u128, ) -> Result<(), anyhow::Error> { let bal = at .get_data_field_or_error(at.this_account, &field_balance()) .await?; if bal < fee { bail!("not enough balance for fee"); } at.set_data_field(&field_balance(), &(bal - fee)) } /// Causes the current account to send. async fn do_send<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, send: &SendInfo, ) -> Result<(), anyhow::Error> { if send.sender!= at.this_account { bail!("sender must be sent by this account"); } if send.last_main!= at.last_main { bail!("last main of send must be the current last main"); } let bal = at .get_data_field_or_error(at.this_account, &field_balance()) .await?; if bal < send.send_amount { bail!("not enough balance for send"); } let send_df = field_send(hash(send)); if at .get_data_field(at.this_account, &send_df) .await? 
.is_some() { bail!("that was already sent"); } at.set_data_field(&field_balance(), &(bal - send.send_amount))?; at.set_data_field(&send_df, send)?; Ok(()) } /// Causes the current account to receive. async fn do_receive<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, sender: HashCode, send_hash: Hash<SendInfo>, ) -> Result<SendInfo, anyhow::Error> { let send = at .get_data_field_or_error(sender, &field_send(send_hash)) .await?; if hash(&send)!= send_hash { bail!("send hashes don't match"); } if send.recipient!= at.this_account { bail!("recipient of send doesn't match recipient"); } let received_field = field_received(send_hash); let already_received = at.get_data_field(at.this_account, &received_field).await?; if already_received == Some(true) { bail!("tried to receive the same send twice"); } let bal = at .get_data_field_or_error(at.this_account, &field_balance()) .await?; at.set_data_field(&field_balance(), &(bal + send.send_amount))?; at.set_data_field(&received_field, &true)?; Ok(send) } /// Gets an argument out of action arguments. fn get_arg<T: DeserializeOwned>(args: &Vec<Vec<u8>>, i: usize) -> Result<T, anyhow::Error> { if i >= args.len() { bail!("too few arguments"); } Ok(rmp_serde::from_read(args[i].as_slice())?) } /// Verifies that the argument at a given index is a signature of a modified /// version of the action where the signature itself is replaced with /// an empty vector, and also that the signature's account matches the /// given account. fn verify_signature_argument( acct: HashCode, action: &Action, i: usize, ) -> Result<(), anyhow::Error> { let sig: Signature<Action> = get_arg(&action.args, i)?; if sig.account()!= acct { bail!("signature account must equal current account"); } let mut act2 = action.clone(); act2.args[i] = Vec::new(); if!verify_sig(&act2, &sig) { bail!("invalid signature"); } Ok(()) } /// Runs an action in a given `AccountTransform` context. pub async fn run_action<'a, HL: HashLookup>( at: &mut AccountTransform<'a, HL>, action: &Action, ) -> Result<(), anyhow::Error> { if at.last_main!= action.last_main { bail!("action last main must equal current last main"); } if action.command == b"send" { if at.is_initializing { bail!("send can't initialize an account"); } let recipient: HashCode = get_arg(&action.args, 0)?; let send_amount: u128 = get_arg(&action.args, 1)?; let initialize_spec: Option<Hash<Vec<u8>>> = get_arg(&action.args, 2)?; let message: Vec<u8> = get_arg(&action.args, 3)?; verify_signature_argument(at.this_account, action, 4)?; pay_fee(at, action.fee).await?; let send = SendInfo { last_main: action.last_main, sender: at.this_account, recipient, send_amount, initialize_spec, message, }; do_send(at, &send).await?; } else if action.command == b"receive" { let sender: HashCode = get_arg(&action.args, 0)?; let send_hash: Hash<SendInfo> = get_arg(&action.args, 1)?; let sig: Signature<Action> = get_arg(&action.args, 2)?; verify_signature_argument(at.this_account, action, 2)?; if at.is_initializing { at.set_data_field(&field_balance(), &0)?; at.set_data_field(&field_stake(), &0)?; at.set_data_field(&field_public_key(), &sig.key)?; } do_receive(at, sender, send_hash).await?; pay_fee(at, action.fee).await?; } else { bail!("unknown command {:?}", action.command); } Ok(()) } /// Creates a send action. 
pub fn mk_send( last_main: Hash<MainBlock>, fee: u128, recipient: HashCode, send_amount: u128, initialize_spec: Option<Hash<Vec<u8>>>, message: Vec<u8>, key: &ed25519_dalek::Keypair, ) -> (Action, SendInfo) { let mut act = Action { last_main, fee, command: b"send".to_vec(), args: vec![ rmp_serde::to_vec_named(&recipient).unwrap(), rmp_serde::to_vec_named(&send_amount).unwrap(), rmp_serde::to_vec_named(&initialize_spec).unwrap(), rmp_serde::to_vec_named(&message).unwrap(), vec![], ], }; act.args[4] = rmp_serde::to_vec_named(&sign(&key, &act)).unwrap(); let si = SendInfo { last_main, sender: hash(&key.public).code, recipient, send_amount, initialize_spec, message, }; (act, si) } /// Creates a receive action. pub fn mk_receive( last_main: Hash<MainBlock>, fee: u128, sender: HashCode, send_hash: Hash<SendInfo>, key: ed25519_dalek::Keypair, ) -> Action { let mut act = Action { last_main, fee, command: b"receive".to_vec(), args: vec![ rmp_serde::to_vec_named(&sender).unwrap(), rmp_serde::to_vec_named(&send_hash).unwrap(), vec![], ], }; act.args[2] = rmp_serde::to_vec_named(&sign(&key, &act)).unwrap(); act } #[cfg(test)] mod tests { use super::*; use crate::crypto; #[test] fn verify_send() { let last_main = Hash::<MainBlock> { code: [0; 32], phantom: PhantomData, }; let fee: u128 = 5; let recipient: HashCode = [0; 32]; let send_amount: u128 = 25; let init_spec: Option<Hash<Vec<u8>>> = None; let msg: Vec<u8> = vec![]; let key = crypto::gen_private_key(); let (act, si) = mk_send(last_main, fee, recipient, send_amount, init_spec, msg, &key); let res = verify_signature_argument(si.sender, &act, 4); assert!(res.is_ok(), "got error: {}", res.unwrap_err()); } }
AccountTransform
identifier_name
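`do_receive` in the example above refuses to credit the same `SendInfo` twice by recording a per-send `received` flag in the recipient's data. The sketch below keeps only that idea: a set of already-processed send identifiers guarding a credit operation, using a `HashSet<u64>` of made-up send ids in place of the crate's `Hash<SendInfo>` codes and data fields.

use std::collections::HashSet;

/// Credit a send exactly once; further attempts with the same id are rejected,
/// mirroring the `field_received` check in `do_receive`.
fn receive(received: &mut HashSet<u64>, balance: &mut u128, send_id: u64, amount: u128) -> Result<(), String> {
    if !received.insert(send_id) {
        return Err("tried to receive the same send twice".to_string());
    }
    *balance += amount;
    Ok(())
}

fn main() {
    let mut received = HashSet::new();
    let mut balance = 0u128;

    assert!(receive(&mut received, &mut balance, 7, 25).is_ok());
    assert_eq!(balance, 25);

    // A second attempt with the same send id is rejected and the balance is unchanged.
    assert!(receive(&mut received, &mut balance, 7, 25).is_err());
    assert_eq!(balance, 25);
}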
server.rs
// Copyright Materialize, Inc. and contributors. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. //! An interactive cluster server. use std::fmt::{Debug, Formatter}; use std::sync::{Arc, Mutex}; use anyhow::{anyhow, Error}; use async_trait::async_trait; use futures::future; use mz_cluster_client::client::{ClusterStartupEpoch, TimelyConfig}; use mz_ore::cast::CastFrom; use mz_ore::error::ErrorExt; use mz_ore::halt; use mz_ore::metrics::MetricsRegistry; use mz_ore::tracing::TracingHandle; use mz_persist_client::cache::PersistClientCache; use mz_service::client::{GenericClient, Partitionable, Partitioned}; use mz_service::local::LocalClient; use timely::communication::initialize::WorkerGuards; use timely::execute::execute_from; use timely::WorkerConfig; use tokio::runtime::Handle; use tokio::sync::mpsc; use tracing::{info, warn}; use crate::communication::initialize_networking; type PartitionedClient<C, R, A> = Partitioned<LocalClient<C, R, A>, C, R>; /// Configures a cluster server. #[derive(Debug)] pub struct ClusterConfig { /// Metrics registry through which dataflow metrics will be reported. pub metrics_registry: MetricsRegistry, /// `persist` client cache. pub persist_clients: Arc<PersistClientCache>, /// A process-global handle to tracing configuration. pub tracing_handle: Arc<TracingHandle>, } /// A client managing access to the local portion of a Timely cluster pub struct ClusterClient<Client, Worker, C, R> where Worker: crate::types::AsRunnableWorker<C, R>, { /// The actual client to talk to the cluster inner: Option<Client>, /// The running timely instance timely_container: TimelyContainerRef<C, R, Worker::Activatable>, /// Handle to the persist infrastructure. persist_clients: Arc<PersistClientCache>, /// The handle to the Tokio runtime. tokio_handle: tokio::runtime::Handle, /// A process-global handle to tracing configuration. tracing_handle: Arc<TracingHandle>, worker: Worker, } /// Metadata about timely workers in this process. pub struct TimelyContainer<C, R, A> { /// The current timely config in use config: TimelyConfig, /// Channels over which to send endpoints for wiring up a new Client client_txs: Vec< crossbeam_channel::Sender<( crossbeam_channel::Receiver<C>, mpsc::UnboundedSender<R>, crossbeam_channel::Sender<A>, )>, >, /// Thread guards that keep worker threads alive _worker_guards: WorkerGuards<()>, } /// Threadsafe reference to an optional TimelyContainer pub type TimelyContainerRef<C, R, A> = Arc<tokio::sync::Mutex<Option<TimelyContainer<C, R, A>>>>; /// Initiates a timely dataflow computation, processing cluster commands. 
pub fn serve<Worker, C, R>( config: ClusterConfig, worker_config: Worker, ) -> Result< ( TimelyContainerRef<C, R, Worker::Activatable>, impl Fn() -> Box<ClusterClient<PartitionedClient<C, R, Worker::Activatable>, Worker, C, R>>, ), Error, > where C: Send +'static, R: Send +'static, (C, R): Partitionable<C, R>, Worker: crate::types::AsRunnableWorker<C, R> + Clone + Send + Sync +'static, { let tokio_executor = tokio::runtime::Handle::current(); let timely_container = Arc::new(tokio::sync::Mutex::new(None)); let client_builder = { let timely_container = Arc::clone(&timely_container); move || { let worker_config = worker_config.clone(); let client = ClusterClient::new( Arc::clone(&timely_container), Arc::clone(&config.persist_clients), tokio_executor.clone(), Arc::clone(&config.tracing_handle), worker_config, ); let client = Box::new(client); client } }; Ok((timely_container, client_builder)) } impl<Worker, C, R> ClusterClient<PartitionedClient<C, R, Worker::Activatable>, Worker, C, R> where C: Send +'static, R: Send +'static, (C, R): Partitionable<C, R>, Worker: crate::types::AsRunnableWorker<C, R> + Clone + Send + Sync +'static, { fn new( timely_container: TimelyContainerRef<C, R, Worker::Activatable>, persist_clients: Arc<PersistClientCache>, tokio_handle: tokio::runtime::Handle, tracing_handle: Arc<TracingHandle>, worker_config: Worker, ) -> Self { Self { timely_container, inner: None, persist_clients, tokio_handle, tracing_handle,
async fn build_timely( user_worker_config: Worker, config: TimelyConfig, epoch: ClusterStartupEpoch, persist_clients: Arc<PersistClientCache>, tracing_handle: Arc<TracingHandle>, tokio_executor: Handle, ) -> Result<TimelyContainer<C, R, Worker::Activatable>, Error> { info!("Building timely container with config {config:?}"); let (client_txs, client_rxs): (Vec<_>, Vec<_>) = (0..config.workers) .map(|_| crossbeam_channel::unbounded()) .unzip(); let client_rxs: Mutex<Vec<_>> = Mutex::new(client_rxs.into_iter().map(Some).collect()); let (builders, other) = initialize_networking( config.workers, config.process, config.addresses.clone(), epoch, ) .await?; let mut worker_config = WorkerConfig::default(); differential_dataflow::configure( &mut worker_config, &differential_dataflow::Config { idle_merge_effort: Some(isize::cast_from(config.idle_arrangement_merge_effort)), }, ); let worker_guards = execute_from(builders, other, worker_config, move |timely_worker| { let timely_worker_index = timely_worker.index(); let _tokio_guard = tokio_executor.enter(); let client_rx = client_rxs.lock().unwrap()[timely_worker_index % config.workers] .take() .unwrap(); let persist_clients = Arc::clone(&persist_clients); let user_worker_config = user_worker_config.clone(); let tracing_handle = Arc::clone(&tracing_handle); Worker::build_and_run( user_worker_config, timely_worker, client_rx, persist_clients, tracing_handle, ) }) .map_err(|e| anyhow!("{e}"))?; Ok(TimelyContainer { config, client_txs, _worker_guards: worker_guards, }) } async fn build( &mut self, config: TimelyConfig, epoch: ClusterStartupEpoch, ) -> Result<(), Error> { let workers = config.workers; // Check if we can reuse the existing timely instance. // We currently do not support reinstantiating timely, we simply panic if another config is // requested. This code must panic before dropping the worker guards contained in // timely_container. As we don't terminate timely workers, the thread join would hang // forever, possibly creating a fair share of confusion in the orchestrator. 
let persist_clients = Arc::clone(&self.persist_clients); let handle = self.tokio_handle.clone(); let tracing_handle = Arc::clone(&self.tracing_handle); let worker_config = self.worker.clone(); let mut timely_lock = self.timely_container.lock().await; let timely = match timely_lock.take() { Some(existing) => { if config!= existing.config { halt!( "new timely configuration does not match existing timely configuration:\n{:?}\nvs\n{:?}", config, existing.config, ); } info!("Timely already initialized; re-using.",); existing } None => { let build_timely_result = Self::build_timely( worker_config, config, epoch, persist_clients, tracing_handle, handle, ) .await; match build_timely_result { Err(e) => { warn!("timely initialization failed: {}", e.display_with_causes()); return Err(e); } Ok(ok) => ok, } } }; let (command_txs, command_rxs): (Vec<_>, Vec<_>) = (0..workers).map(|_| crossbeam_channel::unbounded()).unzip(); let (response_txs, response_rxs): (Vec<_>, Vec<_>) = (0..workers).map(|_| mpsc::unbounded_channel()).unzip(); let activators = timely .client_txs .iter() .zip(command_rxs) .zip(response_txs) .map(|((client_tx, cmd_rx), resp_tx)| { let (activator_tx, activator_rx) = crossbeam_channel::unbounded(); client_tx .send((cmd_rx, resp_tx, activator_tx)) .expect("worker should not drop first"); activator_rx.recv().unwrap() }) .collect(); *timely_lock = Some(timely); self.inner = Some(LocalClient::new_partitioned( response_rxs, command_txs, activators, )); Ok(()) } } impl<Client: Debug, Worker: crate::types::AsRunnableWorker<C, R>, C, R> Debug for ClusterClient<Client, Worker, C, R> { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("ClusterClient") .field("persist_clients", &self.persist_clients) .field("inner", &self.inner) .finish_non_exhaustive() } } #[async_trait] impl<Worker, C, R> GenericClient<C, R> for ClusterClient<PartitionedClient<C, R, Worker::Activatable>, Worker, C, R> where C: Send + Debug + crate::types::TryIntoTimelyConfig +'static, R: Send + Debug +'static, (C, R): Partitionable<C, R>, Worker: crate::types::AsRunnableWorker<C, R> + Send + Sync + Clone +'static, Worker::Activatable: Send + Sync +'static + Debug, { async fn send(&mut self, cmd: C) -> Result<(), Error> { // Changing this debug statement requires changing the replica-isolation test tracing::debug!("ClusterClient send={:?}", &cmd); match cmd.try_into_timely_config() { Ok((config, epoch)) => self.build(config, epoch).await, Err(cmd) => self.inner.as_mut().expect("initialized").send(cmd).await, } } async fn recv(&mut self) -> Result<Option<R>, Error> { if let Some(client) = self.inner.as_mut() { client.recv().await } else { future::pending().await } } }
worker: worker_config, } }
random_line_split
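The `build` step in the server.rs row above wires each timely worker to the coordinator by sending a `(command_rx, response_tx, activator_tx)` triple over a per-worker channel and waiting for the worker to answer back. The following is a minimal, self-contained sketch of that hand-off pattern using only `std::sync::mpsc`; the real code uses crossbeam and tokio channels, and the plain `String` responses here merely stand in for the activator/response types. This is an illustration of the wiring pattern, not the mz_cluster API.

use std::sync::mpsc;
use std::thread;

fn main() {
    let workers = 2;
    // One "client" channel per worker, analogous to `client_txs` above.
    let mut client_txs = Vec::new();
    let mut guards = Vec::new();
    for i in 0..workers {
        let (client_tx, client_rx) =
            mpsc::channel::<(mpsc::Receiver<String>, mpsc::Sender<String>)>();
        client_txs.push(client_tx);
        guards.push(thread::spawn(move || {
            // The worker blocks until the coordinator wires it up.
            let (cmd_rx, resp_tx) = client_rx.recv().unwrap();
            for cmd in cmd_rx {
                resp_tx.send(format!("worker {i} handled {cmd}")).unwrap();
            }
        }));
    }
    // Coordinator side: create per-worker command/response channels and send
    // the endpoints over, mirroring the zip over `client_txs` in `build`.
    let mut cmd_txs = Vec::new();
    let mut resp_rxs = Vec::new();
    for client_tx in &client_txs {
        let (cmd_tx, cmd_rx) = mpsc::channel();
        let (resp_tx, resp_rx) = mpsc::channel();
        client_tx.send((cmd_rx, resp_tx)).unwrap();
        cmd_txs.push(cmd_tx);
        resp_rxs.push(resp_rx);
    }
    for (i, cmd_tx) in cmd_txs.iter().enumerate() {
        cmd_tx.send(format!("cmd-{i}")).unwrap();
    }
    // Dropping the command senders lets each worker's loop terminate.
    drop(cmd_txs);
    for resp_rx in &resp_rxs {
        println!("{}", resp_rx.recv().unwrap());
    }
    for g in guards {
        g.join().unwrap();
    }
}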
server.rs
// Copyright Materialize, Inc. and contributors. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. //! An interactive cluster server. use std::fmt::{Debug, Formatter}; use std::sync::{Arc, Mutex}; use anyhow::{anyhow, Error}; use async_trait::async_trait; use futures::future; use mz_cluster_client::client::{ClusterStartupEpoch, TimelyConfig}; use mz_ore::cast::CastFrom; use mz_ore::error::ErrorExt; use mz_ore::halt; use mz_ore::metrics::MetricsRegistry; use mz_ore::tracing::TracingHandle; use mz_persist_client::cache::PersistClientCache; use mz_service::client::{GenericClient, Partitionable, Partitioned}; use mz_service::local::LocalClient; use timely::communication::initialize::WorkerGuards; use timely::execute::execute_from; use timely::WorkerConfig; use tokio::runtime::Handle; use tokio::sync::mpsc; use tracing::{info, warn}; use crate::communication::initialize_networking; type PartitionedClient<C, R, A> = Partitioned<LocalClient<C, R, A>, C, R>; /// Configures a cluster server. #[derive(Debug)] pub struct ClusterConfig { /// Metrics registry through which dataflow metrics will be reported. pub metrics_registry: MetricsRegistry, /// `persist` client cache. pub persist_clients: Arc<PersistClientCache>, /// A process-global handle to tracing configuration. pub tracing_handle: Arc<TracingHandle>, } /// A client managing access to the local portion of a Timely cluster pub struct ClusterClient<Client, Worker, C, R> where Worker: crate::types::AsRunnableWorker<C, R>, { /// The actual client to talk to the cluster inner: Option<Client>, /// The running timely instance timely_container: TimelyContainerRef<C, R, Worker::Activatable>, /// Handle to the persist infrastructure. persist_clients: Arc<PersistClientCache>, /// The handle to the Tokio runtime. tokio_handle: tokio::runtime::Handle, /// A process-global handle to tracing configuration. tracing_handle: Arc<TracingHandle>, worker: Worker, } /// Metadata about timely workers in this process. pub struct TimelyContainer<C, R, A> { /// The current timely config in use config: TimelyConfig, /// Channels over which to send endpoints for wiring up a new Client client_txs: Vec< crossbeam_channel::Sender<( crossbeam_channel::Receiver<C>, mpsc::UnboundedSender<R>, crossbeam_channel::Sender<A>, )>, >, /// Thread guards that keep worker threads alive _worker_guards: WorkerGuards<()>, } /// Threadsafe reference to an optional TimelyContainer pub type TimelyContainerRef<C, R, A> = Arc<tokio::sync::Mutex<Option<TimelyContainer<C, R, A>>>>; /// Initiates a timely dataflow computation, processing cluster commands. pub fn
<Worker, C, R>( config: ClusterConfig, worker_config: Worker, ) -> Result< ( TimelyContainerRef<C, R, Worker::Activatable>, impl Fn() -> Box<ClusterClient<PartitionedClient<C, R, Worker::Activatable>, Worker, C, R>>, ), Error, > where C: Send +'static, R: Send +'static, (C, R): Partitionable<C, R>, Worker: crate::types::AsRunnableWorker<C, R> + Clone + Send + Sync +'static, { let tokio_executor = tokio::runtime::Handle::current(); let timely_container = Arc::new(tokio::sync::Mutex::new(None)); let client_builder = { let timely_container = Arc::clone(&timely_container); move || { let worker_config = worker_config.clone(); let client = ClusterClient::new( Arc::clone(&timely_container), Arc::clone(&config.persist_clients), tokio_executor.clone(), Arc::clone(&config.tracing_handle), worker_config, ); let client = Box::new(client); client } }; Ok((timely_container, client_builder)) } impl<Worker, C, R> ClusterClient<PartitionedClient<C, R, Worker::Activatable>, Worker, C, R> where C: Send +'static, R: Send +'static, (C, R): Partitionable<C, R>, Worker: crate::types::AsRunnableWorker<C, R> + Clone + Send + Sync +'static, { fn new( timely_container: TimelyContainerRef<C, R, Worker::Activatable>, persist_clients: Arc<PersistClientCache>, tokio_handle: tokio::runtime::Handle, tracing_handle: Arc<TracingHandle>, worker_config: Worker, ) -> Self { Self { timely_container, inner: None, persist_clients, tokio_handle, tracing_handle, worker: worker_config, } } async fn build_timely( user_worker_config: Worker, config: TimelyConfig, epoch: ClusterStartupEpoch, persist_clients: Arc<PersistClientCache>, tracing_handle: Arc<TracingHandle>, tokio_executor: Handle, ) -> Result<TimelyContainer<C, R, Worker::Activatable>, Error> { info!("Building timely container with config {config:?}"); let (client_txs, client_rxs): (Vec<_>, Vec<_>) = (0..config.workers) .map(|_| crossbeam_channel::unbounded()) .unzip(); let client_rxs: Mutex<Vec<_>> = Mutex::new(client_rxs.into_iter().map(Some).collect()); let (builders, other) = initialize_networking( config.workers, config.process, config.addresses.clone(), epoch, ) .await?; let mut worker_config = WorkerConfig::default(); differential_dataflow::configure( &mut worker_config, &differential_dataflow::Config { idle_merge_effort: Some(isize::cast_from(config.idle_arrangement_merge_effort)), }, ); let worker_guards = execute_from(builders, other, worker_config, move |timely_worker| { let timely_worker_index = timely_worker.index(); let _tokio_guard = tokio_executor.enter(); let client_rx = client_rxs.lock().unwrap()[timely_worker_index % config.workers] .take() .unwrap(); let persist_clients = Arc::clone(&persist_clients); let user_worker_config = user_worker_config.clone(); let tracing_handle = Arc::clone(&tracing_handle); Worker::build_and_run( user_worker_config, timely_worker, client_rx, persist_clients, tracing_handle, ) }) .map_err(|e| anyhow!("{e}"))?; Ok(TimelyContainer { config, client_txs, _worker_guards: worker_guards, }) } async fn build( &mut self, config: TimelyConfig, epoch: ClusterStartupEpoch, ) -> Result<(), Error> { let workers = config.workers; // Check if we can reuse the existing timely instance. // We currently do not support reinstantiating timely, we simply panic if another config is // requested. This code must panic before dropping the worker guards contained in // timely_container. As we don't terminate timely workers, the thread join would hang // forever, possibly creating a fair share of confusion in the orchestrator. 
let persist_clients = Arc::clone(&self.persist_clients); let handle = self.tokio_handle.clone(); let tracing_handle = Arc::clone(&self.tracing_handle); let worker_config = self.worker.clone(); let mut timely_lock = self.timely_container.lock().await; let timely = match timely_lock.take() { Some(existing) => { if config!= existing.config { halt!( "new timely configuration does not match existing timely configuration:\n{:?}\nvs\n{:?}", config, existing.config, ); } info!("Timely already initialized; re-using.",); existing } None => { let build_timely_result = Self::build_timely( worker_config, config, epoch, persist_clients, tracing_handle, handle, ) .await; match build_timely_result { Err(e) => { warn!("timely initialization failed: {}", e.display_with_causes()); return Err(e); } Ok(ok) => ok, } } }; let (command_txs, command_rxs): (Vec<_>, Vec<_>) = (0..workers).map(|_| crossbeam_channel::unbounded()).unzip(); let (response_txs, response_rxs): (Vec<_>, Vec<_>) = (0..workers).map(|_| mpsc::unbounded_channel()).unzip(); let activators = timely .client_txs .iter() .zip(command_rxs) .zip(response_txs) .map(|((client_tx, cmd_rx), resp_tx)| { let (activator_tx, activator_rx) = crossbeam_channel::unbounded(); client_tx .send((cmd_rx, resp_tx, activator_tx)) .expect("worker should not drop first"); activator_rx.recv().unwrap() }) .collect(); *timely_lock = Some(timely); self.inner = Some(LocalClient::new_partitioned( response_rxs, command_txs, activators, )); Ok(()) } } impl<Client: Debug, Worker: crate::types::AsRunnableWorker<C, R>, C, R> Debug for ClusterClient<Client, Worker, C, R> { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("ClusterClient") .field("persist_clients", &self.persist_clients) .field("inner", &self.inner) .finish_non_exhaustive() } } #[async_trait] impl<Worker, C, R> GenericClient<C, R> for ClusterClient<PartitionedClient<C, R, Worker::Activatable>, Worker, C, R> where C: Send + Debug + crate::types::TryIntoTimelyConfig +'static, R: Send + Debug +'static, (C, R): Partitionable<C, R>, Worker: crate::types::AsRunnableWorker<C, R> + Send + Sync + Clone +'static, Worker::Activatable: Send + Sync +'static + Debug, { async fn send(&mut self, cmd: C) -> Result<(), Error> { // Changing this debug statement requires changing the replica-isolation test tracing::debug!("ClusterClient send={:?}", &cmd); match cmd.try_into_timely_config() { Ok((config, epoch)) => self.build(config, epoch).await, Err(cmd) => self.inner.as_mut().expect("initialized").send(cmd).await, } } async fn recv(&mut self) -> Result<Option<R>, Error> { if let Some(client) = self.inner.as_mut() { client.recv().await } else { future::pending().await } } }
serve
identifier_name
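The second half of `build` (visible in the suffix of the row above) either reuses the existing `TimelyContainer` when the incoming config matches, halts when it does not, or builds a fresh container when the slot is empty. Below is a much-reduced sketch of that take-check-put pattern around a `Mutex<Option<_>>`; the `Config`/`Container` types and the `ensure_built` name are made up for illustration, and a plain `Err` stands in for the `halt!` macro used by the real code.

use std::sync::Mutex;

#[derive(Debug, PartialEq)]
struct Config { workers: usize }

struct Container { config: Config }

fn ensure_built(slot: &Mutex<Option<Container>>, config: Config) -> Result<(), String> {
    let mut guard = slot.lock().unwrap();
    let container = match guard.take() {
        Some(existing) => {
            if existing.config != config {
                // The real code calls `halt!` here rather than returning an error,
                // because timely workers cannot be re-instantiated in place.
                return Err(format!("config mismatch: {:?} vs {:?}", config, existing.config));
            }
            // Config matches: reuse the running instance.
            existing
        }
        // Empty slot: build a new instance (the expensive path in the real code).
        None => Container { config },
    };
    *guard = Some(container);
    Ok(())
}

fn main() {
    let slot = Mutex::new(None);
    assert!(ensure_built(&slot, Config { workers: 4 }).is_ok());
    assert!(ensure_built(&slot, Config { workers: 4 }).is_ok());
    assert!(ensure_built(&slot, Config { workers: 8 }).is_err());
}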
clipmap.rs
use crate::prelude::{ChunkKey, ChunkKey3, ChunkUnits, OctreeNode, OctreeSet, VisitStatus}; use building_blocks_core::prelude::*; #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct ClipMapConfig3 { /// The number of levels of detail. num_lods: u8, /// The radius (in chunks) of a clipbox at any level of detail. clip_box_radius: i32, /// The shape of every chunk, regardless of LOD. Note that while a chunk at a higher LOD takes up more world space, it has /// the same shape as chunks at lower levels, because the voxel size also changes. /// /// **WARNING**: As of now, chunks must be cubes. chunk_shape: Point3i, } impl ClipMapConfig3 { pub fn new(num_lods: u8, clip_box_radius: ChunkUnits<u16>, chunk_shape: Point3i) -> Self { assert!(clip_box_radius.0 >= 2); // Radius 1 doesn't work for any more than a single LOD, so why are you using a clipmap? assert!(chunk_shape.dimensions_are_powers_of_2()); Self { num_lods, clip_box_radius: clip_box_radius.0 as i32, chunk_shape, } } pub fn chunk_edge_length_log2(&self) -> i32 { assert!(self.chunk_shape.is_cube()); self.chunk_shape.x().trailing_zeros() as i32 } } /// Traverse `octree` to find the `ChunkKey3`s that are "active" when the clipmap is centered at `lod0_center`. `active_rx` /// is a callback that receives the chunk keys for active chunks. pub fn active_clipmap_lod_chunks( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, mut active_rx: impl FnMut(ChunkKey3), ) { let chunk_log2 = config.chunk_edge_length_log2(); let centers = all_lod_centers(lod0_center.0, config.num_lods); let high_lod_boundary = config.clip_box_radius >> 1; octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= config.num_lods { return VisitStatus::Continue; } let offset_from_center = get_offset_from_lod_center(octant, &centers); if lod == 0 || offset_from_center > high_lod_boundary { // This octant can be rendered at this level of detail. active_rx(octant_chunk_key(chunk_log2, octant)); VisitStatus::Stop } else { // This octant should be rendered with more detail. VisitStatus::Continue } }); } /// A notification that a chunk (at a particular level of detail) must be split or merged. This is usually the result of a /// camera movement. #[derive(Clone, Debug, Eq, PartialEq)] pub enum LodChunkUpdate<N> { Split(SplitChunk<N>), Merge(MergeChunks<N>), } /// A 3-dimensional `LodChunkUpdate`. pub type LodChunkUpdate3 = LodChunkUpdate<[i32; 3]>; /// Split `old_chunk` into many `new_chunks`. The number of new chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct SplitChunk<N> { pub old_chunk: ChunkKey<N>, pub new_chunks: Vec<ChunkKey<N>>, } /// Merge many `old_chunks` into `new_chunk`. The number of old chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct MergeChunks<N> { pub old_chunks: Vec<ChunkKey<N>>, pub new_chunk: ChunkKey<N>, } /// A transient object used for running the `find_chunk_updates` method on multiple octrees. pub struct ClipMapUpdate3 { chunk_log2: i32, num_lods: u8, low_lod_boundary: i32, high_lod_boundary: i32, old_centers: Vec<Point3i>, new_centers: Vec<Point3i>, } impl ClipMapUpdate3 { /// Prepare to run the `find_chunk_updates` method after the clipmap center has moved from `old_lod0_center` to /// `new_lod0_center`. 
pub fn new( config: &ClipMapConfig3, old_lod0_center: ChunkUnits<Point3i>, new_lod0_center: ChunkUnits<Point3i>, ) -> Self { Self { chunk_log2: config.chunk_shape.x().trailing_zeros() as i32, num_lods: config.num_lods, low_lod_boundary: config.clip_box_radius, high_lod_boundary: config.clip_box_radius >> 1, old_centers: all_lod_centers(old_lod0_center.0, config.num_lods), new_centers: all_lod_centers(new_lod0_center.0, config.num_lods), } } /// Traverse `octree` and find all chunks that need to be split or merged based on the movement of the center of the /// clipmap. pub fn find_chunk_updates( &self, octree: &OctreeSet, mut update_rx: impl FnMut(LodChunkUpdate3), ) { octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= self.num_lods || lod == 0 { return VisitStatus::Continue; } let old_offset_from_center = get_offset_from_lod_center(octant, &self.old_centers); let offset_from_center = get_offset_from_lod_center(octant, &self.new_centers); if old_offset_from_center > self.high_lod_boundary && offset_from_center <= self.high_lod_boundary { // Increase the detail for this octant. // Create the higher detail in descendant octants. let old_chunk = octant_chunk_key(self.chunk_log2, octant); let new_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.new_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, })); VisitStatus::Stop } else if offset_from_center > self.high_lod_boundary && old_offset_from_center <= self.high_lod_boundary { // Decrease the detail for this octant. // Delete the higher detail in descendant octants. let new_chunk = octant_chunk_key(self.chunk_log2, octant); let old_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.old_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, })); VisitStatus::Stop } else if offset_from_center > self.low_lod_boundary && old_offset_from_center > self.low_lod_boundary { VisitStatus::Stop } else { VisitStatus::Continue } }); } } fn all_lod_centers(lod0_center: Point3i, num_lods: u8) -> Vec<Point3i> { let mut centers = vec![lod0_center; num_lods as usize]; for i in 1..num_lods as usize { centers[i] = centers[i - 1] >> 1; } centers } fn find_merge_or_split_descendants( chunk_log2: i32, octree: &OctreeSet, node: &OctreeNode, centers: &[Point3i], high_lod_boundary: i32, ) -> Vec<ChunkKey3> { let mut matching_chunks = Vec::with_capacity(8); node.visit_all_octants_in_preorder(octree, &mut |node: &OctreeNode| { let lod = node.octant().exponent(); let old_offset_from_center = get_offset_from_lod_center(node.octant(), centers); if lod == 0 || old_offset_from_center > high_lod_boundary { matching_chunks.push(octant_chunk_key(chunk_log2, node.octant())); VisitStatus::Stop } else { VisitStatus::Continue } }); matching_chunks } fn get_offset_from_lod_center(octant: &Octant, centers: &[Point3i]) -> i32 { let lod = octant.exponent(); let lod_p = octant.minimum() >> lod; let lod_center = centers[lod as usize]; (lod_p - lod_center) // For calculating offsets from the clipmap center, we need to bias any nonnegative components to make voxel coordinates // symmetric about the center. // // Voxel Coordinates // // -3 -2 -1 0 1 2 3 // <--|---|---|---|---|---|---|--> // // Clipmap Coordinates // // -3 -2 -1 1 2 3 // <--|---|---|---|---|---|---|--> .map_components_unary(|c| if c >= 0 { c + 1 } else
) .abs() .max_component() } fn octant_chunk_key(chunk_log2: i32, octant: &Octant) -> ChunkKey3 { let lod = octant.exponent(); ChunkKey { lod, minimum: (octant.minimum() << chunk_log2) >> lod, } } // ████████╗███████╗███████╗████████╗ // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ // ██║ █████╗ ███████╗ ██║ // ██║ ██╔══╝ ╚════██║ ██║ // ██║ ███████╗███████║ ██║ // ╚═╝ ╚══════╝╚══════╝ ╚═╝ #[cfg(test)] mod test { use crate::dev_prelude::{ChunkUnits, SmallKeyHashSet}; use super::*; use itertools::Itertools; use std::iter::FromIterator; #[test] fn active_chunks_in_lod0_and_lod1() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let lod0_center = ChunkUnits(Point3i::ZERO); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let mut octree = OctreeSet::new_empty(domain); let filled_extent = Extent3i::from_min_and_shape(Point3i::fill(-4), Point3i::fill(8)); octree.add_extent(&filled_extent); let active_chunks = ActiveChunks::new(&config, &octree, lod0_center); let lod0_set = Extent3i::from_min_and_shape(Point3i::fill(-2), Point3i::fill(4)) .iter_points() .map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 0, }); let mut lod1_set = OctreeSet::new_empty(domain); lod1_set.add_extent(&Extent3i::from_min_and_shape( Point3i::fill(-2), Point3i::fill(4), )); lod1_set.subtract_extent(&Extent3i::from_min_and_shape( Point3i::fill(-1), Point3i::fill(2), )); let lod1_set = lod1_set.collect_all_points().into_iter().map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 1, }); let expected_keys = SmallKeyHashSet::from_iter(lod0_set.chain(lod1_set)); assert_eq!(active_chunks.keys, expected_keys); } #[test] fn no_updates_when_center_does_not_move() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); let centers = [ [0, 0, 0], [2, 0, 0], [-2, 0, 0], [0, 2, 0], [0, -2, 0], [0, 0, 2], [0, 0, -2], ]; for p in centers.iter().cloned() { let center = ChunkUnits(PointN(p)); ClipMapUpdate3::new(&config, center, center) .find_chunk_updates(&octree, |_update| panic!("Fail")); } } #[test] fn updates_are_consistent_with_active_chunks() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); validate_update_path( &config, &octree, &[ [0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [-1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], [0, -1, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, -1], ], ); } fn validate_update_path(config: &ClipMapConfig3, octree: &OctreeSet, path: &[[i32; 3]]) { let mut active_chunks = ActiveChunks::new(&config, &octree, ChunkUnits(PointN(path[0]))); for (p1, p2) in path.iter().cloned().tuple_windows() { let old_lod0_center = ChunkUnits(PointN(p1)); let new_lod0_center = ChunkUnits(PointN(p2)); ClipMapUpdate3::new(config, old_lod0_center, new_lod0_center) .find_chunk_updates(octree, |update| active_chunks.apply_update(update)); // We should end up with the same result from moving the clipmap as we do just constructing it from scratch at the // new location. 
assert_eq!( active_chunks, ActiveChunks::new(config, octree, new_lod0_center), "Failed on edge: {:?} --> {:?}", p1, p2 ); } } /// This just stores the state of active chunks so that we can compare a known correct "active set" with one that has been /// modified via any number of calls to `apply_update`. #[derive(Debug, Eq, PartialEq)] struct ActiveChunks { keys: SmallKeyHashSet<ChunkKey3>, } impl ActiveChunks { fn new( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, ) -> Self { let mut keys = SmallKeyHashSet::new(); active_clipmap_lod_chunks(&config, &octree, lod0_center, |key| { keys.insert(key); }); Self { keys } } fn apply_update(&mut self, update: LodChunkUpdate3) { match update { LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, }) => { for chunk in old_chunks.into_iter() { self.keys.remove(&chunk); } assert!(!self.keys.contains(&new_chunk)); self.keys.insert(new_chunk); } LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, }) => { self.keys.remove(&old_chunk); for chunk in new_chunks.into_iter() { assert!(!self.keys.contains(&chunk)); self.keys.insert(chunk); } } } } } const CHUNK_SHAPE: Point3i = PointN([16; 3]); const NUM_LODS: u8 = 2; const CLIP_BOX_RADIUS: u16 = 2; }
{ c }
conditional_block
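The comment block inside `get_offset_from_lod_center` (reproduced in the row above) explains why nonnegative components are biased by +1 before taking the Chebyshev distance from the LOD center. Here is a tiny plain-`i32` worked example of that bias, independent of the building-blocks `Point3i` API, to make the symmetry concrete.

fn biased_offset(d: [i32; 3]) -> i32 {
    d.iter()
        // Bias nonnegative components so coordinates are symmetric about the center.
        .map(|&c| if c >= 0 { c + 1 } else { c })
        .map(|c| c.abs())
        .max()
        .unwrap()
}

fn main() {
    // The two cells straddling the center (components 0 and -1) both sit at offset 1...
    assert_eq!(biased_offset([0, 0, 0]), 1);
    assert_eq!(biased_offset([-1, -1, -1]), 1);
    // ...and components 1 and -2 both sit at offset 2, matching the
    // "clipmap coordinates" diagram in the comment above.
    assert_eq!(biased_offset([1, 0, 0]), 2);
    assert_eq!(biased_offset([-2, 0, 0]), 2);
    assert_eq!(biased_offset([3, -4, 2]), 4);
}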
clipmap.rs
use crate::prelude::{ChunkKey, ChunkKey3, ChunkUnits, OctreeNode, OctreeSet, VisitStatus}; use building_blocks_core::prelude::*; #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct ClipMapConfig3 { /// The number of levels of detail. num_lods: u8, /// The radius (in chunks) of a clipbox at any level of detail. clip_box_radius: i32, /// The shape of every chunk, regardless of LOD. Note that while a chunk at a higher LOD takes up more world space, it has /// the same shape as chunks at lower levels, because the voxel size also changes. /// /// **WARNING**: As of now, chunks must be cubes. chunk_shape: Point3i, } impl ClipMapConfig3 { pub fn new(num_lods: u8, clip_box_radius: ChunkUnits<u16>, chunk_shape: Point3i) -> Self { assert!(clip_box_radius.0 >= 2); // Radius 1 doesn't work for any more than a single LOD, so why are you using a clipmap? assert!(chunk_shape.dimensions_are_powers_of_2()); Self { num_lods, clip_box_radius: clip_box_radius.0 as i32, chunk_shape, } } pub fn chunk_edge_length_log2(&self) -> i32 { assert!(self.chunk_shape.is_cube()); self.chunk_shape.x().trailing_zeros() as i32 } } /// Traverse `octree` to find the `ChunkKey3`s that are "active" when the clipmap is centered at `lod0_center`. `active_rx` /// is a callback that receives the chunk keys for active chunks. pub fn active_clipmap_lod_chunks( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, mut active_rx: impl FnMut(ChunkKey3), ) { let chunk_log2 = config.chunk_edge_length_log2(); let centers = all_lod_centers(lod0_center.0, config.num_lods); let high_lod_boundary = config.clip_box_radius >> 1; octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= config.num_lods { return VisitStatus::Continue; } let offset_from_center = get_offset_from_lod_center(octant, &centers); if lod == 0 || offset_from_center > high_lod_boundary { // This octant can be rendered at this level of detail. active_rx(octant_chunk_key(chunk_log2, octant)); VisitStatus::Stop } else { // This octant should be rendered with more detail. VisitStatus::Continue } }); } /// A notification that a chunk (at a particular level of detail) must be split or merged. This is usually the result of a /// camera movement. #[derive(Clone, Debug, Eq, PartialEq)] pub enum LodChunkUpdate<N> { Split(SplitChunk<N>), Merge(MergeChunks<N>), } /// A 3-dimensional `LodChunkUpdate`. pub type LodChunkUpdate3 = LodChunkUpdate<[i32; 3]>; /// Split `old_chunk` into many `new_chunks`. The number of new chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct SplitChunk<N> { pub old_chunk: ChunkKey<N>, pub new_chunks: Vec<ChunkKey<N>>, } /// Merge many `old_chunks` into `new_chunk`. The number of old chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct MergeChunks<N> { pub old_chunks: Vec<ChunkKey<N>>, pub new_chunk: ChunkKey<N>, } /// A transient object used for running the `find_chunk_updates` method on multiple octrees. pub struct ClipMapUpdate3 { chunk_log2: i32, num_lods: u8, low_lod_boundary: i32, high_lod_boundary: i32, old_centers: Vec<Point3i>, new_centers: Vec<Point3i>, } impl ClipMapUpdate3 { /// Prepare to run the `find_chunk_updates` method after the clipmap center has moved from `old_lod0_center` to /// `new_lod0_center`. 
pub fn new( config: &ClipMapConfig3, old_lod0_center: ChunkUnits<Point3i>, new_lod0_center: ChunkUnits<Point3i>, ) -> Self { Self { chunk_log2: config.chunk_shape.x().trailing_zeros() as i32, num_lods: config.num_lods, low_lod_boundary: config.clip_box_radius, high_lod_boundary: config.clip_box_radius >> 1, old_centers: all_lod_centers(old_lod0_center.0, config.num_lods), new_centers: all_lod_centers(new_lod0_center.0, config.num_lods), } } /// Traverse `octree` and find all chunks that need to be split or merged based on the movement of the center of the /// clipmap. pub fn
( &self, octree: &OctreeSet, mut update_rx: impl FnMut(LodChunkUpdate3), ) { octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= self.num_lods || lod == 0 { return VisitStatus::Continue; } let old_offset_from_center = get_offset_from_lod_center(octant, &self.old_centers); let offset_from_center = get_offset_from_lod_center(octant, &self.new_centers); if old_offset_from_center > self.high_lod_boundary && offset_from_center <= self.high_lod_boundary { // Increase the detail for this octant. // Create the higher detail in descendant octants. let old_chunk = octant_chunk_key(self.chunk_log2, octant); let new_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.new_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, })); VisitStatus::Stop } else if offset_from_center > self.high_lod_boundary && old_offset_from_center <= self.high_lod_boundary { // Decrease the detail for this octant. // Delete the higher detail in descendant octants. let new_chunk = octant_chunk_key(self.chunk_log2, octant); let old_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.old_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, })); VisitStatus::Stop } else if offset_from_center > self.low_lod_boundary && old_offset_from_center > self.low_lod_boundary { VisitStatus::Stop } else { VisitStatus::Continue } }); } } fn all_lod_centers(lod0_center: Point3i, num_lods: u8) -> Vec<Point3i> { let mut centers = vec![lod0_center; num_lods as usize]; for i in 1..num_lods as usize { centers[i] = centers[i - 1] >> 1; } centers } fn find_merge_or_split_descendants( chunk_log2: i32, octree: &OctreeSet, node: &OctreeNode, centers: &[Point3i], high_lod_boundary: i32, ) -> Vec<ChunkKey3> { let mut matching_chunks = Vec::with_capacity(8); node.visit_all_octants_in_preorder(octree, &mut |node: &OctreeNode| { let lod = node.octant().exponent(); let old_offset_from_center = get_offset_from_lod_center(node.octant(), centers); if lod == 0 || old_offset_from_center > high_lod_boundary { matching_chunks.push(octant_chunk_key(chunk_log2, node.octant())); VisitStatus::Stop } else { VisitStatus::Continue } }); matching_chunks } fn get_offset_from_lod_center(octant: &Octant, centers: &[Point3i]) -> i32 { let lod = octant.exponent(); let lod_p = octant.minimum() >> lod; let lod_center = centers[lod as usize]; (lod_p - lod_center) // For calculating offsets from the clipmap center, we need to bias any nonnegative components to make voxel coordinates // symmetric about the center. 
// // Voxel Coordinates // // -3 -2 -1 0 1 2 3 // <--|---|---|---|---|---|---|--> // // Clipmap Coordinates // // -3 -2 -1 1 2 3 // <--|---|---|---|---|---|---|--> .map_components_unary(|c| if c >= 0 { c + 1 } else { c }) .abs() .max_component() } fn octant_chunk_key(chunk_log2: i32, octant: &Octant) -> ChunkKey3 { let lod = octant.exponent(); ChunkKey { lod, minimum: (octant.minimum() << chunk_log2) >> lod, } } // ████████╗███████╗███████╗████████╗ // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ // ██║ █████╗ ███████╗ ██║ // ██║ ██╔══╝ ╚════██║ ██║ // ██║ ███████╗███████║ ██║ // ╚═╝ ╚══════╝╚══════╝ ╚═╝ #[cfg(test)] mod test { use crate::dev_prelude::{ChunkUnits, SmallKeyHashSet}; use super::*; use itertools::Itertools; use std::iter::FromIterator; #[test] fn active_chunks_in_lod0_and_lod1() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let lod0_center = ChunkUnits(Point3i::ZERO); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let mut octree = OctreeSet::new_empty(domain); let filled_extent = Extent3i::from_min_and_shape(Point3i::fill(-4), Point3i::fill(8)); octree.add_extent(&filled_extent); let active_chunks = ActiveChunks::new(&config, &octree, lod0_center); let lod0_set = Extent3i::from_min_and_shape(Point3i::fill(-2), Point3i::fill(4)) .iter_points() .map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 0, }); let mut lod1_set = OctreeSet::new_empty(domain); lod1_set.add_extent(&Extent3i::from_min_and_shape( Point3i::fill(-2), Point3i::fill(4), )); lod1_set.subtract_extent(&Extent3i::from_min_and_shape( Point3i::fill(-1), Point3i::fill(2), )); let lod1_set = lod1_set.collect_all_points().into_iter().map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 1, }); let expected_keys = SmallKeyHashSet::from_iter(lod0_set.chain(lod1_set)); assert_eq!(active_chunks.keys, expected_keys); } #[test] fn no_updates_when_center_does_not_move() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); let centers = [ [0, 0, 0], [2, 0, 0], [-2, 0, 0], [0, 2, 0], [0, -2, 0], [0, 0, 2], [0, 0, -2], ]; for p in centers.iter().cloned() { let center = ChunkUnits(PointN(p)); ClipMapUpdate3::new(&config, center, center) .find_chunk_updates(&octree, |_update| panic!("Fail")); } } #[test] fn updates_are_consistent_with_active_chunks() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); validate_update_path( &config, &octree, &[ [0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [-1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], [0, -1, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, -1], ], ); } fn validate_update_path(config: &ClipMapConfig3, octree: &OctreeSet, path: &[[i32; 3]]) { let mut active_chunks = ActiveChunks::new(&config, &octree, ChunkUnits(PointN(path[0]))); for (p1, p2) in path.iter().cloned().tuple_windows() { let old_lod0_center = ChunkUnits(PointN(p1)); let new_lod0_center = ChunkUnits(PointN(p2)); ClipMapUpdate3::new(config, old_lod0_center, new_lod0_center) .find_chunk_updates(octree, |update| active_chunks.apply_update(update)); // We should end up with the same result from moving the clipmap as we do just constructing it from scratch at the // new location. 
assert_eq!( active_chunks, ActiveChunks::new(config, octree, new_lod0_center), "Failed on edge: {:?} --> {:?}", p1, p2 ); } } /// This just stores the state of active chunks so that we can compare a known correct "active set" with one that has been /// modified via any number of calls to `apply_update`. #[derive(Debug, Eq, PartialEq)] struct ActiveChunks { keys: SmallKeyHashSet<ChunkKey3>, } impl ActiveChunks { fn new( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, ) -> Self { let mut keys = SmallKeyHashSet::new(); active_clipmap_lod_chunks(&config, &octree, lod0_center, |key| { keys.insert(key); }); Self { keys } } fn apply_update(&mut self, update: LodChunkUpdate3) { match update { LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, }) => { for chunk in old_chunks.into_iter() { self.keys.remove(&chunk); } assert!(!self.keys.contains(&new_chunk)); self.keys.insert(new_chunk); } LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, }) => { self.keys.remove(&old_chunk); for chunk in new_chunks.into_iter() { assert!(!self.keys.contains(&chunk)); self.keys.insert(chunk); } } } } } const CHUNK_SHAPE: Point3i = PointN([16; 3]); const NUM_LODS: u8 = 2; const CLIP_BOX_RADIUS: u16 = 2; }
find_chunk_updates
identifier_name
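`ClipMapUpdate3::new` in the row above precomputes the clipmap center at every level of detail by repeatedly halving the LOD0 center (`all_lod_centers`). The sketch below redoes that arithmetic per component with plain `i32` arrays, just to make the shift-based rounding visible; the array-based signature is only for illustration and is not the building-blocks API.

fn all_lod_centers(lod0_center: [i32; 3], num_lods: u8) -> Vec<[i32; 3]> {
    let mut centers = vec![lod0_center; num_lods as usize];
    for i in 1..num_lods as usize {
        // Each coarser LOD sees the center in its own chunk coordinates.
        centers[i] = centers[i - 1].map(|c| c >> 1);
    }
    centers
}

fn main() {
    // With 3 LODs, a LOD0 center of [5, -3, 8] becomes [2, -2, 4] at LOD1 and
    // [1, -1, 2] at LOD2; the arithmetic shift rounds toward negative infinity.
    assert_eq!(
        all_lod_centers([5, -3, 8], 3),
        vec![[5, -3, 8], [2, -2, 4], [1, -1, 2]]
    );
}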
clipmap.rs
use crate::prelude::{ChunkKey, ChunkKey3, ChunkUnits, OctreeNode, OctreeSet, VisitStatus}; use building_blocks_core::prelude::*; #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct ClipMapConfig3 { /// The number of levels of detail. num_lods: u8, /// The radius (in chunks) of a clipbox at any level of detail. clip_box_radius: i32, /// The shape of every chunk, regardless of LOD. Note that while a chunk at a higher LOD takes up more world space, it has /// the same shape as chunks at lower levels, because the voxel size also changes. /// /// **WARNING**: As of now, chunks must be cubes. chunk_shape: Point3i, } impl ClipMapConfig3 { pub fn new(num_lods: u8, clip_box_radius: ChunkUnits<u16>, chunk_shape: Point3i) -> Self { assert!(clip_box_radius.0 >= 2); // Radius 1 doesn't work for any more than a single LOD, so why are you using a clipmap? assert!(chunk_shape.dimensions_are_powers_of_2()); Self { num_lods, clip_box_radius: clip_box_radius.0 as i32, chunk_shape, } } pub fn chunk_edge_length_log2(&self) -> i32 { assert!(self.chunk_shape.is_cube()); self.chunk_shape.x().trailing_zeros() as i32 } } /// Traverse `octree` to find the `ChunkKey3`s that are "active" when the clipmap is centered at `lod0_center`. `active_rx` /// is a callback that receives the chunk keys for active chunks. pub fn active_clipmap_lod_chunks( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, mut active_rx: impl FnMut(ChunkKey3), ) { let chunk_log2 = config.chunk_edge_length_log2(); let centers = all_lod_centers(lod0_center.0, config.num_lods); let high_lod_boundary = config.clip_box_radius >> 1; octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= config.num_lods { return VisitStatus::Continue; } let offset_from_center = get_offset_from_lod_center(octant, &centers); if lod == 0 || offset_from_center > high_lod_boundary { // This octant can be rendered at this level of detail. active_rx(octant_chunk_key(chunk_log2, octant)); VisitStatus::Stop } else { // This octant should be rendered with more detail. VisitStatus::Continue } }); } /// A notification that a chunk (at a particular level of detail) must be split or merged. This is usually the result of a /// camera movement. #[derive(Clone, Debug, Eq, PartialEq)] pub enum LodChunkUpdate<N> { Split(SplitChunk<N>), Merge(MergeChunks<N>), } /// A 3-dimensional `LodChunkUpdate`. pub type LodChunkUpdate3 = LodChunkUpdate<[i32; 3]>; /// Split `old_chunk` into many `new_chunks`. The number of new chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct SplitChunk<N> { pub old_chunk: ChunkKey<N>, pub new_chunks: Vec<ChunkKey<N>>, } /// Merge many `old_chunks` into `new_chunk`. The number of old chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct MergeChunks<N> { pub old_chunks: Vec<ChunkKey<N>>, pub new_chunk: ChunkKey<N>, } /// A transient object used for running the `find_chunk_updates` method on multiple octrees. pub struct ClipMapUpdate3 { chunk_log2: i32, num_lods: u8, low_lod_boundary: i32, high_lod_boundary: i32, old_centers: Vec<Point3i>, new_centers: Vec<Point3i>, } impl ClipMapUpdate3 { /// Prepare to run the `find_chunk_updates` method after the clipmap center has moved from `old_lod0_center` to /// `new_lod0_center`. 
pub fn new( config: &ClipMapConfig3, old_lod0_center: ChunkUnits<Point3i>, new_lod0_center: ChunkUnits<Point3i>, ) -> Self { Self { chunk_log2: config.chunk_shape.x().trailing_zeros() as i32, num_lods: config.num_lods, low_lod_boundary: config.clip_box_radius, high_lod_boundary: config.clip_box_radius >> 1, old_centers: all_lod_centers(old_lod0_center.0, config.num_lods), new_centers: all_lod_centers(new_lod0_center.0, config.num_lods), } } /// Traverse `octree` and find all chunks that need to be split or merged based on the movement of the center of the /// clipmap. pub fn find_chunk_updates( &self, octree: &OctreeSet, mut update_rx: impl FnMut(LodChunkUpdate3), ) { octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= self.num_lods || lod == 0 { return VisitStatus::Continue; } let old_offset_from_center = get_offset_from_lod_center(octant, &self.old_centers); let offset_from_center = get_offset_from_lod_center(octant, &self.new_centers); if old_offset_from_center > self.high_lod_boundary && offset_from_center <= self.high_lod_boundary { // Increase the detail for this octant. // Create the higher detail in descendant octants. let old_chunk = octant_chunk_key(self.chunk_log2, octant); let new_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.new_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, })); VisitStatus::Stop } else if offset_from_center > self.high_lod_boundary && old_offset_from_center <= self.high_lod_boundary { // Decrease the detail for this octant. // Delete the higher detail in descendant octants. let new_chunk = octant_chunk_key(self.chunk_log2, octant); let old_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.old_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, })); VisitStatus::Stop } else if offset_from_center > self.low_lod_boundary && old_offset_from_center > self.low_lod_boundary { VisitStatus::Stop } else { VisitStatus::Continue } }); } } fn all_lod_centers(lod0_center: Point3i, num_lods: u8) -> Vec<Point3i> { let mut centers = vec![lod0_center; num_lods as usize]; for i in 1..num_lods as usize { centers[i] = centers[i - 1] >> 1; } centers } fn find_merge_or_split_descendants( chunk_log2: i32, octree: &OctreeSet, node: &OctreeNode, centers: &[Point3i], high_lod_boundary: i32, ) -> Vec<ChunkKey3> { let mut matching_chunks = Vec::with_capacity(8); node.visit_all_octants_in_preorder(octree, &mut |node: &OctreeNode| { let lod = node.octant().exponent(); let old_offset_from_center = get_offset_from_lod_center(node.octant(), centers); if lod == 0 || old_offset_from_center > high_lod_boundary { matching_chunks.push(octant_chunk_key(chunk_log2, node.octant())); VisitStatus::Stop } else { VisitStatus::Continue } }); matching_chunks } fn get_offset_from_lod_center(octant: &Octant, centers: &[Point3i]) -> i32 { let lod = octant.exponent(); let lod_p = octant.minimum() >> lod; let lod_center = centers[lod as usize]; (lod_p - lod_center) // For calculating offsets from the clipmap center, we need to bias any nonnegative components to make voxel coordinates // symmetric about the center. 
// // Voxel Coordinates // // -3 -2 -1 0 1 2 3 // <--|---|---|---|---|---|---|--> // // Clipmap Coordinates // // -3 -2 -1 1 2 3 // <--|---|---|---|---|---|---|--> .map_components_unary(|c| if c >= 0 { c + 1 } else { c }) .abs() .max_component() } fn octant_chunk_key(chunk_log2: i32, octant: &Octant) -> ChunkKey3 { let lod = octant.exponent(); ChunkKey { lod, minimum: (octant.minimum() << chunk_log2) >> lod, } } // ████████╗███████╗███████╗████████╗ // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ // ██║ █████╗ ███████╗ ██║ // ██║ ██╔══╝ ╚════██║ ██║ // ██║ ███████╗███████║ ██║ // ╚═╝ ╚══════╝╚══════╝ ╚═╝ #[cfg(test)] mod test { use crate::dev_prelude::{ChunkUnits, SmallKeyHashSet}; use super::*; use itertools::Itertools; use std::iter::FromIterator; #[test] fn active_chunks_in_lod0_and_lod1() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let lod0_center = ChunkUnits(Point3i::ZERO); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let mut octree = OctreeSet::new_empty(domain); let filled_extent = Extent3i::from_min_and_shape(Point3i::fill(-4), Point3i::fill(8)); octree.add_extent(&filled_extent); let active_chunks = ActiveChunks::new(&config, &octree, lod0_center); let lod0_set = Extent3i::from_min_and_shape(Point3i::fill(-2), Point3i::fill(4)) .iter_points() .map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 0, }); let mut lod1_set = OctreeSet::new_empty(domain); lod1_set.add_extent(&Extent3i::from_min_and_shape( Point3i::fill(-2), Point3i::fill(4), )); lod1_set.subtract_extent(&Extent3i::from_min_and_shape( Point3i::fill(-1), Point3i::fill(2), )); let lod1_set = lod1_set.collect_all_points().into_iter().map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 1, }); let expected_keys = SmallKeyHashSet::from_iter(lod0_set.chain(lod1_set)); assert_eq!(active_chunks.keys, expected_keys); } #[test] fn no_updates_when_center_does_not_move() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); let centers = [ [0, 0, 0], [2, 0, 0], [-2, 0, 0], [0, 2, 0], [0, -2, 0], [0, 0, 2], [0, 0, -2], ]; for p in centers.iter().cloned() { let center = ChunkUnits(PointN(p)); ClipMapUpdate3::new(&config, center, center) .find_chunk_updates(&octree, |_update| panic!("Fail")); } } #[test] fn updates_are_consistent_with_active_chunks() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); validate_update_path( &config, &octree, &[ [0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [-1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], [0, -1, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, -1], ], ); } fn validate_update_path(config: &ClipMapConfig3, octree: &OctreeSet, path: &[[i32; 3]]) { let mut active_chunks = ActiveChunks::new(&config, &octree, ChunkUnits(PointN(path[0]))); for (p1, p2) in path.iter().cloned().tuple_windows() { let old_lod0_center = ChunkUnits(PointN(p1)); let new_lod0_center = ChunkUnits(PointN(p2));
// We should end up with the same result from moving the clipmap as we do just constructing it from scratch at the // new location. assert_eq!( active_chunks, ActiveChunks::new(config, octree, new_lod0_center), "Failed on edge: {:?} --> {:?}", p1, p2 ); } } /// This just stores the state of active chunks so that we can compare a known correct "active set" with one that has been /// modified via any number of calls to `apply_update`. #[derive(Debug, Eq, PartialEq)] struct ActiveChunks { keys: SmallKeyHashSet<ChunkKey3>, } impl ActiveChunks { fn new( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, ) -> Self { let mut keys = SmallKeyHashSet::new(); active_clipmap_lod_chunks(&config, &octree, lod0_center, |key| { keys.insert(key); }); Self { keys } } fn apply_update(&mut self, update: LodChunkUpdate3) { match update { LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, }) => { for chunk in old_chunks.into_iter() { self.keys.remove(&chunk); } assert!(!self.keys.contains(&new_chunk)); self.keys.insert(new_chunk); } LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, }) => { self.keys.remove(&old_chunk); for chunk in new_chunks.into_iter() { assert!(!self.keys.contains(&chunk)); self.keys.insert(chunk); } } } } } const CHUNK_SHAPE: Point3i = PointN([16; 3]); const NUM_LODS: u8 = 2; const CLIP_BOX_RADIUS: u16 = 2; }
ClipMapUpdate3::new(config, old_lod0_center, new_lod0_center) .find_chunk_updates(octree, |update| active_chunks.apply_update(update));
random_line_split
clipmap.rs
use crate::prelude::{ChunkKey, ChunkKey3, ChunkUnits, OctreeNode, OctreeSet, VisitStatus}; use building_blocks_core::prelude::*; #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct ClipMapConfig3 { /// The number of levels of detail. num_lods: u8, /// The radius (in chunks) of a clipbox at any level of detail. clip_box_radius: i32, /// The shape of every chunk, regardless of LOD. Note that while a chunk at a higher LOD takes up more world space, it has /// the same shape as chunks at lower levels, because the voxel size also changes. /// /// **WARNING**: As of now, chunks must be cubes. chunk_shape: Point3i, } impl ClipMapConfig3 { pub fn new(num_lods: u8, clip_box_radius: ChunkUnits<u16>, chunk_shape: Point3i) -> Self { assert!(clip_box_radius.0 >= 2); // Radius 1 doesn't work for any more than a single LOD, so why are you using a clipmap? assert!(chunk_shape.dimensions_are_powers_of_2()); Self { num_lods, clip_box_radius: clip_box_radius.0 as i32, chunk_shape, } } pub fn chunk_edge_length_log2(&self) -> i32 { assert!(self.chunk_shape.is_cube()); self.chunk_shape.x().trailing_zeros() as i32 } } /// Traverse `octree` to find the `ChunkKey3`s that are "active" when the clipmap is centered at `lod0_center`. `active_rx` /// is a callback that receives the chunk keys for active chunks. pub fn active_clipmap_lod_chunks( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, mut active_rx: impl FnMut(ChunkKey3), ) { let chunk_log2 = config.chunk_edge_length_log2(); let centers = all_lod_centers(lod0_center.0, config.num_lods); let high_lod_boundary = config.clip_box_radius >> 1; octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= config.num_lods { return VisitStatus::Continue; } let offset_from_center = get_offset_from_lod_center(octant, &centers); if lod == 0 || offset_from_center > high_lod_boundary { // This octant can be rendered at this level of detail. active_rx(octant_chunk_key(chunk_log2, octant)); VisitStatus::Stop } else { // This octant should be rendered with more detail. VisitStatus::Continue } }); } /// A notification that a chunk (at a particular level of detail) must be split or merged. This is usually the result of a /// camera movement. #[derive(Clone, Debug, Eq, PartialEq)] pub enum LodChunkUpdate<N> { Split(SplitChunk<N>), Merge(MergeChunks<N>), } /// A 3-dimensional `LodChunkUpdate`. pub type LodChunkUpdate3 = LodChunkUpdate<[i32; 3]>; /// Split `old_chunk` into many `new_chunks`. The number of new chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct SplitChunk<N> { pub old_chunk: ChunkKey<N>, pub new_chunks: Vec<ChunkKey<N>>, } /// Merge many `old_chunks` into `new_chunk`. The number of old chunks depends on how many levels of detail the octant has /// moved. #[derive(Clone, Debug, Eq, PartialEq)] pub struct MergeChunks<N> { pub old_chunks: Vec<ChunkKey<N>>, pub new_chunk: ChunkKey<N>, } /// A transient object used for running the `find_chunk_updates` method on multiple octrees. pub struct ClipMapUpdate3 { chunk_log2: i32, num_lods: u8, low_lod_boundary: i32, high_lod_boundary: i32, old_centers: Vec<Point3i>, new_centers: Vec<Point3i>, } impl ClipMapUpdate3 { /// Prepare to run the `find_chunk_updates` method after the clipmap center has moved from `old_lod0_center` to /// `new_lod0_center`. 
pub fn new( config: &ClipMapConfig3, old_lod0_center: ChunkUnits<Point3i>, new_lod0_center: ChunkUnits<Point3i>, ) -> Self { Self { chunk_log2: config.chunk_shape.x().trailing_zeros() as i32, num_lods: config.num_lods, low_lod_boundary: config.clip_box_radius, high_lod_boundary: config.clip_box_radius >> 1, old_centers: all_lod_centers(old_lod0_center.0, config.num_lods), new_centers: all_lod_centers(new_lod0_center.0, config.num_lods), } } /// Traverse `octree` and find all chunks that need to be split or merged based on the movement of the center of the /// clipmap. pub fn find_chunk_updates( &self, octree: &OctreeSet, mut update_rx: impl FnMut(LodChunkUpdate3), ) { octree.visit_all_octants_in_preorder(&mut |node: &OctreeNode| { let octant = node.octant(); let lod = octant.exponent(); if lod >= self.num_lods || lod == 0 { return VisitStatus::Continue; } let old_offset_from_center = get_offset_from_lod_center(octant, &self.old_centers); let offset_from_center = get_offset_from_lod_center(octant, &self.new_centers); if old_offset_from_center > self.high_lod_boundary && offset_from_center <= self.high_lod_boundary { // Increase the detail for this octant. // Create the higher detail in descendant octants. let old_chunk = octant_chunk_key(self.chunk_log2, octant); let new_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.new_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, })); VisitStatus::Stop } else if offset_from_center > self.high_lod_boundary && old_offset_from_center <= self.high_lod_boundary { // Decrease the detail for this octant. // Delete the higher detail in descendant octants. let new_chunk = octant_chunk_key(self.chunk_log2, octant); let old_chunks = find_merge_or_split_descendants( self.chunk_log2, octree, node, &self.old_centers, self.high_lod_boundary, ); update_rx(LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, })); VisitStatus::Stop } else if offset_from_center > self.low_lod_boundary && old_offset_from_center > self.low_lod_boundary { VisitStatus::Stop } else { VisitStatus::Continue } }); } } fn all_lod_centers(lod0_center: Point3i, num_lods: u8) -> Vec<Point3i> { let mut centers = vec![lod0_center; num_lods as usize]; for i in 1..num_lods as usize { centers[i] = centers[i - 1] >> 1; } centers } fn find_merge_or_split_descendants( chunk_log2: i32, octree: &OctreeSet, node: &OctreeNode, centers: &[Point3i], high_lod_boundary: i32, ) -> Vec<ChunkKey3> { let mut matching_chunks = Vec::with_capacity(8); node.visit_all_octants_in_preorder(octree, &mut |node: &OctreeNode| { let lod = node.octant().exponent(); let old_offset_from_center = get_offset_from_lod_center(node.octant(), centers); if lod == 0 || old_offset_from_center > high_lod_boundary { matching_chunks.push(octant_chunk_key(chunk_log2, node.octant())); VisitStatus::Stop } else { VisitStatus::Continue } }); matching_chunks } fn get_offset_from_lod_center(octant: &Octant, centers: &[Point3i]) -> i32
.max_component() } fn octant_chunk_key(chunk_log2: i32, octant: &Octant) -> ChunkKey3 { let lod = octant.exponent(); ChunkKey { lod, minimum: (octant.minimum() << chunk_log2) >> lod, } } // ████████╗███████╗███████╗████████╗ // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ // ██║ █████╗ ███████╗ ██║ // ██║ ██╔══╝ ╚════██║ ██║ // ██║ ███████╗███████║ ██║ // ╚═╝ ╚══════╝╚══════╝ ╚═╝ #[cfg(test)] mod test { use crate::dev_prelude::{ChunkUnits, SmallKeyHashSet}; use super::*; use itertools::Itertools; use std::iter::FromIterator; #[test] fn active_chunks_in_lod0_and_lod1() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let lod0_center = ChunkUnits(Point3i::ZERO); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let mut octree = OctreeSet::new_empty(domain); let filled_extent = Extent3i::from_min_and_shape(Point3i::fill(-4), Point3i::fill(8)); octree.add_extent(&filled_extent); let active_chunks = ActiveChunks::new(&config, &octree, lod0_center); let lod0_set = Extent3i::from_min_and_shape(Point3i::fill(-2), Point3i::fill(4)) .iter_points() .map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 0, }); let mut lod1_set = OctreeSet::new_empty(domain); lod1_set.add_extent(&Extent3i::from_min_and_shape( Point3i::fill(-2), Point3i::fill(4), )); lod1_set.subtract_extent(&Extent3i::from_min_and_shape( Point3i::fill(-1), Point3i::fill(2), )); let lod1_set = lod1_set.collect_all_points().into_iter().map(|p| ChunkKey { minimum: p * CHUNK_SHAPE, lod: 1, }); let expected_keys = SmallKeyHashSet::from_iter(lod0_set.chain(lod1_set)); assert_eq!(active_chunks.keys, expected_keys); } #[test] fn no_updates_when_center_does_not_move() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); let centers = [ [0, 0, 0], [2, 0, 0], [-2, 0, 0], [0, 2, 0], [0, -2, 0], [0, 0, 2], [0, 0, -2], ]; for p in centers.iter().cloned() { let center = ChunkUnits(PointN(p)); ClipMapUpdate3::new(&config, center, center) .find_chunk_updates(&octree, |_update| panic!("Fail")); } } #[test] fn updates_are_consistent_with_active_chunks() { let config = ClipMapConfig3::new(NUM_LODS, ChunkUnits(CLIP_BOX_RADIUS), CHUNK_SHAPE); let domain = Extent3i::from_min_and_shape(Point3i::fill(-16), Point3i::fill(32)); let octree = OctreeSet::new_full(domain); validate_update_path( &config, &octree, &[ [0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [-1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], [0, -1, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, -1], ], ); } fn validate_update_path(config: &ClipMapConfig3, octree: &OctreeSet, path: &[[i32; 3]]) { let mut active_chunks = ActiveChunks::new(&config, &octree, ChunkUnits(PointN(path[0]))); for (p1, p2) in path.iter().cloned().tuple_windows() { let old_lod0_center = ChunkUnits(PointN(p1)); let new_lod0_center = ChunkUnits(PointN(p2)); ClipMapUpdate3::new(config, old_lod0_center, new_lod0_center) .find_chunk_updates(octree, |update| active_chunks.apply_update(update)); // We should end up with the same result from moving the clipmap as we do just constructing it from scratch at the // new location. assert_eq!( active_chunks, ActiveChunks::new(config, octree, new_lod0_center), "Failed on edge: {:?} --> {:?}", p1, p2 ); } } /// This just stores the state of active chunks so that we can compare a known correct "active set" with one that has been /// modified via any number of calls to `apply_update`. 
#[derive(Debug, Eq, PartialEq)] struct ActiveChunks { keys: SmallKeyHashSet<ChunkKey3>, } impl ActiveChunks { fn new( config: &ClipMapConfig3, octree: &OctreeSet, lod0_center: ChunkUnits<Point3i>, ) -> Self { let mut keys = SmallKeyHashSet::new(); active_clipmap_lod_chunks(&config, &octree, lod0_center, |key| { keys.insert(key); }); Self { keys } } fn apply_update(&mut self, update: LodChunkUpdate3) { match update { LodChunkUpdate::Merge(MergeChunks { old_chunks, new_chunk, }) => { for chunk in old_chunks.into_iter() { self.keys.remove(&chunk); } assert!(!self.keys.contains(&new_chunk)); self.keys.insert(new_chunk); } LodChunkUpdate::Split(SplitChunk { old_chunk, new_chunks, }) => { self.keys.remove(&old_chunk); for chunk in new_chunks.into_iter() { assert!(!self.keys.contains(&chunk)); self.keys.insert(chunk); } } } } } const CHUNK_SHAPE: Point3i = PointN([16; 3]); const NUM_LODS: u8 = 2; const CLIP_BOX_RADIUS: u16 = 2; }
{ let lod = octant.exponent(); let lod_p = octant.minimum() >> lod; let lod_center = centers[lod as usize]; (lod_p - lod_center) // For calculating offsets from the clipmap center, we need to bias any nonnegative components to make voxel coordinates // symmetric about the center. // // Voxel Coordinates // // -3 -2 -1 0 1 2 3 // <--|---|---|---|---|---|---|--> // // Clipmap Coordinates // // -3 -2 -1 1 2 3 // <--|---|---|---|---|---|---|--> .map_components_unary(|c| if c >= 0 { c + 1 } else { c }) .abs()
identifier_body
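The middle and suffix of the row above form a small clipmap-distance computation: shift the octant minimum down to its LOD, subtract that LOD's center, bias nonnegative components so coordinates are symmetric about the center, then take the largest absolute component. Below is a minimal sketch of the same idea using plain [i32; 3] arrays in place of the crate's Point3i; the function name and the main demo are illustrative, not part of the dataset.

// A sketch of the clipmap distance from the row above, assuming plain arrays.
fn clipmap_distance(octant_minimum: [i32; 3], lod: u8, centers: &[[i32; 3]]) -> i32 {
    let lod_center = centers[lod as usize];
    let mut max = 0;
    for i in 0..3 {
        // Shift the octant minimum down to its LOD and offset it from the center.
        let c = (octant_minimum[i] >> lod) - lod_center[i];
        // Bias nonnegative components so clipmap coordinates are symmetric
        // about the center: ..., -2, -1, 1, 2, ... (no 0 slot).
        let c = if c >= 0 { c + 1 } else { c };
        // Chebyshev-style distance: keep the largest absolute component.
        max = max.max(c.abs());
    }
    max
}

fn main() {
    // Octant minimum (4, 0, -8) at LOD 1, with both LOD centers at the origin.
    let d = clipmap_distance([4, 0, -8], 1, &[[0; 3], [0; 3]]);
    assert_eq!(d, 4);
}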
pool.rs
// This module provides a relatively simple thread-safe pool of reusable // objects. For the most part, it's implemented by a stack represented by a // Mutex<Vec<T>>. It has one small trick: because unlocking a mutex is somewhat // costly, in the case where a pool is accessed by the first thread that tried // to get a value, we bypass the mutex. Here are some benchmarks showing the // difference. // // 1) misc::anchored_literal_long_non_match 21 (18571 MB/s) // 2) misc::anchored_literal_long_non_match 107 (3644 MB/s) // 3) misc::anchored_literal_long_non_match 45 (8666 MB/s) // 4) misc::anchored_literal_long_non_match 19 (20526 MB/s) // // (1) represents our baseline: the master branch at the time of writing when // using the 'thread_local' crate to implement the pool below. // // (2) represents a naive pool implemented completely via Mutex<Vec<T>>. There // is no special trick for bypassing the mutex. // // (3) is the same as (2), except it uses Mutex<Vec<Box<T>>>. It is twice as // fast because a Box<T> is much smaller than the T we use with a Pool in this // crate. So pushing and popping a Box<T> from a Vec is quite a bit faster // than for T. // // (4) is the same as (3), but with the trick for bypassing the mutex in the // case of the first-to-get thread. // // Why move off of thread_local? Even though (4) is a hair faster than (1) // above, this was not the main goal. The main goal was to move off of // thread_local and find a way to *simply* re-capture some of its speed for // regex's specific case. So again, why move off of it? The *primary* reason is // because of memory leaks. See https://github.com/rust-lang/regex/issues/362 // for example. (Why do I want it to be simple? Well, I suppose what I mean is, // "use as much safe code as possible to minimize risk and be as sure as I can // be that it is correct.") // // My guess is that the thread_local design is probably not appropriate for // regex since its memory usage scales to the number of active threads that // have used a regex, where as the pool below scales to the number of threads // that simultaneously use a regex. While neither case permits contraction, // since we own the pool data structure below, we can add contraction if a // clear use case pops up in the wild. More pressingly though, it seems that // there are at least some use case patterns where one might have many threads // sitting around that might have used a regex at one point. While thread_local // does try to reuse space previously used by a thread that has since stopped, // its maximal memory usage still scales with the total number of active // threads. In contrast, the pool below scales with the total number of threads // *simultaneously* using the pool. The hope is that this uses less memory // overall. And if it doesn't, we can hopefully tune it somehow. // // It seems that these sort of conditions happen frequently // in FFI inside of other more "managed" languages. This was // mentioned in the issue linked above, and also mentioned here: // https://github.com/BurntSushi/rure-go/issues/3. And in particular, users // confirm that disabling the use of thread_local resolves the leak. // // There were other weaker reasons for moving off of thread_local as well. // Namely, at the time, I was looking to reduce dependencies. And for something // like regex, maintenance can be simpler when we own the full dependency tree. 
use std::panic::{RefUnwindSafe, UnwindSafe}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Mutex; /// An atomic counter used to allocate thread IDs. static COUNTER: AtomicUsize = AtomicUsize::new(1); thread_local!( /// A thread local used to assign an ID to a thread. static THREAD_ID: usize = { let next = COUNTER.fetch_add(1, Ordering::Relaxed); // SAFETY: We cannot permit the reuse of thread IDs since reusing a // thread ID might result in more than one thread "owning" a pool, // and thus, permit accessing a mutable value from multiple threads // simultaneously without synchronization. The intent of this panic is // to be a sanity check. It is not expected that the thread ID space // will actually be exhausted in practice. // // This checks that the counter never wraps around, since atomic // addition wraps around on overflow. if next == 0 { panic!("regex: thread ID allocation space exhausted"); } next }; ); /// The type of the function used to create values in a pool when the pool is /// empty and the caller requests one. type CreateFn<T> = Box<dyn Fn() -> T + Send + Sync + UnwindSafe + RefUnwindSafe +'static>; /// A simple thread safe pool for reusing values. /// /// Getting a value out comes with a guard. When that guard is dropped, the /// value is automatically put back in the pool. /// /// A Pool<T> impls Sync when T is Send (even if it's not Sync). This means /// that T can use interior mutability. This is possible because a pool is /// guaranteed to provide a value to exactly one thread at any time. /// /// Currently, a pool never contracts in size. Its size is proportional to the /// number of simultaneous uses. pub struct Pool<T> { /// A stack of T values to hand out. These are used when a Pool is /// accessed by a thread that didn't create it. stack: Mutex<Vec<Box<T>>>, /// A function to create more T values when stack is empty and a caller /// has requested a T. create: CreateFn<T>, /// The ID of the thread that owns this pool. The owner is the thread /// that makes the first call to 'get'. When the owner calls 'get', it /// gets 'owner_val' directly instead of returning a T from'stack'. /// See comments elsewhere for details, but this is intended to be an /// optimization for the common case that makes getting a T faster. /// /// It is initialized to a value of zero (an impossible thread ID) as a /// sentinel to indicate that it is unowned. owner: AtomicUsize, /// A value to return when the caller is in the same thread that created /// the Pool. owner_val: T, } // SAFETY: Since we want to use a Pool from multiple threads simultaneously // behind an Arc, we need for it to be Sync. In cases where T is sync, Pool<T> // would be Sync. However, since we use a Pool to store mutable scratch space, // we wind up using a T that has interior mutability and is thus itself not // Sync. So what we *really* want is for our Pool<T> to by Sync even when T is // not Sync (but is at least Send). // // The only non-sync aspect of a Pool is its 'owner_val' field, which is used // to implement faster access to a pool value in the common case of a pool // being accessed in the same thread in which it was created. The'stack' field // is also shared, but a Mutex<T> where T: Send is already Sync. So we only // need to worry about 'owner_val'. // // The key is to guarantee that 'owner_val' can only ever be accessed from one // thread. 
In our implementation below, we guarantee this by only returning the // 'owner_val' when the ID of the current thread matches the ID of the thread // that created the Pool. Since this can only ever be one thread, it follows // that only one thread can access 'owner_val' at any point in time. Thus, it // is safe to declare that Pool<T> is Sync when T is Send. // // NOTE: It would also be possible to make the owning thread be the *first* // thread that tries to get a value out of a Pool. However, the current // implementation is a little simpler and it's not clear if making the first // thread (rather than the creating thread) is meaningfully better. // // If there is a way to achieve our performance goals using safe code, then // I would very much welcome a patch. As it stands, the implementation below // tries to balance safety with performance. The case where a Regex is used // from multiple threads simultaneously will suffer a bit since getting a cache // will require unlocking a mutex. unsafe impl<T: Send> Sync for Pool<T> {} impl<T: ::std::fmt::Debug> ::std::fmt::Debug for Pool<T> { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { f.debug_struct("Pool") .field("stack", &self.stack) .field("owner", &self.owner) .field("owner_val", &self.owner_val) .finish() } } /// A guard that is returned when a caller requests a value from the pool. /// /// The purpose of the guard is to use RAII to automatically put the value back /// in the pool once it's dropped. #[derive(Debug)] pub struct PoolGuard<'a, T: Send> { /// The pool that this guard is attached to. pool: &'a Pool<T>, /// This is None when the guard represents the special "owned" value. In /// which case, the value is retrieved from 'pool.owner_val'. value: Option<Box<T>>, } impl<T: Send> Pool<T> { /// Create a new pool. The given closure is used to create values in the /// pool when necessary. pub fn new(create: CreateFn<T>) -> Pool<T> { let owner = AtomicUsize::new(0); let owner_val = create(); Pool { stack: Mutex::new(vec![]), create, owner, owner_val } } /// Get a value from the pool. The caller is guaranteed to have exclusive /// access to the given value. /// /// Note that there is no guarantee provided about which value in the /// pool is returned. That is, calling get, dropping the guard (causing /// the value to go back into the pool) and then calling get again is NOT /// guaranteed to return the same value received in the first get call. #[cfg_attr(feature = "perf-inline", inline(always))] pub fn get(&self) -> PoolGuard<'_, T> { // Our fast path checks if the caller is the thread that "owns" this // pool. Or stated differently, whether it is the first thread that // tried to extract a value from the pool. If it is, then we can return // a T to the caller without going through a mutex. // // SAFETY: We must guarantee that only one thread gets access to this // value. Since a thread is uniquely identified by the THREAD_ID thread // local, it follows that is the caller's thread ID is equal to the // owner, then only one thread may receive this value. let caller = THREAD_ID.with(|id| *id); let owner = self.owner.load(Ordering::Relaxed); if caller == owner { return self.guard_owned(); } self.get_slow(caller, owner) } /// This is the "slow" version that goes through a mutex to pop an /// allocated value off a stack to return to the caller. (Or, if the stack /// is empty, a new value is created.) /// /// If the pool has no owner, then this will set the owner. 
#[cold] fn get_slow(&self, caller: usize, owner: usize) -> PoolGuard<'_, T> { use std::sync::atomic::Ordering::Relaxed; if owner == 0 { // The sentinel 0 value means this pool is not yet owned. We // try to atomically set the owner. If we do, then this thread // becomes the owner and we can return a guard that represents // the special T for the owner. let res = self.owner.compare_exchange(0, caller, Relaxed, Relaxed); if res.is_ok() { return self.guard_owned(); } } let mut stack = self.stack.lock().unwrap(); let value = match stack.pop() { None => Box::new((self.create)()), Some(value) => value, }; self.guard_stack(value) } /// Puts a value back into the pool. Callers don't need to call this. Once /// the guard that's returned by 'get' is dropped, it is put back into the /// pool automatically. fn put(&self, value: Box<T>) { let mut stack = self.stack.lock().unwrap(); stack.push(value); } /// Create a guard that represents the special owned T. fn guard_owned(&self) -> PoolGuard<'_, T> { PoolGuard { pool: self, value: None } } /// Create a guard that contains a value from the pool's stack. fn guard_stack(&self, value: Box<T>) -> PoolGuard<'_, T>
} impl<'a, T: Send> PoolGuard<'a, T> { /// Return the underlying value. pub fn value(&self) -> &T { match self.value { None => &self.pool.owner_val, Some(ref v) => &**v, } } } impl<'a, T: Send> Drop for PoolGuard<'a, T> { #[cfg_attr(feature = "perf-inline", inline(always))] fn drop(&mut self) { if let Some(value) = self.value.take() { self.pool.put(value); } } } #[cfg(test)] mod tests { use std::panic::{RefUnwindSafe, UnwindSafe}; use super::*; #[test] fn oibits() { use crate::exec::ProgramCache; fn has_oibits<T: Send + Sync + UnwindSafe + RefUnwindSafe>() {} has_oibits::<Pool<ProgramCache>>(); } // Tests that Pool implements the "single owner" optimization. That is, the // thread that first accesses the pool gets its own copy, while all other // threads get distinct copies. #[test] fn thread_owner_optimization() { use std::cell::RefCell; use std::sync::Arc; let pool: Arc<Pool<RefCell<Vec<char>>>> = Arc::new(Pool::new(Box::new(|| RefCell::new(vec!['a'])))); pool.get().value().borrow_mut().push('x'); let pool1 = pool.clone(); let t1 = std::thread::spawn(move || { let guard = pool1.get(); let v = guard.value(); v.borrow_mut().push('y'); }); let pool2 = pool.clone(); let t2 = std::thread::spawn(move || { let guard = pool2.get(); let v = guard.value(); v.borrow_mut().push('z'); }); t1.join().unwrap(); t2.join().unwrap(); // If we didn't implement the single owner optimization, then one of // the threads above is likely to have mutated the [a, x] vec that // we stuffed in the pool before spawning the threads. But since // neither thread was first to access the pool, and because of the // optimization, we should be guaranteed that neither thread mutates // the special owned pool value. // // (Technically this is an implementation detail and not a contract of // Pool's API.) assert_eq!(vec!['a', 'x'], *pool.get().value().borrow()); } }
{ PoolGuard { pool: self, value: Some(value) } }
identifier_body
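To make the guard's put-back-on-drop behavior concrete, here is a short usage sketch. It assumes the Pool type from this row is in scope (its import path is internal to the crate, so it is omitted), and the helper function is illustrative.

// Usage sketch for the Pool above; `Pool` is assumed to be in scope.
fn pool_usage_sketch() {
    // The boxed closure is the CreateFn used whenever the stack is empty.
    let pool: Pool<Vec<u8>> = Pool::new(Box::new(|| Vec::with_capacity(1024)));

    {
        // The first `get` on this thread takes the mutex-free owner path.
        let guard = pool.get();
        assert!(guard.value().capacity() >= 1024);
    } // Dropping the guard returns the value to the pool.

    // Subsequent gets from the owning thread stay on the fast path;
    // other threads fall back to the Mutex<Vec<Box<T>>> stack.
    let _guard = pool.get();
}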
pool.rs
// This module provides a relatively simple thread-safe pool of reusable // objects. For the most part, it's implemented by a stack represented by a // Mutex<Vec<T>>. It has one small trick: because unlocking a mutex is somewhat // costly, in the case where a pool is accessed by the first thread that tried // to get a value, we bypass the mutex. Here are some benchmarks showing the // difference. // // 1) misc::anchored_literal_long_non_match 21 (18571 MB/s) // 2) misc::anchored_literal_long_non_match 107 (3644 MB/s) // 3) misc::anchored_literal_long_non_match 45 (8666 MB/s) // 4) misc::anchored_literal_long_non_match 19 (20526 MB/s) // // (1) represents our baseline: the master branch at the time of writing when // using the 'thread_local' crate to implement the pool below. // // (2) represents a naive pool implemented completely via Mutex<Vec<T>>. There // is no special trick for bypassing the mutex. // // (3) is the same as (2), except it uses Mutex<Vec<Box<T>>>. It is twice as // fast because a Box<T> is much smaller than the T we use with a Pool in this // crate. So pushing and popping a Box<T> from a Vec is quite a bit faster // than for T. // // (4) is the same as (3), but with the trick for bypassing the mutex in the // case of the first-to-get thread. // // Why move off of thread_local? Even though (4) is a hair faster than (1) // above, this was not the main goal. The main goal was to move off of // thread_local and find a way to *simply* re-capture some of its speed for // regex's specific case. So again, why move off of it? The *primary* reason is // because of memory leaks. See https://github.com/rust-lang/regex/issues/362 // for example. (Why do I want it to be simple? Well, I suppose what I mean is, // "use as much safe code as possible to minimize risk and be as sure as I can // be that it is correct.") // // My guess is that the thread_local design is probably not appropriate for // regex since its memory usage scales to the number of active threads that // have used a regex, where as the pool below scales to the number of threads // that simultaneously use a regex. While neither case permits contraction, // since we own the pool data structure below, we can add contraction if a // clear use case pops up in the wild. More pressingly though, it seems that // there are at least some use case patterns where one might have many threads // sitting around that might have used a regex at one point. While thread_local // does try to reuse space previously used by a thread that has since stopped, // its maximal memory usage still scales with the total number of active // threads. In contrast, the pool below scales with the total number of threads // *simultaneously* using the pool. The hope is that this uses less memory // overall. And if it doesn't, we can hopefully tune it somehow. // // It seems that these sort of conditions happen frequently // in FFI inside of other more "managed" languages. This was // mentioned in the issue linked above, and also mentioned here: // https://github.com/BurntSushi/rure-go/issues/3. And in particular, users // confirm that disabling the use of thread_local resolves the leak. // // There were other weaker reasons for moving off of thread_local as well. // Namely, at the time, I was looking to reduce dependencies. And for something // like regex, maintenance can be simpler when we own the full dependency tree. 
use std::panic::{RefUnwindSafe, UnwindSafe}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Mutex; /// An atomic counter used to allocate thread IDs. static COUNTER: AtomicUsize = AtomicUsize::new(1); thread_local!( /// A thread local used to assign an ID to a thread. static THREAD_ID: usize = { let next = COUNTER.fetch_add(1, Ordering::Relaxed); // SAFETY: We cannot permit the reuse of thread IDs since reusing a // thread ID might result in more than one thread "owning" a pool, // and thus, permit accessing a mutable value from multiple threads // simultaneously without synchronization. The intent of this panic is // to be a sanity check. It is not expected that the thread ID space // will actually be exhausted in practice. // // This checks that the counter never wraps around, since atomic // addition wraps around on overflow. if next == 0 { panic!("regex: thread ID allocation space exhausted"); } next }; ); /// The type of the function used to create values in a pool when the pool is /// empty and the caller requests one. type CreateFn<T> = Box<dyn Fn() -> T + Send + Sync + UnwindSafe + RefUnwindSafe +'static>; /// A simple thread safe pool for reusing values. /// /// Getting a value out comes with a guard. When that guard is dropped, the /// value is automatically put back in the pool. /// /// A Pool<T> impls Sync when T is Send (even if it's not Sync). This means /// that T can use interior mutability. This is possible because a pool is /// guaranteed to provide a value to exactly one thread at any time. /// /// Currently, a pool never contracts in size. Its size is proportional to the /// number of simultaneous uses. pub struct
<T> { /// A stack of T values to hand out. These are used when a Pool is /// accessed by a thread that didn't create it. stack: Mutex<Vec<Box<T>>>, /// A function to create more T values when stack is empty and a caller /// has requested a T. create: CreateFn<T>, /// The ID of the thread that owns this pool. The owner is the thread /// that makes the first call to 'get'. When the owner calls 'get', it /// gets 'owner_val' directly instead of returning a T from'stack'. /// See comments elsewhere for details, but this is intended to be an /// optimization for the common case that makes getting a T faster. /// /// It is initialized to a value of zero (an impossible thread ID) as a /// sentinel to indicate that it is unowned. owner: AtomicUsize, /// A value to return when the caller is in the same thread that created /// the Pool. owner_val: T, } // SAFETY: Since we want to use a Pool from multiple threads simultaneously // behind an Arc, we need for it to be Sync. In cases where T is sync, Pool<T> // would be Sync. However, since we use a Pool to store mutable scratch space, // we wind up using a T that has interior mutability and is thus itself not // Sync. So what we *really* want is for our Pool<T> to by Sync even when T is // not Sync (but is at least Send). // // The only non-sync aspect of a Pool is its 'owner_val' field, which is used // to implement faster access to a pool value in the common case of a pool // being accessed in the same thread in which it was created. The'stack' field // is also shared, but a Mutex<T> where T: Send is already Sync. So we only // need to worry about 'owner_val'. // // The key is to guarantee that 'owner_val' can only ever be accessed from one // thread. In our implementation below, we guarantee this by only returning the // 'owner_val' when the ID of the current thread matches the ID of the thread // that created the Pool. Since this can only ever be one thread, it follows // that only one thread can access 'owner_val' at any point in time. Thus, it // is safe to declare that Pool<T> is Sync when T is Send. // // NOTE: It would also be possible to make the owning thread be the *first* // thread that tries to get a value out of a Pool. However, the current // implementation is a little simpler and it's not clear if making the first // thread (rather than the creating thread) is meaningfully better. // // If there is a way to achieve our performance goals using safe code, then // I would very much welcome a patch. As it stands, the implementation below // tries to balance safety with performance. The case where a Regex is used // from multiple threads simultaneously will suffer a bit since getting a cache // will require unlocking a mutex. unsafe impl<T: Send> Sync for Pool<T> {} impl<T: ::std::fmt::Debug> ::std::fmt::Debug for Pool<T> { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { f.debug_struct("Pool") .field("stack", &self.stack) .field("owner", &self.owner) .field("owner_val", &self.owner_val) .finish() } } /// A guard that is returned when a caller requests a value from the pool. /// /// The purpose of the guard is to use RAII to automatically put the value back /// in the pool once it's dropped. #[derive(Debug)] pub struct PoolGuard<'a, T: Send> { /// The pool that this guard is attached to. pool: &'a Pool<T>, /// This is None when the guard represents the special "owned" value. In /// which case, the value is retrieved from 'pool.owner_val'. value: Option<Box<T>>, } impl<T: Send> Pool<T> { /// Create a new pool. 
The given closure is used to create values in the /// pool when necessary. pub fn new(create: CreateFn<T>) -> Pool<T> { let owner = AtomicUsize::new(0); let owner_val = create(); Pool { stack: Mutex::new(vec![]), create, owner, owner_val } } /// Get a value from the pool. The caller is guaranteed to have exclusive /// access to the given value. /// /// Note that there is no guarantee provided about which value in the /// pool is returned. That is, calling get, dropping the guard (causing /// the value to go back into the pool) and then calling get again is NOT /// guaranteed to return the same value received in the first get call. #[cfg_attr(feature = "perf-inline", inline(always))] pub fn get(&self) -> PoolGuard<'_, T> { // Our fast path checks if the caller is the thread that "owns" this // pool. Or stated differently, whether it is the first thread that // tried to extract a value from the pool. If it is, then we can return // a T to the caller without going through a mutex. // // SAFETY: We must guarantee that only one thread gets access to this // value. Since a thread is uniquely identified by the THREAD_ID thread // local, it follows that is the caller's thread ID is equal to the // owner, then only one thread may receive this value. let caller = THREAD_ID.with(|id| *id); let owner = self.owner.load(Ordering::Relaxed); if caller == owner { return self.guard_owned(); } self.get_slow(caller, owner) } /// This is the "slow" version that goes through a mutex to pop an /// allocated value off a stack to return to the caller. (Or, if the stack /// is empty, a new value is created.) /// /// If the pool has no owner, then this will set the owner. #[cold] fn get_slow(&self, caller: usize, owner: usize) -> PoolGuard<'_, T> { use std::sync::atomic::Ordering::Relaxed; if owner == 0 { // The sentinel 0 value means this pool is not yet owned. We // try to atomically set the owner. If we do, then this thread // becomes the owner and we can return a guard that represents // the special T for the owner. let res = self.owner.compare_exchange(0, caller, Relaxed, Relaxed); if res.is_ok() { return self.guard_owned(); } } let mut stack = self.stack.lock().unwrap(); let value = match stack.pop() { None => Box::new((self.create)()), Some(value) => value, }; self.guard_stack(value) } /// Puts a value back into the pool. Callers don't need to call this. Once /// the guard that's returned by 'get' is dropped, it is put back into the /// pool automatically. fn put(&self, value: Box<T>) { let mut stack = self.stack.lock().unwrap(); stack.push(value); } /// Create a guard that represents the special owned T. fn guard_owned(&self) -> PoolGuard<'_, T> { PoolGuard { pool: self, value: None } } /// Create a guard that contains a value from the pool's stack. fn guard_stack(&self, value: Box<T>) -> PoolGuard<'_, T> { PoolGuard { pool: self, value: Some(value) } } } impl<'a, T: Send> PoolGuard<'a, T> { /// Return the underlying value. pub fn value(&self) -> &T { match self.value { None => &self.pool.owner_val, Some(ref v) => &**v, } } } impl<'a, T: Send> Drop for PoolGuard<'a, T> { #[cfg_attr(feature = "perf-inline", inline(always))] fn drop(&mut self) { if let Some(value) = self.value.take() { self.pool.put(value); } } } #[cfg(test)] mod tests { use std::panic::{RefUnwindSafe, UnwindSafe}; use super::*; #[test] fn oibits() { use crate::exec::ProgramCache; fn has_oibits<T: Send + Sync + UnwindSafe + RefUnwindSafe>() {} has_oibits::<Pool<ProgramCache>>(); } // Tests that Pool implements the "single owner" optimization. 
That is, the // thread that first accesses the pool gets its own copy, while all other // threads get distinct copies. #[test] fn thread_owner_optimization() { use std::cell::RefCell; use std::sync::Arc; let pool: Arc<Pool<RefCell<Vec<char>>>> = Arc::new(Pool::new(Box::new(|| RefCell::new(vec!['a'])))); pool.get().value().borrow_mut().push('x'); let pool1 = pool.clone(); let t1 = std::thread::spawn(move || { let guard = pool1.get(); let v = guard.value(); v.borrow_mut().push('y'); }); let pool2 = pool.clone(); let t2 = std::thread::spawn(move || { let guard = pool2.get(); let v = guard.value(); v.borrow_mut().push('z'); }); t1.join().unwrap(); t2.join().unwrap(); // If we didn't implement the single owner optimization, then one of // the threads above is likely to have mutated the [a, x] vec that // we stuffed in the pool before spawning the threads. But since // neither thread was first to access the pool, and because of the // optimization, we should be guaranteed that neither thread mutates // the special owned pool value. // // (Technically this is an implementation detail and not a contract of // Pool's API.) assert_eq!(vec!['a', 'x'], *pool.get().value().borrow()); } }
Pool
identifier_name
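The thread-ID allocation in this row, a global atomic counter read once per thread through a thread_local, is self-contained enough to run on its own. The sketch below mirrors the COUNTER and THREAD_ID names from the snippet; the main demo and the assert in place of the panic are illustrative simplifications.

use std::sync::atomic::{AtomicUsize, Ordering};

// Each thread claims the next ID the first time it reads its thread-local.
// Starting at 1 keeps 0 free as the "unowned" sentinel used by the pool.
static COUNTER: AtomicUsize = AtomicUsize::new(1);

thread_local!(
    static THREAD_ID: usize = {
        let next = COUNTER.fetch_add(1, Ordering::Relaxed);
        // fetch_add wraps on overflow, so seeing 0 would mean the ID space
        // was exhausted; the real code panics here as a sanity check.
        assert!(next != 0);
        next
    };
);

fn main() {
    let main_id = THREAD_ID.with(|id| *id);
    let spawned_id = std::thread::spawn(|| THREAD_ID.with(|id| *id))
        .join()
        .unwrap();
    // IDs are distinct across threads and stable within a thread.
    assert_ne!(main_id, spawned_id);
    assert_eq!(main_id, THREAD_ID.with(|id| *id));
}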
pool.rs
// This module provides a relatively simple thread-safe pool of reusable // objects. For the most part, it's implemented by a stack represented by a // Mutex<Vec<T>>. It has one small trick: because unlocking a mutex is somewhat // costly, in the case where a pool is accessed by the first thread that tried // to get a value, we bypass the mutex. Here are some benchmarks showing the // difference. // // 1) misc::anchored_literal_long_non_match 21 (18571 MB/s) // 2) misc::anchored_literal_long_non_match 107 (3644 MB/s) // 3) misc::anchored_literal_long_non_match 45 (8666 MB/s) // 4) misc::anchored_literal_long_non_match 19 (20526 MB/s) // // (1) represents our baseline: the master branch at the time of writing when // using the 'thread_local' crate to implement the pool below. // // (2) represents a naive pool implemented completely via Mutex<Vec<T>>. There // is no special trick for bypassing the mutex. // // (3) is the same as (2), except it uses Mutex<Vec<Box<T>>>. It is twice as // fast because a Box<T> is much smaller than the T we use with a Pool in this // crate. So pushing and popping a Box<T> from a Vec is quite a bit faster // than for T. // // (4) is the same as (3), but with the trick for bypassing the mutex in the // case of the first-to-get thread. // // Why move off of thread_local? Even though (4) is a hair faster than (1) // above, this was not the main goal. The main goal was to move off of // thread_local and find a way to *simply* re-capture some of its speed for // regex's specific case. So again, why move off of it? The *primary* reason is // because of memory leaks. See https://github.com/rust-lang/regex/issues/362 // for example. (Why do I want it to be simple? Well, I suppose what I mean is, // "use as much safe code as possible to minimize risk and be as sure as I can // be that it is correct.") // // My guess is that the thread_local design is probably not appropriate for // regex since its memory usage scales to the number of active threads that // have used a regex, where as the pool below scales to the number of threads // that simultaneously use a regex. While neither case permits contraction, // since we own the pool data structure below, we can add contraction if a // clear use case pops up in the wild. More pressingly though, it seems that // there are at least some use case patterns where one might have many threads // sitting around that might have used a regex at one point. While thread_local // does try to reuse space previously used by a thread that has since stopped, // its maximal memory usage still scales with the total number of active // threads. In contrast, the pool below scales with the total number of threads // *simultaneously* using the pool. The hope is that this uses less memory // overall. And if it doesn't, we can hopefully tune it somehow. // // It seems that these sort of conditions happen frequently // in FFI inside of other more "managed" languages. This was // mentioned in the issue linked above, and also mentioned here: // https://github.com/BurntSushi/rure-go/issues/3. And in particular, users // confirm that disabling the use of thread_local resolves the leak. // // There were other weaker reasons for moving off of thread_local as well. // Namely, at the time, I was looking to reduce dependencies. And for something // like regex, maintenance can be simpler when we own the full dependency tree. 
use std::panic::{RefUnwindSafe, UnwindSafe}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Mutex; /// An atomic counter used to allocate thread IDs. static COUNTER: AtomicUsize = AtomicUsize::new(1); thread_local!( /// A thread local used to assign an ID to a thread. static THREAD_ID: usize = { let next = COUNTER.fetch_add(1, Ordering::Relaxed); // SAFETY: We cannot permit the reuse of thread IDs since reusing a // thread ID might result in more than one thread "owning" a pool, // and thus, permit accessing a mutable value from multiple threads // simultaneously without synchronization. The intent of this panic is // to be a sanity check. It is not expected that the thread ID space // will actually be exhausted in practice. // // This checks that the counter never wraps around, since atomic // addition wraps around on overflow. if next == 0 { panic!("regex: thread ID allocation space exhausted"); } next }; ); /// The type of the function used to create values in a pool when the pool is /// empty and the caller requests one. type CreateFn<T> = Box<dyn Fn() -> T + Send + Sync + UnwindSafe + RefUnwindSafe +'static>; /// A simple thread safe pool for reusing values. /// /// Getting a value out comes with a guard. When that guard is dropped, the /// value is automatically put back in the pool. /// /// A Pool<T> impls Sync when T is Send (even if it's not Sync). This means /// that T can use interior mutability. This is possible because a pool is /// guaranteed to provide a value to exactly one thread at any time. /// /// Currently, a pool never contracts in size. Its size is proportional to the /// number of simultaneous uses. pub struct Pool<T> { /// A stack of T values to hand out. These are used when a Pool is /// accessed by a thread that didn't create it. stack: Mutex<Vec<Box<T>>>, /// A function to create more T values when stack is empty and a caller /// has requested a T. create: CreateFn<T>,
/// gets 'owner_val' directly instead of returning a T from'stack'. /// See comments elsewhere for details, but this is intended to be an /// optimization for the common case that makes getting a T faster. /// /// It is initialized to a value of zero (an impossible thread ID) as a /// sentinel to indicate that it is unowned. owner: AtomicUsize, /// A value to return when the caller is in the same thread that created /// the Pool. owner_val: T, } // SAFETY: Since we want to use a Pool from multiple threads simultaneously // behind an Arc, we need for it to be Sync. In cases where T is sync, Pool<T> // would be Sync. However, since we use a Pool to store mutable scratch space, // we wind up using a T that has interior mutability and is thus itself not // Sync. So what we *really* want is for our Pool<T> to by Sync even when T is // not Sync (but is at least Send). // // The only non-sync aspect of a Pool is its 'owner_val' field, which is used // to implement faster access to a pool value in the common case of a pool // being accessed in the same thread in which it was created. The'stack' field // is also shared, but a Mutex<T> where T: Send is already Sync. So we only // need to worry about 'owner_val'. // // The key is to guarantee that 'owner_val' can only ever be accessed from one // thread. In our implementation below, we guarantee this by only returning the // 'owner_val' when the ID of the current thread matches the ID of the thread // that created the Pool. Since this can only ever be one thread, it follows // that only one thread can access 'owner_val' at any point in time. Thus, it // is safe to declare that Pool<T> is Sync when T is Send. // // NOTE: It would also be possible to make the owning thread be the *first* // thread that tries to get a value out of a Pool. However, the current // implementation is a little simpler and it's not clear if making the first // thread (rather than the creating thread) is meaningfully better. // // If there is a way to achieve our performance goals using safe code, then // I would very much welcome a patch. As it stands, the implementation below // tries to balance safety with performance. The case where a Regex is used // from multiple threads simultaneously will suffer a bit since getting a cache // will require unlocking a mutex. unsafe impl<T: Send> Sync for Pool<T> {} impl<T: ::std::fmt::Debug> ::std::fmt::Debug for Pool<T> { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { f.debug_struct("Pool") .field("stack", &self.stack) .field("owner", &self.owner) .field("owner_val", &self.owner_val) .finish() } } /// A guard that is returned when a caller requests a value from the pool. /// /// The purpose of the guard is to use RAII to automatically put the value back /// in the pool once it's dropped. #[derive(Debug)] pub struct PoolGuard<'a, T: Send> { /// The pool that this guard is attached to. pool: &'a Pool<T>, /// This is None when the guard represents the special "owned" value. In /// which case, the value is retrieved from 'pool.owner_val'. value: Option<Box<T>>, } impl<T: Send> Pool<T> { /// Create a new pool. The given closure is used to create values in the /// pool when necessary. pub fn new(create: CreateFn<T>) -> Pool<T> { let owner = AtomicUsize::new(0); let owner_val = create(); Pool { stack: Mutex::new(vec![]), create, owner, owner_val } } /// Get a value from the pool. The caller is guaranteed to have exclusive /// access to the given value. 
/// /// Note that there is no guarantee provided about which value in the /// pool is returned. That is, calling get, dropping the guard (causing /// the value to go back into the pool) and then calling get again is NOT /// guaranteed to return the same value received in the first get call. #[cfg_attr(feature = "perf-inline", inline(always))] pub fn get(&self) -> PoolGuard<'_, T> { // Our fast path checks if the caller is the thread that "owns" this // pool. Or stated differently, whether it is the first thread that // tried to extract a value from the pool. If it is, then we can return // a T to the caller without going through a mutex. // // SAFETY: We must guarantee that only one thread gets access to this // value. Since a thread is uniquely identified by the THREAD_ID thread // local, it follows that is the caller's thread ID is equal to the // owner, then only one thread may receive this value. let caller = THREAD_ID.with(|id| *id); let owner = self.owner.load(Ordering::Relaxed); if caller == owner { return self.guard_owned(); } self.get_slow(caller, owner) } /// This is the "slow" version that goes through a mutex to pop an /// allocated value off a stack to return to the caller. (Or, if the stack /// is empty, a new value is created.) /// /// If the pool has no owner, then this will set the owner. #[cold] fn get_slow(&self, caller: usize, owner: usize) -> PoolGuard<'_, T> { use std::sync::atomic::Ordering::Relaxed; if owner == 0 { // The sentinel 0 value means this pool is not yet owned. We // try to atomically set the owner. If we do, then this thread // becomes the owner and we can return a guard that represents // the special T for the owner. let res = self.owner.compare_exchange(0, caller, Relaxed, Relaxed); if res.is_ok() { return self.guard_owned(); } } let mut stack = self.stack.lock().unwrap(); let value = match stack.pop() { None => Box::new((self.create)()), Some(value) => value, }; self.guard_stack(value) } /// Puts a value back into the pool. Callers don't need to call this. Once /// the guard that's returned by 'get' is dropped, it is put back into the /// pool automatically. fn put(&self, value: Box<T>) { let mut stack = self.stack.lock().unwrap(); stack.push(value); } /// Create a guard that represents the special owned T. fn guard_owned(&self) -> PoolGuard<'_, T> { PoolGuard { pool: self, value: None } } /// Create a guard that contains a value from the pool's stack. fn guard_stack(&self, value: Box<T>) -> PoolGuard<'_, T> { PoolGuard { pool: self, value: Some(value) } } } impl<'a, T: Send> PoolGuard<'a, T> { /// Return the underlying value. pub fn value(&self) -> &T { match self.value { None => &self.pool.owner_val, Some(ref v) => &**v, } } } impl<'a, T: Send> Drop for PoolGuard<'a, T> { #[cfg_attr(feature = "perf-inline", inline(always))] fn drop(&mut self) { if let Some(value) = self.value.take() { self.pool.put(value); } } } #[cfg(test)] mod tests { use std::panic::{RefUnwindSafe, UnwindSafe}; use super::*; #[test] fn oibits() { use crate::exec::ProgramCache; fn has_oibits<T: Send + Sync + UnwindSafe + RefUnwindSafe>() {} has_oibits::<Pool<ProgramCache>>(); } // Tests that Pool implements the "single owner" optimization. That is, the // thread that first accesses the pool gets its own copy, while all other // threads get distinct copies. 
#[test] fn thread_owner_optimization() { use std::cell::RefCell; use std::sync::Arc; let pool: Arc<Pool<RefCell<Vec<char>>>> = Arc::new(Pool::new(Box::new(|| RefCell::new(vec!['a'])))); pool.get().value().borrow_mut().push('x'); let pool1 = pool.clone(); let t1 = std::thread::spawn(move || { let guard = pool1.get(); let v = guard.value(); v.borrow_mut().push('y'); }); let pool2 = pool.clone(); let t2 = std::thread::spawn(move || { let guard = pool2.get(); let v = guard.value(); v.borrow_mut().push('z'); }); t1.join().unwrap(); t2.join().unwrap(); // If we didn't implement the single owner optimization, then one of // the threads above is likely to have mutated the [a, x] vec that // we stuffed in the pool before spawning the threads. But since // neither thread was first to access the pool, and because of the // optimization, we should be guaranteed that neither thread mutates // the special owned pool value. // // (Technically this is an implementation detail and not a contract of // Pool's API.) assert_eq!(vec!['a', 'x'], *pool.get().value().borrow()); } }
/// The ID of the thread that owns this pool. The owner is the thread /// that makes the first call to 'get'. When the owner calls 'get', it
random_line_split
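The owner-claiming step inside get_slow reduces to a single compare_exchange against the 0 sentinel. The standalone sketch below shows just that step; the Owner wrapper and try_claim name are illustrative, not part of the crate.

use std::sync::atomic::{AtomicUsize, Ordering};

// The owner field starts at the impossible thread ID 0, meaning "unowned".
struct Owner(AtomicUsize);

impl Owner {
    fn new() -> Self {
        Owner(AtomicUsize::new(0))
    }

    // Returns true if `caller` became, or already was, the owner.
    fn try_claim(&self, caller: usize) -> bool {
        match self
            .0
            .compare_exchange(0, caller, Ordering::Relaxed, Ordering::Relaxed)
        {
            Ok(_) => true,                     // we installed ourselves as owner
            Err(current) => current == caller, // already owned; maybe by us
        }
    }
}

fn main() {
    let owner = Owner::new();
    assert!(owner.try_claim(7));  // the first claimant wins
    assert!(owner.try_claim(7));  // idempotent for the owner
    assert!(!owner.try_claim(9)); // later threads lose and use the mutex path
}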
vmctx.rs
//! Interfaces for accessing instance data from hostcalls. //! //! This module contains both a Rust-friendly API ([`Vmctx`](struct.Vmctx.html)) as well as C-style //! exports for compatibility with hostcalls written against `lucet-runtime-c`. pub use crate::c_api::lucet_vmctx; use crate::alloc::instance_heap_offset; use crate::context::Context; use crate::error::Error; use crate::instance::{ Instance, InstanceInternal, State, TerminationDetails, CURRENT_INSTANCE, HOST_CTX, }; use lucet_module::{FunctionHandle, GlobalValue}; use std::any::Any; use std::borrow::{Borrow, BorrowMut}; use std::cell::{Ref, RefCell, RefMut}; /// An opaque handle to a running instance's context. #[derive(Debug)] pub struct Vmctx { vmctx: *mut lucet_vmctx, /// A view of the underlying instance's heap. /// /// This must never be dropped automatically, as the view does not own the heap. Rather, this is /// a value used to implement dynamic borrowing of the heap contents that are owned and managed /// by the instance and its `Alloc`. heap_view: RefCell<Box<[u8]>>, /// A view of the underlying instance's globals. /// /// This must never be dropped automatically, as the view does not own the globals. Rather, this /// is a value used to implement dynamic borrowing of the globals that are owned and managed by /// the instance and its `Alloc`. globals_view: RefCell<Box<[GlobalValue]>>, } impl Drop for Vmctx { fn drop(&mut self) { let heap_view = self.heap_view.replace(Box::new([])); let globals_view = self.globals_view.replace(Box::new([])); // as described in the definition of `Vmctx`, we cannot allow the boxed views of the heap // and globals to be dropped Box::leak(heap_view); Box::leak(globals_view); } } pub trait VmctxInternal { /// Get a reference to the `Instance` for this guest. fn instance(&self) -> &Instance; /// Get a mutable reference to the `Instance` for this guest. /// /// ### Safety /// /// Using this method, you could hold on to multiple mutable references to the same /// `Instance`. Only use one at a time! This method does not take `&mut self` because otherwise /// you could not use orthogonal `&mut` refs that come from `Vmctx`, like the heap or /// terminating the instance. unsafe fn instance_mut(&self) -> &mut Instance; } impl VmctxInternal for Vmctx { fn instance(&self) -> &Instance { unsafe { instance_from_vmctx(self.vmctx) } } unsafe fn instance_mut(&self) -> &mut Instance { instance_from_vmctx(self.vmctx) } } impl Vmctx { /// Create a `Vmctx` from the compiler-inserted `vmctx` argument in a guest function. /// /// This is almost certainly not what you want to use to get a `Vmctx`; instead use the `&mut /// Vmctx` argument to a `lucet_hostcalls!`-wrapped function. pub unsafe fn from_raw(vmctx: *mut lucet_vmctx) -> Vmctx
/// Return the underlying `vmctx` pointer. pub fn as_raw(&self) -> *mut lucet_vmctx { self.vmctx } /// Return the WebAssembly heap as a slice of bytes. /// /// If the heap is already mutably borrowed by `heap_mut()`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn heap(&self) -> Ref<'_, [u8]> { unsafe { self.reconstitute_heap_view_if_needed(); } let r = self .heap_view .try_borrow() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap"))); Ref::map(r, |b| b.borrow()) } /// Return the WebAssembly heap as a mutable slice of bytes. /// /// If the heap is already borrowed by `heap()` or `heap_mut()`, the instance will terminate /// with `TerminationDetails::BorrowError`. pub fn heap_mut(&self) -> RefMut<'_, [u8]> { unsafe { self.reconstitute_heap_view_if_needed(); } let r = self .heap_view .try_borrow_mut() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap_mut"))); RefMut::map(r, |b| b.borrow_mut()) } /// Check whether the heap has grown, and replace the heap view if it has. /// /// This handles the case where `Vmctx::grow_memory()` and `Vmctx::heap()` are called in /// sequence. Since `Vmctx::grow_memory()` takes `&mut self`, heap references cannot live across /// it. /// /// TODO: There is still an unsound case, though, when a heap reference is held across a call /// back into the guest via `Vmctx::get_func_from_idx()`. That guest code may grow the heap as /// well, causing any outstanding heap references to become invalid. We will address this when /// we rework the interface for calling back into the guest. unsafe fn reconstitute_heap_view_if_needed(&self) { let inst = self.instance_mut(); if inst.heap_mut().len()!= self.heap_view.borrow().len() { let old_heap_view = self .heap_view .replace(Box::<[u8]>::from_raw(inst.heap_mut())); // as described in the definition of `Vmctx`, we cannot allow the boxed view of the heap // to be dropped Box::leak(old_heap_view); } } /// Check whether a given range in the host address space overlaps with the memory that backs /// the instance heap. pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool { self.instance().check_heap(ptr, len) } /// Check whether a context value of a particular type exists. pub fn contains_embed_ctx<T: Any>(&self) -> bool { self.instance().contains_embed_ctx::<T>() } /// Get a reference to a context value of a particular type. /// /// If a context of that type does not exist, the instance will terminate with /// `TerminationDetails::CtxNotFound`. /// /// If the context is already mutably borrowed by `get_embed_ctx_mut`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn get_embed_ctx<T: Any>(&self) -> Ref<'_, T> { match self.instance().embed_ctx.try_get::<T>() { Some(Ok(t)) => t, Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx")), None => panic!(TerminationDetails::CtxNotFound), } } /// Get a mutable reference to a context value of a particular type. /// /// If a context of that type does not exist, the instance will terminate with /// `TerminationDetails::CtxNotFound`. /// /// If the context is already borrowed by some other use of `get_embed_ctx` or /// `get_embed_ctx_mut`, the instance will terminate with `TerminationDetails::BorrowError`. 
pub fn get_embed_ctx_mut<T: Any>(&self) -> RefMut<'_, T> { match unsafe { self.instance_mut().embed_ctx.try_get_mut::<T>() } { Some(Ok(t)) => t, Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx_mut")), None => panic!(TerminationDetails::CtxNotFound), } } /// Terminate this guest and return to the host context without unwinding. /// /// This is almost certainly not what you want to use to terminate an instance from a hostcall, /// as any resources currently in scope will not be dropped. Instead, use /// `lucet_hostcall_terminate!` which unwinds to the enclosing hostcall body. pub unsafe fn terminate_no_unwind(&mut self, details: TerminationDetails) ->! { self.instance_mut().terminate(details) } /// Grow the guest memory by the given number of WebAssembly pages. /// /// On success, returns the number of pages that existed before the call. pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> { unsafe { self.instance_mut().grow_memory(additional_pages) } } /// Return the WebAssembly globals as a slice of `i64`s. /// /// If the globals are already mutably borrowed by `globals_mut()`, the instance will terminate /// with `TerminationDetails::BorrowError`. pub fn globals(&self) -> Ref<'_, [GlobalValue]> { let r = self .globals_view .try_borrow() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals"))); Ref::map(r, |b| b.borrow()) } /// Return the WebAssembly globals as a mutable slice of `i64`s. /// /// If the globals are already borrowed by `globals()` or `globals_mut()`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn globals_mut(&self) -> RefMut<'_, [GlobalValue]> { let r = self .globals_view .try_borrow_mut() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals_mut"))); RefMut::map(r, |b| b.borrow_mut()) } /// Get a function pointer by WebAssembly table and function index. /// /// This is useful when a hostcall takes a function pointer as its argument, as WebAssembly uses /// table indices as its runtime representation of function pointers. /// /// We do not currently reflect function type information into the Rust type system, so callers /// of the returned function must take care to cast it to the correct type before calling. The /// correct type will include the `vmctx` argument, which the caller is responsible for passing /// from its own context. /// /// ```no_run /// use lucet_runtime_internals::{lucet_hostcalls, lucet_hostcall_terminate}; /// use lucet_runtime_internals::vmctx::{lucet_vmctx, Vmctx}; /// /// lucet_hostcalls! { /// #[no_mangle] /// pub unsafe extern "C" fn hostcall_call_binop( /// &mut vmctx, /// binop_table_idx: u32, /// binop_func_idx: u32, /// operand1: u32, /// operand2: u32, /// ) -> u32 { /// if let Ok(binop) = vmctx.get_func_from_idx(binop_table_idx, binop_func_idx) { /// let typed_binop = std::mem::transmute::< /// usize, /// extern "C" fn(*mut lucet_vmctx, u32, u32) -> u32 /// >(binop.ptr.as_usize()); /// unsafe { (typed_binop)(vmctx.as_raw(), operand1, operand2) } /// } else { /// lucet_hostcall_terminate!("invalid function index") /// } /// } /// } pub fn get_func_from_idx( &self, table_idx: u32, func_idx: u32, ) -> Result<FunctionHandle, Error> { self.instance() .module() .get_func_from_idx(table_idx, func_idx) } } /// Get an `Instance` from the `vmctx` pointer. /// /// Only safe to call from within the guest context. 
pub unsafe fn instance_from_vmctx<'a>(vmctx: *mut lucet_vmctx) -> &'a mut Instance { assert!(!vmctx.is_null(), "vmctx is not null"); let inst_ptr = (vmctx as usize - instance_heap_offset()) as *mut Instance; // We shouldn't actually need to access the thread local, only the exception handler should // need to. But, as long as the thread local exists, we should make sure that the guest // hasn't pulled any shenanigans and passed a bad vmctx. (Codegen should ensure the guest // cant pull any shenanigans but there have been bugs before.) CURRENT_INSTANCE.with(|current_instance| { if let Some(current_inst_ptr) = current_instance.borrow().map(|nn| nn.as_ptr()) { assert_eq!( inst_ptr, current_inst_ptr, "vmctx corresponds to current instance" ); } else { panic!( "current instance is not set; thread local storage failure can indicate \ dynamic linking issues" ); } }); let inst = inst_ptr.as_mut().unwrap(); assert!(inst.valid_magic()); inst } impl Instance { /// Terminate the guest and swap back to the host context without unwinding. /// /// This is almost certainly not what you want to use to terminate from a hostcall; use panics /// with `TerminationDetails` instead. unsafe fn terminate(&mut self, details: TerminationDetails) ->! { self.state = State::Terminated { details }; #[allow(unused_unsafe)] // The following unsafe will be incorrectly warned as unused HOST_CTX.with(|host_ctx| unsafe { Context::set(&*host_ctx.get()) }) } }
{ let inst = instance_from_vmctx(vmctx); assert!(inst.valid_magic()); let res = Vmctx { vmctx, heap_view: RefCell::new(Box::<[u8]>::from_raw(inst.heap_mut())), globals_view: RefCell::new(Box::<[GlobalValue]>::from_raw(inst.globals_mut())), }; res }
identifier_body
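The heap()/heap_mut() accessors in this row are dynamic borrowing over a RefCell, projected through Ref::map and RefMut::map. Below is a standalone sketch of that pattern, assuming an owned Vec<u8> in place of lucet's borrowed heap view and a plain panic message in place of TerminationDetails::BorrowError.

use std::cell::{Ref, RefCell, RefMut};

// Stand-in for the instance heap: the real code wraps a Box<[u8]> that
// aliases instance-owned memory; here we simply own a buffer.
struct HeapView {
    bytes: RefCell<Vec<u8>>,
}

impl HeapView {
    fn heap(&self) -> Ref<'_, [u8]> {
        let r = self
            .bytes
            .try_borrow()
            .unwrap_or_else(|_| panic!("heap already mutably borrowed"));
        // Project the borrow from Vec<u8> down to the byte slice.
        Ref::map(r, |v| v.as_slice())
    }

    fn heap_mut(&self) -> RefMut<'_, [u8]> {
        let r = self
            .bytes
            .try_borrow_mut()
            .unwrap_or_else(|_| panic!("heap already borrowed"));
        RefMut::map(r, |v| v.as_mut_slice())
    }
}

fn main() {
    let view = HeapView { bytes: RefCell::new(vec![0u8; 4]) };
    view.heap_mut()[0] = 42;
    assert_eq!(view.heap()[0], 42);
    // Holding a shared borrow while requesting a mutable one would hit the
    // try_borrow_mut error path, mirroring TerminationDetails::BorrowError.
}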
vmctx.rs
//! Interfaces for accessing instance data from hostcalls. //! //! This module contains both a Rust-friendly API ([`Vmctx`](struct.Vmctx.html)) as well as C-style //! exports for compatibility with hostcalls written against `lucet-runtime-c`. pub use crate::c_api::lucet_vmctx; use crate::alloc::instance_heap_offset; use crate::context::Context; use crate::error::Error; use crate::instance::{ Instance, InstanceInternal, State, TerminationDetails, CURRENT_INSTANCE, HOST_CTX, }; use lucet_module::{FunctionHandle, GlobalValue}; use std::any::Any; use std::borrow::{Borrow, BorrowMut}; use std::cell::{Ref, RefCell, RefMut}; /// An opaque handle to a running instance's context. #[derive(Debug)] pub struct Vmctx { vmctx: *mut lucet_vmctx, /// A view of the underlying instance's heap. /// /// This must never be dropped automatically, as the view does not own the heap. Rather, this is /// a value used to implement dynamic borrowing of the heap contents that are owned and managed /// by the instance and its `Alloc`. heap_view: RefCell<Box<[u8]>>, /// A view of the underlying instance's globals. /// /// This must never be dropped automatically, as the view does not own the globals. Rather, this /// is a value used to implement dynamic borrowing of the globals that are owned and managed by /// the instance and its `Alloc`. globals_view: RefCell<Box<[GlobalValue]>>, } impl Drop for Vmctx { fn drop(&mut self) { let heap_view = self.heap_view.replace(Box::new([])); let globals_view = self.globals_view.replace(Box::new([])); // as described in the definition of `Vmctx`, we cannot allow the boxed views of the heap // and globals to be dropped Box::leak(heap_view); Box::leak(globals_view); } } pub trait VmctxInternal { /// Get a reference to the `Instance` for this guest. fn instance(&self) -> &Instance; /// Get a mutable reference to the `Instance` for this guest. /// /// ### Safety /// /// Using this method, you could hold on to multiple mutable references to the same /// `Instance`. Only use one at a time! This method does not take `&mut self` because otherwise /// you could not use orthogonal `&mut` refs that come from `Vmctx`, like the heap or /// terminating the instance. unsafe fn instance_mut(&self) -> &mut Instance; } impl VmctxInternal for Vmctx { fn instance(&self) -> &Instance { unsafe { instance_from_vmctx(self.vmctx) } } unsafe fn instance_mut(&self) -> &mut Instance { instance_from_vmctx(self.vmctx) } } impl Vmctx { /// Create a `Vmctx` from the compiler-inserted `vmctx` argument in a guest function. /// /// This is almost certainly not what you want to use to get a `Vmctx`; instead use the `&mut /// Vmctx` argument to a `lucet_hostcalls!`-wrapped function. pub unsafe fn from_raw(vmctx: *mut lucet_vmctx) -> Vmctx { let inst = instance_from_vmctx(vmctx); assert!(inst.valid_magic()); let res = Vmctx { vmctx, heap_view: RefCell::new(Box::<[u8]>::from_raw(inst.heap_mut())), globals_view: RefCell::new(Box::<[GlobalValue]>::from_raw(inst.globals_mut())), }; res } /// Return the underlying `vmctx` pointer. pub fn as_raw(&self) -> *mut lucet_vmctx { self.vmctx } /// Return the WebAssembly heap as a slice of bytes. /// /// If the heap is already mutably borrowed by `heap_mut()`, the instance will /// terminate with `TerminationDetails::BorrowError`. 
pub fn heap(&self) -> Ref<'_, [u8]> { unsafe { self.reconstitute_heap_view_if_needed(); } let r = self .heap_view .try_borrow() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap"))); Ref::map(r, |b| b.borrow()) } /// Return the WebAssembly heap as a mutable slice of bytes. /// /// If the heap is already borrowed by `heap()` or `heap_mut()`, the instance will terminate /// with `TerminationDetails::BorrowError`. pub fn heap_mut(&self) -> RefMut<'_, [u8]> { unsafe { self.reconstitute_heap_view_if_needed(); } let r = self .heap_view .try_borrow_mut() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap_mut"))); RefMut::map(r, |b| b.borrow_mut()) } /// Check whether the heap has grown, and replace the heap view if it has. /// /// This handles the case where `Vmctx::grow_memory()` and `Vmctx::heap()` are called in /// sequence. Since `Vmctx::grow_memory()` takes `&mut self`, heap references cannot live across /// it. /// /// TODO: There is still an unsound case, though, when a heap reference is held across a call /// back into the guest via `Vmctx::get_func_from_idx()`. That guest code may grow the heap as /// well, causing any outstanding heap references to become invalid. We will address this when /// we rework the interface for calling back into the guest. unsafe fn reconstitute_heap_view_if_needed(&self) { let inst = self.instance_mut(); if inst.heap_mut().len()!= self.heap_view.borrow().len() { let old_heap_view = self .heap_view .replace(Box::<[u8]>::from_raw(inst.heap_mut())); // as described in the definition of `Vmctx`, we cannot allow the boxed view of the heap // to be dropped Box::leak(old_heap_view); } } /// Check whether a given range in the host address space overlaps with the memory that backs /// the instance heap. pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool { self.instance().check_heap(ptr, len) } /// Check whether a context value of a particular type exists. pub fn contains_embed_ctx<T: Any>(&self) -> bool { self.instance().contains_embed_ctx::<T>() } /// Get a reference to a context value of a particular type. /// /// If a context of that type does not exist, the instance will terminate with /// `TerminationDetails::CtxNotFound`. /// /// If the context is already mutably borrowed by `get_embed_ctx_mut`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn get_embed_ctx<T: Any>(&self) -> Ref<'_, T> { match self.instance().embed_ctx.try_get::<T>() { Some(Ok(t)) => t, Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx")), None => panic!(TerminationDetails::CtxNotFound), } } /// Get a mutable reference to a context value of a particular type. /// /// If a context of that type does not exist, the instance will terminate with /// `TerminationDetails::CtxNotFound`. /// /// If the context is already borrowed by some other use of `get_embed_ctx` or /// `get_embed_ctx_mut`, the instance will terminate with `TerminationDetails::BorrowError`. pub fn get_embed_ctx_mut<T: Any>(&self) -> RefMut<'_, T> { match unsafe { self.instance_mut().embed_ctx.try_get_mut::<T>() } { Some(Ok(t)) => t, Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx_mut")), None => panic!(TerminationDetails::CtxNotFound), } } /// Terminate this guest and return to the host context without unwinding. /// /// This is almost certainly not what you want to use to terminate an instance from a hostcall, /// as any resources currently in scope will not be dropped. 
Instead, use /// `lucet_hostcall_terminate!` which unwinds to the enclosing hostcall body. pub unsafe fn terminate_no_unwind(&mut self, details: TerminationDetails) ->! { self.instance_mut().terminate(details) } /// Grow the guest memory by the given number of WebAssembly pages. /// /// On success, returns the number of pages that existed before the call. pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> { unsafe { self.instance_mut().grow_memory(additional_pages) } } /// Return the WebAssembly globals as a slice of `i64`s. /// /// If the globals are already mutably borrowed by `globals_mut()`, the instance will terminate /// with `TerminationDetails::BorrowError`. pub fn globals(&self) -> Ref<'_, [GlobalValue]> { let r = self .globals_view .try_borrow() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals"))); Ref::map(r, |b| b.borrow()) } /// Return the WebAssembly globals as a mutable slice of `i64`s. /// /// If the globals are already borrowed by `globals()` or `globals_mut()`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn globals_mut(&self) -> RefMut<'_, [GlobalValue]> { let r = self .globals_view .try_borrow_mut() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals_mut"))); RefMut::map(r, |b| b.borrow_mut()) } /// Get a function pointer by WebAssembly table and function index. /// /// This is useful when a hostcall takes a function pointer as its argument, as WebAssembly uses /// table indices as its runtime representation of function pointers. /// /// We do not currently reflect function type information into the Rust type system, so callers /// of the returned function must take care to cast it to the correct type before calling. The /// correct type will include the `vmctx` argument, which the caller is responsible for passing /// from its own context. /// /// ```no_run /// use lucet_runtime_internals::{lucet_hostcalls, lucet_hostcall_terminate}; /// use lucet_runtime_internals::vmctx::{lucet_vmctx, Vmctx}; /// /// lucet_hostcalls! { /// #[no_mangle] /// pub unsafe extern "C" fn hostcall_call_binop( /// &mut vmctx, /// binop_table_idx: u32, /// binop_func_idx: u32, /// operand1: u32, /// operand2: u32, /// ) -> u32 { /// if let Ok(binop) = vmctx.get_func_from_idx(binop_table_idx, binop_func_idx) { /// let typed_binop = std::mem::transmute::< /// usize, /// extern "C" fn(*mut lucet_vmctx, u32, u32) -> u32 /// >(binop.ptr.as_usize()); /// unsafe { (typed_binop)(vmctx.as_raw(), operand1, operand2) } /// } else { /// lucet_hostcall_terminate!("invalid function index") /// } /// } /// } pub fn get_func_from_idx( &self, table_idx: u32, func_idx: u32, ) -> Result<FunctionHandle, Error> { self.instance() .module() .get_func_from_idx(table_idx, func_idx) } } /// Get an `Instance` from the `vmctx` pointer. /// /// Only safe to call from within the guest context. pub unsafe fn instance_from_vmctx<'a>(vmctx: *mut lucet_vmctx) -> &'a mut Instance { assert!(!vmctx.is_null(), "vmctx is not null"); let inst_ptr = (vmctx as usize - instance_heap_offset()) as *mut Instance; // We shouldn't actually need to access the thread local, only the exception handler should // need to. But, as long as the thread local exists, we should make sure that the guest // hasn't pulled any shenanigans and passed a bad vmctx. (Codegen should ensure the guest // cant pull any shenanigans but there have been bugs before.) 
CURRENT_INSTANCE.with(|current_instance| { if let Some(current_inst_ptr) = current_instance.borrow().map(|nn| nn.as_ptr()) { assert_eq!( inst_ptr, current_inst_ptr, "vmctx corresponds to current instance" ); } else { panic!( "current instance is not set; thread local storage failure can indicate \ dynamic linking issues" ); } }); let inst = inst_ptr.as_mut().unwrap(); assert!(inst.valid_magic()); inst } impl Instance { /// Terminate the guest and swap back to the host context without unwinding. /// /// This is almost certainly not what you want to use to terminate from a hostcall; use panics /// with `TerminationDetails` instead. unsafe fn terminate(&mut self, details: TerminationDetails) ->! { self.state = State::Terminated { details }; #[allow(unused_unsafe)] // The following unsafe will be incorrectly warned as unused
}
HOST_CTX.with(|host_ctx| unsafe { Context::set(&*host_ctx.get()) }) }
random_line_split
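The vmctx.rs sample above hinges on two tricks: the heap and globals views are `Box` slices that do not own their memory, and `Drop` swaps in empty boxes and leaks the real ones so nothing is ever freed. A minimal, self-contained sketch of that leak-on-drop view pattern, using only std and a hypothetical `HeapView` type (not the lucet API):

use std::cell::RefCell;

struct HeapView {
    view: RefCell<Box<[u8]>>,
}

impl HeapView {
    /// Safety: `ptr..ptr+len` must stay valid for the lifetime of the view,
    /// and the view must never be allowed to free it.
    unsafe fn from_raw(ptr: *mut u8, len: usize) -> Self {
        let slice = std::slice::from_raw_parts_mut(ptr, len);
        HeapView {
            view: RefCell::new(Box::from_raw(slice as *mut [u8])),
        }
    }
}

impl Drop for HeapView {
    fn drop(&mut self) {
        // Swap in an empty box and leak the real view, so dropping the view
        // never deallocates memory owned by someone else (the instance).
        let view = self.view.replace(Box::new([]));
        Box::leak(view);
    }
}

fn main() {
    // Stand-in for instance-owned memory: leak a buffer so the pointer stays valid.
    let heap: &'static mut [u8] = Box::leak(vec![0u8; 16].into_boxed_slice());
    let view = unsafe { HeapView::from_raw(heap.as_mut_ptr(), heap.len()) };
    drop(view); // the underlying buffer is still alive here
}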
vmctx.rs
//! Interfaces for accessing instance data from hostcalls. //! //! This module contains both a Rust-friendly API ([`Vmctx`](struct.Vmctx.html)) as well as C-style //! exports for compatibility with hostcalls written against `lucet-runtime-c`. pub use crate::c_api::lucet_vmctx; use crate::alloc::instance_heap_offset; use crate::context::Context; use crate::error::Error; use crate::instance::{ Instance, InstanceInternal, State, TerminationDetails, CURRENT_INSTANCE, HOST_CTX, }; use lucet_module::{FunctionHandle, GlobalValue}; use std::any::Any; use std::borrow::{Borrow, BorrowMut}; use std::cell::{Ref, RefCell, RefMut}; /// An opaque handle to a running instance's context. #[derive(Debug)] pub struct Vmctx { vmctx: *mut lucet_vmctx, /// A view of the underlying instance's heap. /// /// This must never be dropped automatically, as the view does not own the heap. Rather, this is /// a value used to implement dynamic borrowing of the heap contents that are owned and managed /// by the instance and its `Alloc`. heap_view: RefCell<Box<[u8]>>, /// A view of the underlying instance's globals. /// /// This must never be dropped automatically, as the view does not own the globals. Rather, this /// is a value used to implement dynamic borrowing of the globals that are owned and managed by /// the instance and its `Alloc`. globals_view: RefCell<Box<[GlobalValue]>>, } impl Drop for Vmctx { fn drop(&mut self) { let heap_view = self.heap_view.replace(Box::new([])); let globals_view = self.globals_view.replace(Box::new([])); // as described in the definition of `Vmctx`, we cannot allow the boxed views of the heap // and globals to be dropped Box::leak(heap_view); Box::leak(globals_view); } } pub trait VmctxInternal { /// Get a reference to the `Instance` for this guest. fn instance(&self) -> &Instance; /// Get a mutable reference to the `Instance` for this guest. /// /// ### Safety /// /// Using this method, you could hold on to multiple mutable references to the same /// `Instance`. Only use one at a time! This method does not take `&mut self` because otherwise /// you could not use orthogonal `&mut` refs that come from `Vmctx`, like the heap or /// terminating the instance. unsafe fn instance_mut(&self) -> &mut Instance; } impl VmctxInternal for Vmctx { fn instance(&self) -> &Instance { unsafe { instance_from_vmctx(self.vmctx) } } unsafe fn instance_mut(&self) -> &mut Instance { instance_from_vmctx(self.vmctx) } } impl Vmctx { /// Create a `Vmctx` from the compiler-inserted `vmctx` argument in a guest function. /// /// This is almost certainly not what you want to use to get a `Vmctx`; instead use the `&mut /// Vmctx` argument to a `lucet_hostcalls!`-wrapped function. pub unsafe fn from_raw(vmctx: *mut lucet_vmctx) -> Vmctx { let inst = instance_from_vmctx(vmctx); assert!(inst.valid_magic()); let res = Vmctx { vmctx, heap_view: RefCell::new(Box::<[u8]>::from_raw(inst.heap_mut())), globals_view: RefCell::new(Box::<[GlobalValue]>::from_raw(inst.globals_mut())), }; res } /// Return the underlying `vmctx` pointer. pub fn as_raw(&self) -> *mut lucet_vmctx { self.vmctx } /// Return the WebAssembly heap as a slice of bytes. /// /// If the heap is already mutably borrowed by `heap_mut()`, the instance will /// terminate with `TerminationDetails::BorrowError`. 
pub fn heap(&self) -> Ref<'_, [u8]> { unsafe { self.reconstitute_heap_view_if_needed(); } let r = self .heap_view .try_borrow() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap"))); Ref::map(r, |b| b.borrow()) } /// Return the WebAssembly heap as a mutable slice of bytes. /// /// If the heap is already borrowed by `heap()` or `heap_mut()`, the instance will terminate /// with `TerminationDetails::BorrowError`. pub fn heap_mut(&self) -> RefMut<'_, [u8]> { unsafe { self.reconstitute_heap_view_if_needed(); } let r = self .heap_view .try_borrow_mut() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("heap_mut"))); RefMut::map(r, |b| b.borrow_mut()) } /// Check whether the heap has grown, and replace the heap view if it has. /// /// This handles the case where `Vmctx::grow_memory()` and `Vmctx::heap()` are called in /// sequence. Since `Vmctx::grow_memory()` takes `&mut self`, heap references cannot live across /// it. /// /// TODO: There is still an unsound case, though, when a heap reference is held across a call /// back into the guest via `Vmctx::get_func_from_idx()`. That guest code may grow the heap as /// well, causing any outstanding heap references to become invalid. We will address this when /// we rework the interface for calling back into the guest. unsafe fn reconstitute_heap_view_if_needed(&self) { let inst = self.instance_mut(); if inst.heap_mut().len()!= self.heap_view.borrow().len() { let old_heap_view = self .heap_view .replace(Box::<[u8]>::from_raw(inst.heap_mut())); // as described in the definition of `Vmctx`, we cannot allow the boxed view of the heap // to be dropped Box::leak(old_heap_view); } } /// Check whether a given range in the host address space overlaps with the memory that backs /// the instance heap. pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool { self.instance().check_heap(ptr, len) } /// Check whether a context value of a particular type exists. pub fn contains_embed_ctx<T: Any>(&self) -> bool { self.instance().contains_embed_ctx::<T>() } /// Get a reference to a context value of a particular type. /// /// If a context of that type does not exist, the instance will terminate with /// `TerminationDetails::CtxNotFound`. /// /// If the context is already mutably borrowed by `get_embed_ctx_mut`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn get_embed_ctx<T: Any>(&self) -> Ref<'_, T> { match self.instance().embed_ctx.try_get::<T>() { Some(Ok(t)) => t, Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx")), None => panic!(TerminationDetails::CtxNotFound), } } /// Get a mutable reference to a context value of a particular type. /// /// If a context of that type does not exist, the instance will terminate with /// `TerminationDetails::CtxNotFound`. /// /// If the context is already borrowed by some other use of `get_embed_ctx` or /// `get_embed_ctx_mut`, the instance will terminate with `TerminationDetails::BorrowError`. pub fn get_embed_ctx_mut<T: Any>(&self) -> RefMut<'_, T> { match unsafe { self.instance_mut().embed_ctx.try_get_mut::<T>() } { Some(Ok(t)) => t, Some(Err(_)) => panic!(TerminationDetails::BorrowError("get_embed_ctx_mut")), None => panic!(TerminationDetails::CtxNotFound), } } /// Terminate this guest and return to the host context without unwinding. /// /// This is almost certainly not what you want to use to terminate an instance from a hostcall, /// as any resources currently in scope will not be dropped. 
Instead, use /// `lucet_hostcall_terminate!` which unwinds to the enclosing hostcall body. pub unsafe fn terminate_no_unwind(&mut self, details: TerminationDetails) ->! { self.instance_mut().terminate(details) } /// Grow the guest memory by the given number of WebAssembly pages. /// /// On success, returns the number of pages that existed before the call. pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> { unsafe { self.instance_mut().grow_memory(additional_pages) } } /// Return the WebAssembly globals as a slice of `i64`s. /// /// If the globals are already mutably borrowed by `globals_mut()`, the instance will terminate /// with `TerminationDetails::BorrowError`. pub fn globals(&self) -> Ref<'_, [GlobalValue]> { let r = self .globals_view .try_borrow() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals"))); Ref::map(r, |b| b.borrow()) } /// Return the WebAssembly globals as a mutable slice of `i64`s. /// /// If the globals are already borrowed by `globals()` or `globals_mut()`, the instance will /// terminate with `TerminationDetails::BorrowError`. pub fn
(&self) -> RefMut<'_, [GlobalValue]> { let r = self .globals_view .try_borrow_mut() .unwrap_or_else(|_| panic!(TerminationDetails::BorrowError("globals_mut"))); RefMut::map(r, |b| b.borrow_mut()) } /// Get a function pointer by WebAssembly table and function index. /// /// This is useful when a hostcall takes a function pointer as its argument, as WebAssembly uses /// table indices as its runtime representation of function pointers. /// /// We do not currently reflect function type information into the Rust type system, so callers /// of the returned function must take care to cast it to the correct type before calling. The /// correct type will include the `vmctx` argument, which the caller is responsible for passing /// from its own context. /// /// ```no_run /// use lucet_runtime_internals::{lucet_hostcalls, lucet_hostcall_terminate}; /// use lucet_runtime_internals::vmctx::{lucet_vmctx, Vmctx}; /// /// lucet_hostcalls! { /// #[no_mangle] /// pub unsafe extern "C" fn hostcall_call_binop( /// &mut vmctx, /// binop_table_idx: u32, /// binop_func_idx: u32, /// operand1: u32, /// operand2: u32, /// ) -> u32 { /// if let Ok(binop) = vmctx.get_func_from_idx(binop_table_idx, binop_func_idx) { /// let typed_binop = std::mem::transmute::< /// usize, /// extern "C" fn(*mut lucet_vmctx, u32, u32) -> u32 /// >(binop.ptr.as_usize()); /// unsafe { (typed_binop)(vmctx.as_raw(), operand1, operand2) } /// } else { /// lucet_hostcall_terminate!("invalid function index") /// } /// } /// } pub fn get_func_from_idx( &self, table_idx: u32, func_idx: u32, ) -> Result<FunctionHandle, Error> { self.instance() .module() .get_func_from_idx(table_idx, func_idx) } } /// Get an `Instance` from the `vmctx` pointer. /// /// Only safe to call from within the guest context. pub unsafe fn instance_from_vmctx<'a>(vmctx: *mut lucet_vmctx) -> &'a mut Instance { assert!(!vmctx.is_null(), "vmctx is not null"); let inst_ptr = (vmctx as usize - instance_heap_offset()) as *mut Instance; // We shouldn't actually need to access the thread local, only the exception handler should // need to. But, as long as the thread local exists, we should make sure that the guest // hasn't pulled any shenanigans and passed a bad vmctx. (Codegen should ensure the guest // cant pull any shenanigans but there have been bugs before.) CURRENT_INSTANCE.with(|current_instance| { if let Some(current_inst_ptr) = current_instance.borrow().map(|nn| nn.as_ptr()) { assert_eq!( inst_ptr, current_inst_ptr, "vmctx corresponds to current instance" ); } else { panic!( "current instance is not set; thread local storage failure can indicate \ dynamic linking issues" ); } }); let inst = inst_ptr.as_mut().unwrap(); assert!(inst.valid_magic()); inst } impl Instance { /// Terminate the guest and swap back to the host context without unwinding. /// /// This is almost certainly not what you want to use to terminate from a hostcall; use panics /// with `TerminationDetails` instead. unsafe fn terminate(&mut self, details: TerminationDetails) ->! { self.state = State::Terminated { details }; #[allow(unused_unsafe)] // The following unsafe will be incorrectly warned as unused HOST_CTX.with(|host_ctx| unsafe { Context::set(&*host_ctx.get()) }) } }
globals_mut
identifier_name
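The masked identifier in this row is `globals_mut`; both global accessors share one shape: `try_borrow`/`try_borrow_mut` on the `RefCell` view, converting a conflict into a controlled failure, then `Ref::map`/`RefMut::map` to hand out a slice rather than the boxed view. A small std-only sketch of that shape, where a plain panic message stands in for `TerminationDetails::BorrowError`:

use std::borrow::{Borrow, BorrowMut};
use std::cell::{Ref, RefCell, RefMut};

struct Globals {
    view: RefCell<Box<[i64]>>,
}

impl Globals {
    fn globals(&self) -> Ref<'_, [i64]> {
        let r = self
            .view
            .try_borrow()
            .unwrap_or_else(|_| panic!("globals already mutably borrowed"));
        // Project the borrow from the boxed view down to the slice.
        Ref::map(r, |b| b.borrow())
    }

    fn globals_mut(&self) -> RefMut<'_, [i64]> {
        let r = self
            .view
            .try_borrow_mut()
            .unwrap_or_else(|_| panic!("globals already borrowed"));
        RefMut::map(r, |b| b.borrow_mut())
    }
}

fn main() {
    let g = Globals { view: RefCell::new(vec![1, 2, 3].into_boxed_slice()) };
    g.globals_mut()[0] = 42;        // exclusive borrow ends at the semicolon
    assert_eq!(g.globals()[0], 42); // shared borrow is fine afterwards
}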
hwdetect.rs
use anyhow::anyhow; use nom::character::complete::{newline, satisfy, space0}; use nom::combinator::{map, map_res, opt}; use nom::multi::{many1, separated_list1}; use nom::sequence::{preceded, terminated, tuple}; use nom::Parser; use nom_supreme::tag::complete::tag; use tako::hwstats::GpuFamily; use tako::internal::has_unique_elements; use tako::resources::{ ResourceDescriptorItem, ResourceDescriptorKind, ResourceIndex, ResourceLabel, AMD_GPU_RESOURCE_NAME, MEM_RESOURCE_NAME, NVIDIA_GPU_RESOURCE_NAME, }; use tako::{format_comma_delimited, Set}; use crate::common::format::human_size; use crate::common::parser::{consume_all, p_u32, NomResult}; pub fn detect_cpus() -> anyhow::Result<ResourceDescriptorKind> { read_linux_numa() .map(|numa_nodes| { let filtered = filter_masked_cpus(numa_nodes.clone()); if filtered.iter().flatten().count()!= numa_nodes.iter().flatten().count() { log::info!( "Some cores were filtered by a CPU mask. All cores: {:?}. Allowed cores: {:?}.", numa_nodes .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>(), filtered .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>() ); } filtered }) .and_then(|groups| { ResourceDescriptorKind::groups_numeric(groups) .map_err(|_| anyhow!("Inconsistent CPU naming got from detection")) }) .or_else(|e| { log::debug!("Detecting linux failed: {}", e); let n_cpus = num_cpus::get() as u32; if n_cpus < 1 { anyhow::bail!("Cpu detection failed"); }; Ok(ResourceDescriptorKind::simple_indices(n_cpus)) }) } /// Filter cores that are not allowed because of CPU affinity mask. fn filter_masked_cpus(numa_nodes: Vec<Vec<ResourceIndex>>) -> Vec<Vec<ResourceIndex>> { match core_affinity::get_core_ids() { Some(allowed) => { let cpu_set: Set<usize> = allowed.into_iter().map(|core_id| core_id.id).collect(); numa_nodes .into_iter() .map(|mut numa_node| { numa_node.retain(|&cpu| cpu_set.contains(&cpu.as_usize())); numa_node }) .collect() } None => { log::error!("Failed to found CPU mask. Allowing all cores."); numa_nodes } } } pub fn prune_hyper_threading( kind: &ResourceDescriptorKind, ) -> anyhow::Result<ResourceDescriptorKind> { let groups = kind.as_groups(); let mut new_desc = Vec::new(); for group in groups { let mut new_group = Vec::new(); for cpu_id in group { if read_linux_thread_siblings(&cpu_id)? .iter() .min() .ok_or_else(|| anyhow::anyhow!("Thread siblings are empty")) .map(|v| *v == cpu_id)? { new_group.push(cpu_id); } } new_desc.push(new_group); } Ok(ResourceDescriptorKind::groups(new_desc).unwrap()) } /// Detects additional resources (apart from CPU) on this worker. /// Also returns the detected GPU families. 
pub fn detect_additional_resources( items: &mut Vec<ResourceDescriptorItem>, ) -> anyhow::Result<Set<GpuFamily>> { let mut gpu_families = Set::new(); let has_resource = |items: &[ResourceDescriptorItem], name: &str| items.iter().any(|x| x.name == name); let detected_gpus = detect_gpus_from_env(); if detected_gpus.is_empty() &&!has_resource(items, NVIDIA_GPU_RESOURCE_NAME) { if let Ok(count) = read_nvidia_linux_gpu_count() { if count > 0 { gpu_families.insert(GpuFamily::Nvidia); log::info!("Detected {} GPUs from procs", count); items.push(ResourceDescriptorItem { name: NVIDIA_GPU_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::simple_indices(count as u32), }); } } } else { for gpu in detected_gpus { if!has_resource(items, gpu.resource_name) { gpu_families.insert(gpu.family); items.push(ResourceDescriptorItem { name: gpu.resource_name.to_string(), kind: gpu.resource, }); } } } if!has_resource(items, MEM_RESOURCE_NAME) { if let Ok(mem) = read_linux_memory() { log::info!("Detected {mem}B of memory ({})", human_size(mem)); items.push(ResourceDescriptorItem { name: MEM_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::Sum { size: mem }, }); } } Ok(gpu_families) } /// GPU resource that can be detected from an environment variable. pub struct GpuEnvironmentRecord { env_var: &'static str, pub resource_name: &'static str, pub family: GpuFamily, } impl GpuEnvironmentRecord { const fn new(env_var: &'static str, resource_name: &'static str, family: GpuFamily) -> Self { Self { env_var, resource_name, family, } } } pub const GPU_ENVIRONMENTS: &[GpuEnvironmentRecord; 2] = &[ GpuEnvironmentRecord::new( "CUDA_VISIBLE_DEVICES", NVIDIA_GPU_RESOURCE_NAME, GpuFamily::Nvidia, ), GpuEnvironmentRecord::new( "ROCR_VISIBLE_DEVICES", AMD_GPU_RESOURCE_NAME, GpuFamily::Amd, ), ]; struct DetectedGpu { resource_name: &'static str, resource: ResourceDescriptorKind, family: GpuFamily, } /// Tries to detect available GPUs from one of the `GPU_ENV_KEYS` environment variables. fn detect_gpus_from_env() -> Vec<DetectedGpu> { let mut gpus = Vec::new(); for gpu_env in GPU_ENVIRONMENTS { if let Ok(devices_str) = std::env::var(gpu_env.env_var)
} } } gpus } /// Try to find out how many Nvidia GPUs are available on the current node. fn read_nvidia_linux_gpu_count() -> anyhow::Result<usize> { Ok(std::fs::read_dir("/proc/driver/nvidia/gpus")?.count()) } /// Try to get total memory on the current node. fn read_linux_memory() -> anyhow::Result<u64> { Ok(psutil::memory::virtual_memory()?.total()) } /// Try to find the CPU NUMA configuration. /// /// Returns a list of NUMA nodes, each node contains a list of assigned CPUs. fn read_linux_numa() -> anyhow::Result<Vec<Vec<ResourceIndex>>> { let nodes = parse_range(&std::fs::read_to_string( "/sys/devices/system/node/possible", )?)?; let mut numa_nodes: Vec<Vec<ResourceIndex>> = Vec::new(); for numa_index in nodes { let filename = format!("/sys/devices/system/node/node{numa_index}/cpulist"); numa_nodes.push(parse_range(&std::fs::read_to_string(filename)?)?); } log::debug!("Linux numa detection is successful"); Ok(numa_nodes) } fn read_linux_thread_siblings(cpu_id: &ResourceLabel) -> anyhow::Result<Vec<ResourceLabel>> { let filename = format!( "/sys/devices/system/cpu/cpu{}/topology/thread_siblings_list", cpu_id ); log::debug!("Reading {}", filename); parse_range(&std::fs::read_to_string(filename)?) .map(|indices| indices.into_iter().map(|i| i.to_string()).collect()) } fn p_cpu_range(input: &str) -> NomResult<Vec<ResourceIndex>> { map_res( tuple(( terminated(p_u32, space0), opt(terminated( preceded(tuple((tag("-"), space0)), p_u32), space0, )), )), |(u, v)| crate::Result::Ok((u..=v.unwrap_or(u)).map(|id| id.into()).collect()), ) .parse(input) } fn p_cpu_ranges(input: &str) -> NomResult<Vec<ResourceIndex>> { separated_list1(terminated(tag(","), space0), p_cpu_range)(input) .map(|(a, b)| (a, b.into_iter().flatten().collect())) } fn parse_range(input: &str) -> anyhow::Result<Vec<ResourceIndex>> { let parser = terminated(p_cpu_ranges, opt(newline)); consume_all(parser, input) } fn parse_comma_separated_values(input: &str) -> anyhow::Result<Vec<String>> { let any_except_comma = map(many1(satisfy(|c| c!= ',')), |items| { items.into_iter().collect::<String>() }); consume_all(separated_list1(tag(","), any_except_comma), input) } #[cfg(test)] mod tests { use tako::AsIdVec; use super::{parse_range, read_linux_numa}; #[test] fn test_parse_range() { assert_eq!(parse_range("10").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("10\n").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("0-3\n").unwrap(), vec![0, 1, 2, 3].to_ids()); assert_eq!( parse_range("111-115\n").unwrap(), vec![111, 112, 113, 114, 115].to_ids() ); assert_eq!(parse_range("2,7, 10").unwrap(), vec![2, 7, 10].to_ids()); assert_eq!( parse_range("2-7,10-12,20").unwrap(), vec![2, 3, 4, 5, 6, 7, 10, 11, 12, 20].to_ids() ); assert!(parse_range("xx\n").is_err()); assert!(parse_range("-\n").is_err()); assert!(parse_range("-2\n").is_err()); assert!(parse_range("0-1-2\n").is_err()); assert!(parse_range(",,").is_err()); } #[test] fn test_read_linux_numa() { let cpus = read_linux_numa().unwrap(); assert_eq!(cpus.iter().map(|x| x.len()).sum::<usize>(), num_cpus::get()); } }
{ if let Ok(devices) = parse_comma_separated_values(&devices_str) { log::info!( "Detected GPUs {} from `{}`", format_comma_delimited(&devices), gpu_env.env_var, ); if !has_unique_elements(&devices) { log::warn!("{} contains duplicates ({devices_str})", gpu_env.env_var); continue; } let list = ResourceDescriptorKind::list(devices).expect("List values were not unique"); gpus.push(DetectedGpu { resource_name: gpu_env.resource_name, resource: list, family: gpu_env.family, });
conditional_block
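The masked `conditional_block` in this row is the branch of `detect_gpus_from_env` that splits the environment value on commas and refuses duplicate device labels. A std-only sketch of that check; the helper name and return shape are made up, not the real API:

use std::collections::HashSet;

/// Split a CUDA_VISIBLE_DEVICES-style value on commas and reject duplicates,
/// since duplicate labels would make the derived resource list ambiguous.
fn parse_visible_devices(raw: &str) -> Result<Vec<String>, String> {
    let devices: Vec<String> = raw.split(',').map(|s| s.trim().to_string()).collect();
    if devices.iter().any(|d| d.is_empty()) {
        return Err(format!("empty entry in {raw:?}"));
    }
    let unique: HashSet<&String> = devices.iter().collect();
    if unique.len() != devices.len() {
        return Err(format!("duplicate entries in {raw:?}"));
    }
    Ok(devices)
}

fn main() {
    assert_eq!(
        parse_visible_devices("0,1,3").unwrap(),
        vec!["0".to_string(), "1".to_string(), "3".to_string()]
    );
    assert!(parse_visible_devices("0,0").is_err());
    // In the real detection path the value comes from std::env::var over
    // CUDA_VISIBLE_DEVICES or ROCR_VISIBLE_DEVICES.
}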
hwdetect.rs
use anyhow::anyhow; use nom::character::complete::{newline, satisfy, space0}; use nom::combinator::{map, map_res, opt}; use nom::multi::{many1, separated_list1}; use nom::sequence::{preceded, terminated, tuple}; use nom::Parser; use nom_supreme::tag::complete::tag; use tako::hwstats::GpuFamily; use tako::internal::has_unique_elements; use tako::resources::{ ResourceDescriptorItem, ResourceDescriptorKind, ResourceIndex, ResourceLabel, AMD_GPU_RESOURCE_NAME, MEM_RESOURCE_NAME, NVIDIA_GPU_RESOURCE_NAME, }; use tako::{format_comma_delimited, Set}; use crate::common::format::human_size; use crate::common::parser::{consume_all, p_u32, NomResult}; pub fn detect_cpus() -> anyhow::Result<ResourceDescriptorKind> { read_linux_numa() .map(|numa_nodes| { let filtered = filter_masked_cpus(numa_nodes.clone()); if filtered.iter().flatten().count()!= numa_nodes.iter().flatten().count() { log::info!( "Some cores were filtered by a CPU mask. All cores: {:?}. Allowed cores: {:?}.", numa_nodes .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>(), filtered .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>() ); } filtered }) .and_then(|groups| { ResourceDescriptorKind::groups_numeric(groups) .map_err(|_| anyhow!("Inconsistent CPU naming got from detection")) }) .or_else(|e| { log::debug!("Detecting linux failed: {}", e); let n_cpus = num_cpus::get() as u32; if n_cpus < 1 { anyhow::bail!("Cpu detection failed"); }; Ok(ResourceDescriptorKind::simple_indices(n_cpus)) }) } /// Filter cores that are not allowed because of CPU affinity mask. fn filter_masked_cpus(numa_nodes: Vec<Vec<ResourceIndex>>) -> Vec<Vec<ResourceIndex>> { match core_affinity::get_core_ids() { Some(allowed) => { let cpu_set: Set<usize> = allowed.into_iter().map(|core_id| core_id.id).collect(); numa_nodes .into_iter() .map(|mut numa_node| { numa_node.retain(|&cpu| cpu_set.contains(&cpu.as_usize())); numa_node }) .collect() } None => { log::error!("Failed to found CPU mask. Allowing all cores."); numa_nodes } } } pub fn prune_hyper_threading( kind: &ResourceDescriptorKind, ) -> anyhow::Result<ResourceDescriptorKind> { let groups = kind.as_groups(); let mut new_desc = Vec::new(); for group in groups { let mut new_group = Vec::new(); for cpu_id in group { if read_linux_thread_siblings(&cpu_id)? .iter() .min() .ok_or_else(|| anyhow::anyhow!("Thread siblings are empty")) .map(|v| *v == cpu_id)? { new_group.push(cpu_id); } } new_desc.push(new_group); } Ok(ResourceDescriptorKind::groups(new_desc).unwrap()) } /// Detects additional resources (apart from CPU) on this worker. /// Also returns the detected GPU families. pub fn detect_additional_resources( items: &mut Vec<ResourceDescriptorItem>, ) -> anyhow::Result<Set<GpuFamily>>
gpu_families.insert(gpu.family); items.push(ResourceDescriptorItem { name: gpu.resource_name.to_string(), kind: gpu.resource, }); } } } if!has_resource(items, MEM_RESOURCE_NAME) { if let Ok(mem) = read_linux_memory() { log::info!("Detected {mem}B of memory ({})", human_size(mem)); items.push(ResourceDescriptorItem { name: MEM_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::Sum { size: mem }, }); } } Ok(gpu_families) } /// GPU resource that can be detected from an environment variable. pub struct GpuEnvironmentRecord { env_var: &'static str, pub resource_name: &'static str, pub family: GpuFamily, } impl GpuEnvironmentRecord { const fn new(env_var: &'static str, resource_name: &'static str, family: GpuFamily) -> Self { Self { env_var, resource_name, family, } } } pub const GPU_ENVIRONMENTS: &[GpuEnvironmentRecord; 2] = &[ GpuEnvironmentRecord::new( "CUDA_VISIBLE_DEVICES", NVIDIA_GPU_RESOURCE_NAME, GpuFamily::Nvidia, ), GpuEnvironmentRecord::new( "ROCR_VISIBLE_DEVICES", AMD_GPU_RESOURCE_NAME, GpuFamily::Amd, ), ]; struct DetectedGpu { resource_name: &'static str, resource: ResourceDescriptorKind, family: GpuFamily, } /// Tries to detect available GPUs from one of the `GPU_ENV_KEYS` environment variables. fn detect_gpus_from_env() -> Vec<DetectedGpu> { let mut gpus = Vec::new(); for gpu_env in GPU_ENVIRONMENTS { if let Ok(devices_str) = std::env::var(gpu_env.env_var) { if let Ok(devices) = parse_comma_separated_values(&devices_str) { log::info!( "Detected GPUs {} from `{}`", format_comma_delimited(&devices), gpu_env.env_var, ); if!has_unique_elements(&devices) { log::warn!("{} contains duplicates ({devices_str})", gpu_env.env_var); continue; } let list = ResourceDescriptorKind::list(devices).expect("List values were not unique"); gpus.push(DetectedGpu { resource_name: gpu_env.resource_name, resource: list, family: gpu_env.family, }); } } } gpus } /// Try to find out how many Nvidia GPUs are available on the current node. fn read_nvidia_linux_gpu_count() -> anyhow::Result<usize> { Ok(std::fs::read_dir("/proc/driver/nvidia/gpus")?.count()) } /// Try to get total memory on the current node. fn read_linux_memory() -> anyhow::Result<u64> { Ok(psutil::memory::virtual_memory()?.total()) } /// Try to find the CPU NUMA configuration. /// /// Returns a list of NUMA nodes, each node contains a list of assigned CPUs. fn read_linux_numa() -> anyhow::Result<Vec<Vec<ResourceIndex>>> { let nodes = parse_range(&std::fs::read_to_string( "/sys/devices/system/node/possible", )?)?; let mut numa_nodes: Vec<Vec<ResourceIndex>> = Vec::new(); for numa_index in nodes { let filename = format!("/sys/devices/system/node/node{numa_index}/cpulist"); numa_nodes.push(parse_range(&std::fs::read_to_string(filename)?)?); } log::debug!("Linux numa detection is successful"); Ok(numa_nodes) } fn read_linux_thread_siblings(cpu_id: &ResourceLabel) -> anyhow::Result<Vec<ResourceLabel>> { let filename = format!( "/sys/devices/system/cpu/cpu{}/topology/thread_siblings_list", cpu_id ); log::debug!("Reading {}", filename); parse_range(&std::fs::read_to_string(filename)?) 
.map(|indices| indices.into_iter().map(|i| i.to_string()).collect()) } fn p_cpu_range(input: &str) -> NomResult<Vec<ResourceIndex>> { map_res( tuple(( terminated(p_u32, space0), opt(terminated( preceded(tuple((tag("-"), space0)), p_u32), space0, )), )), |(u, v)| crate::Result::Ok((u..=v.unwrap_or(u)).map(|id| id.into()).collect()), ) .parse(input) } fn p_cpu_ranges(input: &str) -> NomResult<Vec<ResourceIndex>> { separated_list1(terminated(tag(","), space0), p_cpu_range)(input) .map(|(a, b)| (a, b.into_iter().flatten().collect())) } fn parse_range(input: &str) -> anyhow::Result<Vec<ResourceIndex>> { let parser = terminated(p_cpu_ranges, opt(newline)); consume_all(parser, input) } fn parse_comma_separated_values(input: &str) -> anyhow::Result<Vec<String>> { let any_except_comma = map(many1(satisfy(|c| c!= ',')), |items| { items.into_iter().collect::<String>() }); consume_all(separated_list1(tag(","), any_except_comma), input) } #[cfg(test)] mod tests { use tako::AsIdVec; use super::{parse_range, read_linux_numa}; #[test] fn test_parse_range() { assert_eq!(parse_range("10").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("10\n").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("0-3\n").unwrap(), vec![0, 1, 2, 3].to_ids()); assert_eq!( parse_range("111-115\n").unwrap(), vec![111, 112, 113, 114, 115].to_ids() ); assert_eq!(parse_range("2,7, 10").unwrap(), vec![2, 7, 10].to_ids()); assert_eq!( parse_range("2-7,10-12,20").unwrap(), vec![2, 3, 4, 5, 6, 7, 10, 11, 12, 20].to_ids() ); assert!(parse_range("xx\n").is_err()); assert!(parse_range("-\n").is_err()); assert!(parse_range("-2\n").is_err()); assert!(parse_range("0-1-2\n").is_err()); assert!(parse_range(",,").is_err()); } #[test] fn test_read_linux_numa() { let cpus = read_linux_numa().unwrap(); assert_eq!(cpus.iter().map(|x| x.len()).sum::<usize>(), num_cpus::get()); } }
{ let mut gpu_families = Set::new(); let has_resource = |items: &[ResourceDescriptorItem], name: &str| items.iter().any(|x| x.name == name); let detected_gpus = detect_gpus_from_env(); if detected_gpus.is_empty() && !has_resource(items, NVIDIA_GPU_RESOURCE_NAME) { if let Ok(count) = read_nvidia_linux_gpu_count() { if count > 0 { gpu_families.insert(GpuFamily::Nvidia); log::info!("Detected {} GPUs from procs", count); items.push(ResourceDescriptorItem { name: NVIDIA_GPU_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::simple_indices(count as u32), }); } } } else { for gpu in detected_gpus { if !has_resource(items, gpu.resource_name) {
identifier_body
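The `identifier_body` masked here is the body of `detect_additional_resources`; its one structural rule is that an auto-detected resource is appended only when no item with the same name was configured explicitly. A tiny illustration of that rule with a hypothetical `ResourceItem` type (not tako's):

struct ResourceItem {
    name: String,
    amount: u64,
}

fn add_if_missing(items: &mut Vec<ResourceItem>, candidate: ResourceItem) {
    // An explicit configuration always wins over detection.
    let present = items.iter().any(|it| it.name == candidate.name);
    if !present {
        items.push(candidate);
    }
}

fn main() {
    let mut items = vec![ResourceItem { name: "mem".into(), amount: 1 << 30 }];
    // The detected value is ignored because "mem" was configured explicitly.
    add_if_missing(&mut items, ResourceItem { name: "mem".into(), amount: 1 << 34 });
    add_if_missing(&mut items, ResourceItem { name: "gpus/nvidia".into(), amount: 2 });
    assert_eq!(items.len(), 2);
}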
hwdetect.rs
use nom::sequence::{preceded, terminated, tuple}; use nom::Parser; use nom_supreme::tag::complete::tag; use tako::hwstats::GpuFamily; use tako::internal::has_unique_elements; use tako::resources::{ ResourceDescriptorItem, ResourceDescriptorKind, ResourceIndex, ResourceLabel, AMD_GPU_RESOURCE_NAME, MEM_RESOURCE_NAME, NVIDIA_GPU_RESOURCE_NAME, }; use tako::{format_comma_delimited, Set}; use crate::common::format::human_size; use crate::common::parser::{consume_all, p_u32, NomResult}; pub fn detect_cpus() -> anyhow::Result<ResourceDescriptorKind> { read_linux_numa() .map(|numa_nodes| { let filtered = filter_masked_cpus(numa_nodes.clone()); if filtered.iter().flatten().count()!= numa_nodes.iter().flatten().count() { log::info!( "Some cores were filtered by a CPU mask. All cores: {:?}. Allowed cores: {:?}.", numa_nodes .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>(), filtered .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>() ); } filtered }) .and_then(|groups| { ResourceDescriptorKind::groups_numeric(groups) .map_err(|_| anyhow!("Inconsistent CPU naming got from detection")) }) .or_else(|e| { log::debug!("Detecting linux failed: {}", e); let n_cpus = num_cpus::get() as u32; if n_cpus < 1 { anyhow::bail!("Cpu detection failed"); }; Ok(ResourceDescriptorKind::simple_indices(n_cpus)) }) } /// Filter cores that are not allowed because of CPU affinity mask. fn filter_masked_cpus(numa_nodes: Vec<Vec<ResourceIndex>>) -> Vec<Vec<ResourceIndex>> { match core_affinity::get_core_ids() { Some(allowed) => { let cpu_set: Set<usize> = allowed.into_iter().map(|core_id| core_id.id).collect(); numa_nodes .into_iter() .map(|mut numa_node| { numa_node.retain(|&cpu| cpu_set.contains(&cpu.as_usize())); numa_node }) .collect() } None => { log::error!("Failed to found CPU mask. Allowing all cores."); numa_nodes } } } pub fn prune_hyper_threading( kind: &ResourceDescriptorKind, ) -> anyhow::Result<ResourceDescriptorKind> { let groups = kind.as_groups(); let mut new_desc = Vec::new(); for group in groups { let mut new_group = Vec::new(); for cpu_id in group { if read_linux_thread_siblings(&cpu_id)? .iter() .min() .ok_or_else(|| anyhow::anyhow!("Thread siblings are empty")) .map(|v| *v == cpu_id)? { new_group.push(cpu_id); } } new_desc.push(new_group); } Ok(ResourceDescriptorKind::groups(new_desc).unwrap()) } /// Detects additional resources (apart from CPU) on this worker. /// Also returns the detected GPU families. 
pub fn detect_additional_resources( items: &mut Vec<ResourceDescriptorItem>, ) -> anyhow::Result<Set<GpuFamily>> { let mut gpu_families = Set::new(); let has_resource = |items: &[ResourceDescriptorItem], name: &str| items.iter().any(|x| x.name == name); let detected_gpus = detect_gpus_from_env(); if detected_gpus.is_empty() &&!has_resource(items, NVIDIA_GPU_RESOURCE_NAME) { if let Ok(count) = read_nvidia_linux_gpu_count() { if count > 0 { gpu_families.insert(GpuFamily::Nvidia); log::info!("Detected {} GPUs from procs", count); items.push(ResourceDescriptorItem { name: NVIDIA_GPU_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::simple_indices(count as u32), }); } } } else { for gpu in detected_gpus { if!has_resource(items, gpu.resource_name) { gpu_families.insert(gpu.family); items.push(ResourceDescriptorItem { name: gpu.resource_name.to_string(), kind: gpu.resource, }); } } } if!has_resource(items, MEM_RESOURCE_NAME) { if let Ok(mem) = read_linux_memory() { log::info!("Detected {mem}B of memory ({})", human_size(mem)); items.push(ResourceDescriptorItem { name: MEM_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::Sum { size: mem }, }); } } Ok(gpu_families) } /// GPU resource that can be detected from an environment variable. pub struct GpuEnvironmentRecord { env_var: &'static str, pub resource_name: &'static str, pub family: GpuFamily, } impl GpuEnvironmentRecord { const fn new(env_var: &'static str, resource_name: &'static str, family: GpuFamily) -> Self { Self { env_var, resource_name, family, } } } pub const GPU_ENVIRONMENTS: &[GpuEnvironmentRecord; 2] = &[ GpuEnvironmentRecord::new( "CUDA_VISIBLE_DEVICES", NVIDIA_GPU_RESOURCE_NAME, GpuFamily::Nvidia, ), GpuEnvironmentRecord::new( "ROCR_VISIBLE_DEVICES", AMD_GPU_RESOURCE_NAME, GpuFamily::Amd, ), ]; struct DetectedGpu { resource_name: &'static str, resource: ResourceDescriptorKind, family: GpuFamily, } /// Tries to detect available GPUs from one of the `GPU_ENV_KEYS` environment variables. fn detect_gpus_from_env() -> Vec<DetectedGpu> { let mut gpus = Vec::new(); for gpu_env in GPU_ENVIRONMENTS { if let Ok(devices_str) = std::env::var(gpu_env.env_var) { if let Ok(devices) = parse_comma_separated_values(&devices_str) { log::info!( "Detected GPUs {} from `{}`", format_comma_delimited(&devices), gpu_env.env_var, ); if!has_unique_elements(&devices) { log::warn!("{} contains duplicates ({devices_str})", gpu_env.env_var); continue; } let list = ResourceDescriptorKind::list(devices).expect("List values were not unique"); gpus.push(DetectedGpu { resource_name: gpu_env.resource_name, resource: list, family: gpu_env.family, }); } } } gpus } /// Try to find out how many Nvidia GPUs are available on the current node. fn read_nvidia_linux_gpu_count() -> anyhow::Result<usize> { Ok(std::fs::read_dir("/proc/driver/nvidia/gpus")?.count()) } /// Try to get total memory on the current node. fn read_linux_memory() -> anyhow::Result<u64> { Ok(psutil::memory::virtual_memory()?.total()) } /// Try to find the CPU NUMA configuration. /// /// Returns a list of NUMA nodes, each node contains a list of assigned CPUs. 
fn read_linux_numa() -> anyhow::Result<Vec<Vec<ResourceIndex>>> { let nodes = parse_range(&std::fs::read_to_string( "/sys/devices/system/node/possible", )?)?; let mut numa_nodes: Vec<Vec<ResourceIndex>> = Vec::new(); for numa_index in nodes { let filename = format!("/sys/devices/system/node/node{numa_index}/cpulist"); numa_nodes.push(parse_range(&std::fs::read_to_string(filename)?)?); } log::debug!("Linux numa detection is successful"); Ok(numa_nodes) } fn read_linux_thread_siblings(cpu_id: &ResourceLabel) -> anyhow::Result<Vec<ResourceLabel>> { let filename = format!( "/sys/devices/system/cpu/cpu{}/topology/thread_siblings_list", cpu_id ); log::debug!("Reading {}", filename); parse_range(&std::fs::read_to_string(filename)?) .map(|indices| indices.into_iter().map(|i| i.to_string()).collect()) } fn p_cpu_range(input: &str) -> NomResult<Vec<ResourceIndex>> { map_res( tuple(( terminated(p_u32, space0), opt(terminated( preceded(tuple((tag("-"), space0)), p_u32), space0, )), )), |(u, v)| crate::Result::Ok((u..=v.unwrap_or(u)).map(|id| id.into()).collect()), ) .parse(input) } fn p_cpu_ranges(input: &str) -> NomResult<Vec<ResourceIndex>> { separated_list1(terminated(tag(","), space0), p_cpu_range)(input) .map(|(a, b)| (a, b.into_iter().flatten().collect())) } fn parse_range(input: &str) -> anyhow::Result<Vec<ResourceIndex>> { let parser = terminated(p_cpu_ranges, opt(newline)); consume_all(parser, input) } fn parse_comma_separated_values(input: &str) -> anyhow::Result<Vec<String>> { let any_except_comma = map(many1(satisfy(|c| c!= ',')), |items| { items.into_iter().collect::<String>() }); consume_all(separated_list1(tag(","), any_except_comma), input) } #[cfg(test)] mod tests { use tako::AsIdVec; use super::{parse_range, read_linux_numa}; #[test] fn test_parse_range() { assert_eq!(parse_range("10").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("10\n").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("0-3\n").unwrap(), vec![0, 1, 2, 3].to_ids()); assert_eq!( parse_range("111-115\n").unwrap(), vec![111, 112, 113, 114, 115].to_ids() ); assert_eq!(parse_range("2,7, 10").unwrap(), vec![2, 7, 10].to_ids()); assert_eq!( parse_range("2-7,10-12,20").unwrap(), vec![2, 3, 4, 5, 6, 7, 10, 11, 12, 20].to_ids() ); assert!(parse_range("xx\n").is_err()); assert!(parse_range("-\n").is_err()); assert!(parse_range("-2\n").is_err()); assert!(parse_range("0-1-2\n").is_err()); assert!(parse_range(",,").is_err()); } #[test] fn test_read_linux_numa() { let cpus = read_linux_numa().unwrap(); assert_eq!(cpus.iter().map(|x| x.len()).sum::<usize>(), num_cpus::get()); } }
use anyhow::anyhow; use nom::character::complete::{newline, satisfy, space0}; use nom::combinator::{map, map_res, opt}; use nom::multi::{many1, separated_list1};
random_line_split
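This row's cells include `read_linux_numa` and the nom-based `parse_range`, which turn /sys cpulist strings such as "0-3,8-11,16" into CPU indices. The same grammar in plain std, for readers who do not want to trace the nom combinators (hypothetical helper, not the hq parser):

fn parse_cpulist(input: &str) -> Result<Vec<u32>, String> {
    let mut cpus = Vec::new();
    for part in input.trim().split(',') {
        let part = part.trim();
        match part.split_once('-') {
            // "lo-hi" expands to the inclusive range lo..=hi.
            Some((lo, hi)) => {
                let lo: u32 = lo.trim().parse().map_err(|e| format!("bad range start: {e}"))?;
                let hi: u32 = hi.trim().parse().map_err(|e| format!("bad range end: {e}"))?;
                cpus.extend(lo..=hi);
            }
            // A bare number is a single CPU index.
            None => {
                let cpu: u32 = part.parse().map_err(|e| format!("bad index: {e}"))?;
                cpus.push(cpu);
            }
        }
    }
    Ok(cpus)
}

fn main() {
    assert_eq!(
        parse_cpulist("0-3, 8-11,16\n").unwrap(),
        vec![0, 1, 2, 3, 8, 9, 10, 11, 16]
    );
    assert!(parse_cpulist("0-1-2").is_err()); // same rejections as the nom parser's tests
    assert!(parse_cpulist(",,").is_err());
}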
hwdetect.rs
use anyhow::anyhow; use nom::character::complete::{newline, satisfy, space0}; use nom::combinator::{map, map_res, opt}; use nom::multi::{many1, separated_list1}; use nom::sequence::{preceded, terminated, tuple}; use nom::Parser; use nom_supreme::tag::complete::tag; use tako::hwstats::GpuFamily; use tako::internal::has_unique_elements; use tako::resources::{ ResourceDescriptorItem, ResourceDescriptorKind, ResourceIndex, ResourceLabel, AMD_GPU_RESOURCE_NAME, MEM_RESOURCE_NAME, NVIDIA_GPU_RESOURCE_NAME, }; use tako::{format_comma_delimited, Set}; use crate::common::format::human_size; use crate::common::parser::{consume_all, p_u32, NomResult}; pub fn detect_cpus() -> anyhow::Result<ResourceDescriptorKind> { read_linux_numa() .map(|numa_nodes| { let filtered = filter_masked_cpus(numa_nodes.clone()); if filtered.iter().flatten().count()!= numa_nodes.iter().flatten().count() { log::info!( "Some cores were filtered by a CPU mask. All cores: {:?}. Allowed cores: {:?}.", numa_nodes .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>(), filtered .iter() .map(|c| format_comma_delimited(c.iter().map(|c| c.as_num()))) .collect::<Vec<_>>() ); } filtered }) .and_then(|groups| { ResourceDescriptorKind::groups_numeric(groups) .map_err(|_| anyhow!("Inconsistent CPU naming got from detection")) }) .or_else(|e| { log::debug!("Detecting linux failed: {}", e); let n_cpus = num_cpus::get() as u32; if n_cpus < 1 { anyhow::bail!("Cpu detection failed"); }; Ok(ResourceDescriptorKind::simple_indices(n_cpus)) }) } /// Filter cores that are not allowed because of CPU affinity mask. fn filter_masked_cpus(numa_nodes: Vec<Vec<ResourceIndex>>) -> Vec<Vec<ResourceIndex>> { match core_affinity::get_core_ids() { Some(allowed) => { let cpu_set: Set<usize> = allowed.into_iter().map(|core_id| core_id.id).collect(); numa_nodes .into_iter() .map(|mut numa_node| { numa_node.retain(|&cpu| cpu_set.contains(&cpu.as_usize())); numa_node }) .collect() } None => { log::error!("Failed to found CPU mask. Allowing all cores."); numa_nodes } } } pub fn prune_hyper_threading( kind: &ResourceDescriptorKind, ) -> anyhow::Result<ResourceDescriptorKind> { let groups = kind.as_groups(); let mut new_desc = Vec::new(); for group in groups { let mut new_group = Vec::new(); for cpu_id in group { if read_linux_thread_siblings(&cpu_id)? .iter() .min() .ok_or_else(|| anyhow::anyhow!("Thread siblings are empty")) .map(|v| *v == cpu_id)? { new_group.push(cpu_id); } } new_desc.push(new_group); } Ok(ResourceDescriptorKind::groups(new_desc).unwrap()) } /// Detects additional resources (apart from CPU) on this worker. /// Also returns the detected GPU families. 
pub fn detect_additional_resources( items: &mut Vec<ResourceDescriptorItem>, ) -> anyhow::Result<Set<GpuFamily>> { let mut gpu_families = Set::new(); let has_resource = |items: &[ResourceDescriptorItem], name: &str| items.iter().any(|x| x.name == name); let detected_gpus = detect_gpus_from_env(); if detected_gpus.is_empty() &&!has_resource(items, NVIDIA_GPU_RESOURCE_NAME) { if let Ok(count) = read_nvidia_linux_gpu_count() { if count > 0 { gpu_families.insert(GpuFamily::Nvidia); log::info!("Detected {} GPUs from procs", count); items.push(ResourceDescriptorItem { name: NVIDIA_GPU_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::simple_indices(count as u32), }); } } } else { for gpu in detected_gpus { if!has_resource(items, gpu.resource_name) { gpu_families.insert(gpu.family); items.push(ResourceDescriptorItem { name: gpu.resource_name.to_string(), kind: gpu.resource, }); } } } if!has_resource(items, MEM_RESOURCE_NAME) { if let Ok(mem) = read_linux_memory() { log::info!("Detected {mem}B of memory ({})", human_size(mem)); items.push(ResourceDescriptorItem { name: MEM_RESOURCE_NAME.to_string(), kind: ResourceDescriptorKind::Sum { size: mem }, }); } } Ok(gpu_families) } /// GPU resource that can be detected from an environment variable. pub struct GpuEnvironmentRecord { env_var: &'static str, pub resource_name: &'static str, pub family: GpuFamily, } impl GpuEnvironmentRecord { const fn new(env_var: &'static str, resource_name: &'static str, family: GpuFamily) -> Self { Self { env_var, resource_name, family, } } } pub const GPU_ENVIRONMENTS: &[GpuEnvironmentRecord; 2] = &[ GpuEnvironmentRecord::new( "CUDA_VISIBLE_DEVICES", NVIDIA_GPU_RESOURCE_NAME, GpuFamily::Nvidia, ), GpuEnvironmentRecord::new( "ROCR_VISIBLE_DEVICES", AMD_GPU_RESOURCE_NAME, GpuFamily::Amd, ), ]; struct DetectedGpu { resource_name: &'static str, resource: ResourceDescriptorKind, family: GpuFamily, } /// Tries to detect available GPUs from one of the `GPU_ENV_KEYS` environment variables. fn detect_gpus_from_env() -> Vec<DetectedGpu> { let mut gpus = Vec::new(); for gpu_env in GPU_ENVIRONMENTS { if let Ok(devices_str) = std::env::var(gpu_env.env_var) { if let Ok(devices) = parse_comma_separated_values(&devices_str) { log::info!( "Detected GPUs {} from `{}`", format_comma_delimited(&devices), gpu_env.env_var, ); if!has_unique_elements(&devices) { log::warn!("{} contains duplicates ({devices_str})", gpu_env.env_var); continue; } let list = ResourceDescriptorKind::list(devices).expect("List values were not unique"); gpus.push(DetectedGpu { resource_name: gpu_env.resource_name, resource: list, family: gpu_env.family, }); } } } gpus } /// Try to find out how many Nvidia GPUs are available on the current node. fn read_nvidia_linux_gpu_count() -> anyhow::Result<usize> { Ok(std::fs::read_dir("/proc/driver/nvidia/gpus")?.count()) } /// Try to get total memory on the current node. fn
() -> anyhow::Result<u64> { Ok(psutil::memory::virtual_memory()?.total()) } /// Try to find the CPU NUMA configuration. /// /// Returns a list of NUMA nodes, each node contains a list of assigned CPUs. fn read_linux_numa() -> anyhow::Result<Vec<Vec<ResourceIndex>>> { let nodes = parse_range(&std::fs::read_to_string( "/sys/devices/system/node/possible", )?)?; let mut numa_nodes: Vec<Vec<ResourceIndex>> = Vec::new(); for numa_index in nodes { let filename = format!("/sys/devices/system/node/node{numa_index}/cpulist"); numa_nodes.push(parse_range(&std::fs::read_to_string(filename)?)?); } log::debug!("Linux numa detection is successful"); Ok(numa_nodes) } fn read_linux_thread_siblings(cpu_id: &ResourceLabel) -> anyhow::Result<Vec<ResourceLabel>> { let filename = format!( "/sys/devices/system/cpu/cpu{}/topology/thread_siblings_list", cpu_id ); log::debug!("Reading {}", filename); parse_range(&std::fs::read_to_string(filename)?) .map(|indices| indices.into_iter().map(|i| i.to_string()).collect()) } fn p_cpu_range(input: &str) -> NomResult<Vec<ResourceIndex>> { map_res( tuple(( terminated(p_u32, space0), opt(terminated( preceded(tuple((tag("-"), space0)), p_u32), space0, )), )), |(u, v)| crate::Result::Ok((u..=v.unwrap_or(u)).map(|id| id.into()).collect()), ) .parse(input) } fn p_cpu_ranges(input: &str) -> NomResult<Vec<ResourceIndex>> { separated_list1(terminated(tag(","), space0), p_cpu_range)(input) .map(|(a, b)| (a, b.into_iter().flatten().collect())) } fn parse_range(input: &str) -> anyhow::Result<Vec<ResourceIndex>> { let parser = terminated(p_cpu_ranges, opt(newline)); consume_all(parser, input) } fn parse_comma_separated_values(input: &str) -> anyhow::Result<Vec<String>> { let any_except_comma = map(many1(satisfy(|c| c!= ',')), |items| { items.into_iter().collect::<String>() }); consume_all(separated_list1(tag(","), any_except_comma), input) } #[cfg(test)] mod tests { use tako::AsIdVec; use super::{parse_range, read_linux_numa}; #[test] fn test_parse_range() { assert_eq!(parse_range("10").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("10\n").unwrap(), vec![10].to_ids()); assert_eq!(parse_range("0-3\n").unwrap(), vec![0, 1, 2, 3].to_ids()); assert_eq!( parse_range("111-115\n").unwrap(), vec![111, 112, 113, 114, 115].to_ids() ); assert_eq!(parse_range("2,7, 10").unwrap(), vec![2, 7, 10].to_ids()); assert_eq!( parse_range("2-7,10-12,20").unwrap(), vec![2, 3, 4, 5, 6, 7, 10, 11, 12, 20].to_ids() ); assert!(parse_range("xx\n").is_err()); assert!(parse_range("-\n").is_err()); assert!(parse_range("-2\n").is_err()); assert!(parse_range("0-1-2\n").is_err()); assert!(parse_range(",,").is_err()); } #[test] fn test_read_linux_numa() { let cpus = read_linux_numa().unwrap(); assert_eq!(cpus.iter().map(|x| x.len()).sum::<usize>(), num_cpus::get()); } }
read_linux_memory
identifier_name
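Besides memory detection (the masked identifier here is `read_linux_memory`), this sample carries `prune_hyper_threading`, whose rule is: keep a CPU only if it is the lowest-numbered entry in its own thread_siblings_list. A self-contained sketch of that rule with a hard-coded sibling map instead of /sys reads:

use std::collections::HashMap;

fn prune_smt(cpus: &[u32], siblings: &HashMap<u32, Vec<u32>>) -> Vec<u32> {
    cpus.iter()
        .copied()
        .filter(|cpu| {
            // A CPU survives only when it is the minimum of its sibling group,
            // so each physical core keeps exactly one logical CPU.
            siblings
                .get(cpu)
                .and_then(|s| s.iter().min().copied())
                .map_or(false, |lowest| lowest == *cpu)
        })
        .collect()
}

fn main() {
    // Hypothetical 4-core/8-thread machine where CPUs n and n+4 are siblings.
    let siblings: HashMap<u32, Vec<u32>> =
        (0..8u32).map(|cpu| (cpu, vec![cpu % 4, cpu % 4 + 4])).collect();
    let all: Vec<u32> = (0..8).collect();
    assert_eq!(prune_smt(&all, &siblings), vec![0, 1, 2, 3]);
}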
server.rs
packet: &PublicPacket) -> Self { Self { src_cid: ConnectionId::from(packet.scid()), dst_cid: ConnectionId::from(packet.dcid()), token: packet.token().to_vec(), version: packet.version().unwrap(), } } } struct EchConfig { config: u8, public_name: String, sk: PrivateKey, pk: PublicKey, encoded: Vec<u8>, } impl EchConfig { fn new(config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey) -> Res<Self> { let encoded = encode_ech_config(config, public_name, pk)?; Ok(Self { config, public_name: String::from(public_name), sk: sk.clone(), pk: pk.clone(), encoded, }) } } pub struct Server { /// The names of certificates. certs: Vec<String>, /// The ALPN values that the server supports. protocols: Vec<String>, /// The cipher suites that the server supports. ciphers: Vec<Cipher>, /// Anti-replay configuration for 0-RTT. anti_replay: AntiReplay, /// A function for determining if 0-RTT can be accepted. zero_rtt_checker: ServerZeroRttChecker, /// A connection ID generator. cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, /// Connection parameters. conn_params: ConnectionParameters, /// Active connection attempts, keyed by `AttemptKey`. Initial packets with /// the same key are routed to the connection that was first accepted. /// This is cleared out when the connection is closed or established. active_attempts: HashMap<AttemptKey, StateRef>, /// All connections, keyed by ConnectionId. connections: ConnectionTableRef, /// The connections that have new events. active: HashSet<ActiveConnectionRef>, /// The set of connections that need immediate processing. waiting: VecDeque<StateRef>, /// Outstanding timers for connections. timers: Timer<StateRef>, /// Address validation logic, which determines whether we send a Retry. address_validation: Rc<RefCell<AddressValidation>>, /// Directory to create qlog traces in qlog_dir: Option<PathBuf>, /// Encrypted client hello (ECH) configuration. ech_config: Option<EchConfig>, } impl Server { /// Construct a new server. /// * `now` is the time that the server is instantiated. /// * `certs` is a list of the certificates that should be configured. /// * `protocols` is the preference list of ALPN values. /// * `anti_replay` is an anti-replay context. /// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This /// will be passed the value of the `extra` argument that was passed to /// `Connection::send_ticket` to see if it is OK. /// * `cid_generator` is responsible for generating connection IDs and parsing them; /// connection IDs produced by the manager cannot be zero-length. pub fn new( now: Instant, certs: &[impl AsRef<str>], protocols: &[impl AsRef<str>], anti_replay: AntiReplay, zero_rtt_checker: Box<dyn ZeroRttChecker>, cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, conn_params: ConnectionParameters, ) -> Res<Self> { let validation = AddressValidation::new(now, ValidateAddress::Never)?; Ok(Self { certs: certs.iter().map(|x| String::from(x.as_ref())).collect(), protocols: protocols.iter().map(|x| String::from(x.as_ref())).collect(), ciphers: Vec::new(), anti_replay, zero_rtt_checker: ServerZeroRttChecker::new(zero_rtt_checker), cid_generator, conn_params, active_attempts: HashMap::default(), connections: Rc::default(), active: HashSet::default(), waiting: VecDeque::default(), timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY), address_validation: Rc::new(RefCell::new(validation)), qlog_dir: None, ech_config: None, }) } /// Set or clear directory to create logs of connection events in QLOG format. 
pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) { self.qlog_dir = dir; } /// Set the policy for address validation. pub fn set_validation(&mut self, v: ValidateAddress) { self.address_validation.borrow_mut().set_validation(v); } /// Set the cipher suites that should be used. Set an empty value to use /// default values. pub fn set_ciphers(&mut self, ciphers: impl AsRef<[Cipher]>) { self.ciphers = Vec::from(ciphers.as_ref()); } pub fn enable_ech( &mut self, config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey, ) -> Res<()> { self.ech_config = Some(EchConfig::new(config, public_name, sk, pk)?); Ok(()) } pub fn ech_config(&self) -> &[u8] { self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded) } fn remove_timer(&mut self, c: &StateRef) { let last = c.borrow().last_timer; self.timers.remove(last, |t| Rc::ptr_eq(t, c)); } fn process_connection( &mut self, c: StateRef, dgram: Option<Datagram>, now: Instant, ) -> Option<Datagram> { qtrace!([self], "Process connection {:?}", c); let out = c.borrow_mut().process(dgram, now); match out { Output::Datagram(_) => { qtrace!([self], "Sending packet, added to waiting connections"); self.waiting.push_back(Rc::clone(&c)); } Output::Callback(delay) => { let next = now + delay; if next!= c.borrow().last_timer { qtrace!([self], "Change timer to {:?}", next); self.remove_timer(&c); c.borrow_mut().last_timer = next; self.timers.add(next, Rc::clone(&c)); } } _ =>
} if c.borrow().has_events() { qtrace!([self], "Connection active: {:?}", c); self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) }); } if *c.borrow().state() > State::Handshaking { // Remove any active connection attempt now that this is no longer handshaking. if let Some(k) = c.borrow_mut().active_attempt.take() { self.active_attempts.remove(&k); } } if matches!(c.borrow().state(), State::Closed(_)) { c.borrow_mut().set_qlog(NeqoQlog::disabled()); self.connections .borrow_mut() .retain(|_, v|!Rc::ptr_eq(v, &c)); } out.dgram() } fn connection(&self, cid: &ConnectionIdRef) -> Option<StateRef> { self.connections.borrow().get(&cid[..]).map(Rc::clone) } fn handle_initial( &mut self, initial: InitialDetails, dgram: Datagram, now: Instant, ) -> Option<Datagram> { qdebug!([self], "Handle initial"); let res = self .address_validation .borrow() .validate(&initial.token, dgram.source(), now); match res { AddressValidationResult::Invalid => None, AddressValidationResult::Pass => self.connection_attempt(initial, dgram, None, now), AddressValidationResult::ValidRetry(orig_dcid) => { self.connection_attempt(initial, dgram, Some(orig_dcid), now) } AddressValidationResult::Validate => { qinfo!([self], "Send retry for {:?}", initial.dst_cid); let res = self.address_validation.borrow().generate_retry_token( &initial.dst_cid, dgram.source(), now, ); let token = if let Ok(t) = res { t } else { qerror!([self], "unable to generate token, dropping packet"); return None; }; if let Some(new_dcid) = self.cid_generator.borrow_mut().generate_cid() { let packet = PacketBuilder::retry( initial.version, &initial.src_cid, &new_dcid, &token, &initial.dst_cid, ); if let Ok(p) = packet { let retry = Datagram::new(dgram.destination(), dgram.source(), p); Some(retry) } else { qerror!([self], "unable to encode retry, dropping packet"); None } } else { qerror!([self], "no connection ID for retry, dropping packet"); None } } } } fn connection_attempt( &mut self, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: orig_dcid.as_ref().unwrap_or(&initial.dst_cid).clone(), }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle Initial for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { self.accept_connection(attempt_key, initial, dgram, orig_dcid, now) } } fn create_qlog_trace(&self, attempt_key: &AttemptKey) -> NeqoQlog { if let Some(qlog_dir) = &self.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); qlog_path.push(format!("{}.qlog", attempt_key.odcid)); // The original DCID is chosen by the client. Using create_new() // prevents attackers from overwriting existing logs. 
match OpenOptions::new() .write(true) .create_new(true) .open(&qlog_path) { Ok(f) => { qinfo!("Qlog output to {}", qlog_path.display()); let streamer = ::qlog::QlogStreamer::new( qlog::QLOG_VERSION.to_string(), Some("Neqo server qlog".to_string()), Some("Neqo server qlog".to_string()), None, std::time::Instant::now(), common::qlog::new_trace(Role::Server), Box::new(f), ); let n_qlog = NeqoQlog::enabled(streamer, qlog_path); match n_qlog { Ok(nql) => nql, Err(e) => { // Keep going but w/o qlogging qerror!("NeqoQlog error: {}", e); NeqoQlog::disabled() } } } Err(e) => { qerror!( "Could not open file {} for qlog output: {}", qlog_path.display(), e ); NeqoQlog::disabled() } } } else { NeqoQlog::disabled() } } fn setup_connection( &mut self, c: &mut Connection, attempt_key: &AttemptKey, initial: InitialDetails, orig_dcid: Option<ConnectionId>, ) { let zcheck = self.zero_rtt_checker.clone(); if c.server_enable_0rtt(&self.anti_replay, zcheck).is_err() { qwarn!([self], "Unable to enable 0-RTT"); } if let Some(odcid) = orig_dcid { // There was a retry, so set the connection IDs for. c.set_retry_cids(odcid, initial.src_cid, initial.dst_cid); } c.set_validation(Rc::clone(&self.address_validation)); c.set_qlog(self.create_qlog_trace(attempt_key)); if let Some(cfg) = &self.ech_config { if c.server_enable_ech(cfg.config, &cfg.public_name, &cfg.sk, &cfg.pk) .is_err() { qwarn!([self], "Unable to enable ECH"); } } } fn accept_connection( &mut self, attempt_key: AttemptKey, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { qinfo!([self], "Accept connection {:?}", attempt_key); // The internal connection ID manager that we use is not used directly. // Instead, wrap it so that we can save connection IDs. let cid_mgr = Rc::new(RefCell::new(ServerConnectionIdGenerator { c: Weak::new(), cid_generator: Rc::clone(&self.cid_generator), connections: Rc::clone(&self.connections), saved_cids: Vec::new(), })); let mut params = self.conn_params.clone(); params.get_versions_mut().set_initial(initial.version); let sconn = Connection::new_server( &self.certs, &self.protocols, Rc::clone(&cid_mgr) as _, params, ); if let Ok(mut c) = sconn { self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); let c = Rc::new(RefCell::new(ServerConnectionState { c, last_timer: now, active_attempt: Some(attempt_key.clone()), })); cid_mgr.borrow_mut().set_connection(Rc::clone(&c)); let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c)); debug_assert!(previous_attempt.is_none()); self.process_connection(c, Some(dgram), now) } else { qwarn!([self], "Unable to create connection"); None } } /// Handle 0-RTT packets that were sent with the client's choice of connection ID. /// Most 0-RTT will arrive this way. A client can usually send 1-RTT after it /// receives a connection ID from the server. fn handle_0rtt( &mut self, dgram: Datagram, dcid: ConnectionId, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: dcid, }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle 0-RTT for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { qdebug!([self], "Dropping 0-RTT for unknown connection"); None } } fn process_input(&mut self, dgram: Datagram, now: Instant) -> Option<Datagram> { qtrace!("Process datagram: {}", hex(&dgram[..])); // This is only looking at the first packet header in the datagram. 
// All packets in the datagram are routed to the same connection. let res = PublicPacket::decode(&dgram[..], self.cid_generator.borrow().as_decoder()); let (packet, _remainder) = match res { Ok(res) => res, _ => { qtrace!([self], "Discarding {:?}", dgram); return None; } }; // Finding an existing connection. Should be the most common case. if let Some(c) = self.connection(packet.dcid()) { return self.process_connection(c, Some(dgram), now); } if packet.packet_type() == PacketType::Short { // TODO send a stateless reset here. qtrace!([self], "Short header packet for an unknown connection"); return None; } if packet.packet_type() == PacketType::OtherVersion || (packet.packet_type() == PacketType::Initial &&!self .conn_params .get_versions() .all() .contains(&packet.version().unwrap())) { if dgram.len() < MIN_INITIAL_PACKET_SIZE { qdebug!([self], "Unsupported version: too short"); return None; } qdebug!([self], "Unsupported version: {:x}", packet.wire_version()); let vn = PacketBuilder::version_negotiation( packet.scid(), packet.dcid(), packet.wire_version(),
{ self.remove_timer(&c); }
conditional_block
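
The `process_connection` code in this row re-arms a connection's timer when `process()` returns `Output::Callback(delay)` and, in the filled-in `_` arm, simply removes the pending timer. Below is a minimal, self-contained sketch of that remove-then-re-add pattern; `SimpleTimers`, `rearm`, and the `u64` connection ids are illustrative stand-ins, not the bucketed `Timer<StateRef>` the server actually uses.

use std::collections::BTreeMap;
use std::time::{Duration, Instant};

/// Minimal stand-in for a timer structure: maps a deadline to connection ids.
/// (The real server uses a bucketed `Timer<StateRef>`; this only illustrates
/// the remove-then-re-add pattern from `process_connection`.)
struct SimpleTimers {
    deadlines: BTreeMap<Instant, Vec<u64>>,
}

impl SimpleTimers {
    fn new() -> Self {
        Self { deadlines: BTreeMap::new() }
    }

    fn add(&mut self, at: Instant, id: u64) {
        self.deadlines.entry(at).or_default().push(id);
    }

    /// Remove `id` from the bucket for its previous deadline, if present.
    fn remove(&mut self, at: Instant, id: u64) {
        if let Some(bucket) = self.deadlines.get_mut(&at) {
            bucket.retain(|&v| v != id);
            if bucket.is_empty() {
                self.deadlines.remove(&at);
            }
        }
    }

    /// The pattern used for `Output::Callback(delay)`: only move the timer
    /// when the deadline actually changed.
    fn rearm(&mut self, id: u64, last: &mut Instant, now: Instant, delay: Duration) {
        let next = now + delay;
        if next != *last {
            self.remove(*last, id);
            *last = next;
            self.add(next, id);
        }
    }
}

fn main() {
    let now = Instant::now();
    let mut timers = SimpleTimers::new();
    let mut last = now;
    timers.add(last, 1);
    timers.rearm(1, &mut last, now, Duration::from_millis(30));
    assert_eq!(timers.deadlines.len(), 1);
    println!("re-armed to {:?}", last);
}
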
server.rs
/// as this depends on there being some distribution of events. const TIMER_GRANULARITY: Duration = Duration::from_millis(4); /// The number of buckets in the timer. As mentioned in the definition of `Timer`, /// the granularity and capacity need to multiply to be larger than the largest /// delay that might be used. That's the idle timeout (currently 30s). const TIMER_CAPACITY: usize = 16384; type StateRef = Rc<RefCell<ServerConnectionState>>; type ConnectionTableRef = Rc<RefCell<HashMap<ConnectionId, StateRef>>>; #[derive(Debug)] pub struct ServerConnectionState { c: Connection, active_attempt: Option<AttemptKey>, last_timer: Instant, } impl Deref for ServerConnectionState { type Target = Connection; fn deref(&self) -> &Self::Target { &self.c } } impl DerefMut for ServerConnectionState { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.c } } /// A `AttemptKey` is used to disambiguate connection attempts. /// Multiple connection attempts with the same key won't produce multiple connections. #[derive(Clone, Debug, Hash, PartialEq, Eq)] struct AttemptKey { // Using the remote address is sufficient for disambiguation, // until we support multiple local socket addresses. remote_address: SocketAddr, odcid: ConnectionId, } /// A `ServerZeroRttChecker` is a simple wrapper around a single checker. /// It uses `RefCell` so that the wrapped checker can be shared between /// multiple connections created by the server. #[derive(Clone, Debug)] struct ServerZeroRttChecker { checker: Rc<RefCell<Box<dyn ZeroRttChecker>>>, } impl ServerZeroRttChecker { pub fn new(checker: Box<dyn ZeroRttChecker>) -> Self { Self { checker: Rc::new(RefCell::new(checker)), } } } impl ZeroRttChecker for ServerZeroRttChecker { fn check(&self, token: &[u8]) -> ZeroRttCheckResult { self.checker.borrow().check(token) } } /// `InitialDetails` holds important information for processing `Initial` packets. struct InitialDetails { src_cid: ConnectionId, dst_cid: ConnectionId, token: Vec<u8>, version: Version, } impl InitialDetails { fn new(packet: &PublicPacket) -> Self { Self { src_cid: ConnectionId::from(packet.scid()), dst_cid: ConnectionId::from(packet.dcid()), token: packet.token().to_vec(), version: packet.version().unwrap(), } } } struct EchConfig { config: u8, public_name: String, sk: PrivateKey, pk: PublicKey, encoded: Vec<u8>, } impl EchConfig { fn new(config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey) -> Res<Self> { let encoded = encode_ech_config(config, public_name, pk)?; Ok(Self { config, public_name: String::from(public_name), sk: sk.clone(), pk: pk.clone(), encoded, }) } } pub struct Server { /// The names of certificates. certs: Vec<String>, /// The ALPN values that the server supports. protocols: Vec<String>, /// The cipher suites that the server supports. ciphers: Vec<Cipher>, /// Anti-replay configuration for 0-RTT. anti_replay: AntiReplay, /// A function for determining if 0-RTT can be accepted. zero_rtt_checker: ServerZeroRttChecker, /// A connection ID generator. cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, /// Connection parameters. conn_params: ConnectionParameters, /// Active connection attempts, keyed by `AttemptKey`. Initial packets with /// the same key are routed to the connection that was first accepted. /// This is cleared out when the connection is closed or established. active_attempts: HashMap<AttemptKey, StateRef>, /// All connections, keyed by ConnectionId. connections: ConnectionTableRef, /// The connections that have new events. 
active: HashSet<ActiveConnectionRef>, /// The set of connections that need immediate processing. waiting: VecDeque<StateRef>, /// Outstanding timers for connections. timers: Timer<StateRef>, /// Address validation logic, which determines whether we send a Retry. address_validation: Rc<RefCell<AddressValidation>>, /// Directory to create qlog traces in qlog_dir: Option<PathBuf>, /// Encrypted client hello (ECH) configuration. ech_config: Option<EchConfig>, } impl Server { /// Construct a new server. /// * `now` is the time that the server is instantiated. /// * `certs` is a list of the certificates that should be configured. /// * `protocols` is the preference list of ALPN values. /// * `anti_replay` is an anti-replay context. /// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This /// will be passed the value of the `extra` argument that was passed to /// `Connection::send_ticket` to see if it is OK. /// * `cid_generator` is responsible for generating connection IDs and parsing them; /// connection IDs produced by the manager cannot be zero-length. pub fn new( now: Instant, certs: &[impl AsRef<str>], protocols: &[impl AsRef<str>], anti_replay: AntiReplay, zero_rtt_checker: Box<dyn ZeroRttChecker>, cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, conn_params: ConnectionParameters, ) -> Res<Self> { let validation = AddressValidation::new(now, ValidateAddress::Never)?; Ok(Self { certs: certs.iter().map(|x| String::from(x.as_ref())).collect(), protocols: protocols.iter().map(|x| String::from(x.as_ref())).collect(), ciphers: Vec::new(), anti_replay, zero_rtt_checker: ServerZeroRttChecker::new(zero_rtt_checker), cid_generator, conn_params, active_attempts: HashMap::default(), connections: Rc::default(), active: HashSet::default(), waiting: VecDeque::default(), timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY), address_validation: Rc::new(RefCell::new(validation)), qlog_dir: None, ech_config: None, }) } /// Set or clear directory to create logs of connection events in QLOG format. pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) { self.qlog_dir = dir; } /// Set the policy for address validation. pub fn set_validation(&mut self, v: ValidateAddress) { self.address_validation.borrow_mut().set_validation(v); } /// Set the cipher suites that should be used. Set an empty value to use /// default values. 
pub fn set_ciphers(&mut self, ciphers: impl AsRef<[Cipher]>) { self.ciphers = Vec::from(ciphers.as_ref()); } pub fn enable_ech( &mut self, config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey, ) -> Res<()> { self.ech_config = Some(EchConfig::new(config, public_name, sk, pk)?); Ok(()) } pub fn ech_config(&self) -> &[u8] { self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded) } fn remove_timer(&mut self, c: &StateRef) { let last = c.borrow().last_timer; self.timers.remove(last, |t| Rc::ptr_eq(t, c)); } fn process_connection( &mut self, c: StateRef, dgram: Option<Datagram>, now: Instant, ) -> Option<Datagram> { qtrace!([self], "Process connection {:?}", c); let out = c.borrow_mut().process(dgram, now); match out { Output::Datagram(_) => { qtrace!([self], "Sending packet, added to waiting connections"); self.waiting.push_back(Rc::clone(&c)); } Output::Callback(delay) => { let next = now + delay; if next!= c.borrow().last_timer { qtrace!([self], "Change timer to {:?}", next); self.remove_timer(&c); c.borrow_mut().last_timer = next; self.timers.add(next, Rc::clone(&c)); } } _ => { self.remove_timer(&c); } } if c.borrow().has_events() { qtrace!([self], "Connection active: {:?}", c); self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) }); } if *c.borrow().state() > State::Handshaking { // Remove any active connection attempt now that this is no longer handshaking. if let Some(k) = c.borrow_mut().active_attempt.take() { self.active_attempts.remove(&k); } } if matches!(c.borrow().state(), State::Closed(_)) { c.borrow_mut().set_qlog(NeqoQlog::disabled()); self.connections .borrow_mut() .retain(|_, v|!Rc::ptr_eq(v, &c)); } out.dgram() } fn connection(&self, cid: &ConnectionIdRef) -> Option<StateRef> { self.connections.borrow().get(&cid[..]).map(Rc::clone) } fn handle_initial( &mut self, initial: InitialDetails, dgram: Datagram, now: Instant, ) -> Option<Datagram> { qdebug!([self], "Handle initial"); let res = self .address_validation .borrow() .validate(&initial.token, dgram.source(), now); match res { AddressValidationResult::Invalid => None, AddressValidationResult::Pass => self.connection_attempt(initial, dgram, None, now), AddressValidationResult::ValidRetry(orig_dcid) => { self.connection_attempt(initial, dgram, Some(orig_dcid), now) } AddressValidationResult::Validate => { qinfo!([self], "Send retry for {:?}", initial.dst_cid); let res = self.address_validation.borrow().generate_retry_token( &initial.dst_cid, dgram.source(), now, ); let token = if let Ok(t) = res { t } else { qerror!([self], "unable to generate token, dropping packet"); return None; }; if let Some(new_dcid) = self.cid_generator.borrow_mut().generate_cid() { let packet = PacketBuilder::retry( initial.version, &initial.src_cid, &new_dcid, &token, &initial.dst_cid, ); if let Ok(p) = packet { let retry = Datagram::new(dgram.destination(), dgram.source(), p); Some(retry) } else { qerror!([self], "unable to encode retry, dropping packet"); None } } else { qerror!([self], "no connection ID for retry, dropping packet"); None } } } } fn connection_attempt( &mut self, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: orig_dcid.as_ref().unwrap_or(&initial.dst_cid).clone(), }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle Initial for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else 
{ self.accept_connection(attempt_key, initial, dgram, orig_dcid, now) } } fn create_qlog_trace(&self, attempt_key: &AttemptKey) -> NeqoQlog { if let Some(qlog_dir) = &self.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); qlog_path.push(format!("{}.qlog", attempt_key.odcid)); // The original DCID is chosen by the client. Using create_new() // prevents attackers from overwriting existing logs. match OpenOptions::new() .write(true) .create_new(true) .open(&qlog_path) { Ok(f) => { qinfo!("Qlog output to {}", qlog_path.display()); let streamer = ::qlog::QlogStreamer::new( qlog::QLOG_VERSION.to_string(), Some("Neqo server qlog".to_string()), Some("Neqo server qlog".to_string()), None, std::time::Instant::now(), common::qlog::new_trace(Role::Server), Box::new(f), ); let n_qlog = NeqoQlog::enabled(streamer, qlog_path); match n_qlog { Ok(nql) => nql, Err(e) => { // Keep going but w/o qlogging qerror!("NeqoQlog error: {}", e); NeqoQlog::disabled() } } } Err(e) => { qerror!( "Could not open file {} for qlog output: {}", qlog_path.display(), e ); NeqoQlog::disabled() } } } else { NeqoQlog::disabled() } } fn setup_connection( &mut self, c: &mut Connection, attempt_key: &AttemptKey, initial: InitialDetails, orig_dcid: Option<ConnectionId>, ) { let zcheck = self.zero_rtt_checker.clone(); if c.server_enable_0rtt(&self.anti_replay, zcheck).is_err() { qwarn!([self], "Unable to enable 0-RTT"); } if let Some(odcid) = orig_dcid { // There was a retry, so set the connection IDs for. c.set_retry_cids(odcid, initial.src_cid, initial.dst_cid); } c.set_validation(Rc::clone(&self.address_validation)); c.set_qlog(self.create_qlog_trace(attempt_key)); if let Some(cfg) = &self.ech_config { if c.server_enable_ech(cfg.config, &cfg.public_name, &cfg.sk, &cfg.pk) .is_err() { qwarn!([self], "Unable to enable ECH"); } } } fn accept_connection( &mut self, attempt_key: AttemptKey, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { qinfo!([self], "Accept connection {:?}", attempt_key); // The internal connection ID manager that we use is not used directly. // Instead, wrap it so that we can save connection IDs. let cid_mgr = Rc::new(RefCell::new(ServerConnectionIdGenerator { c: Weak::new(), cid_generator: Rc::clone(&self.cid_generator), connections: Rc::clone(&self.connections), saved_cids: Vec::new(), })); let mut params = self.conn_params.clone(); params.get_versions_mut().set_initial(initial.version); let sconn = Connection::new_server( &self.certs, &self.protocols, Rc::clone(&cid_mgr) as _, params, ); if let Ok(mut c) = sconn { self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); let c = Rc::new(RefCell::new(ServerConnectionState { c, last_timer: now, active_attempt: Some(attempt_key.clone()), })); cid_mgr.borrow_mut().set_connection(Rc::clone(&c)); let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c)); debug_assert!(previous_attempt.is_none()); self.process_connection(c, Some(dgram), now) } else { qwarn!([self], "Unable to create connection"); None } } /// Handle 0-RTT packets that were sent with the client's choice of connection ID. /// Most 0-RTT will arrive this way. A client can usually send 1-RTT after it /// receives a connection ID from the server. 
fn handle_0rtt( &mut self, dgram: Datagram, dcid: ConnectionId, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: dcid, }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle 0-RTT for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { qdebug!([self], "Dropping 0-RTT for unknown connection"); None } } fn process_input(&mut self, dgram: Datagram, now: Instant) -> Option<Datagram> { qtrace!("Process datagram: {}", hex(&dgram[..])); // This is only looking at the first packet header in the datagram. // All packets in the datagram are routed to the same connection. let res = PublicPacket::decode(&
const MIN_INITIAL_PACKET_SIZE: usize = 1200; /// The size of timer buckets. This is higher than the actual timer granularity
random_line_split
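
This row also defines `ServerConnectionState`, which wraps a `Connection` and forwards to it through `Deref`/`DerefMut` so that server-side bookkeeping (`last_timer`, `active_attempt`) can ride along without hiding the connection's API. A small sketch of that wrapper pattern follows; `Conn` and `StateWrapper` are made-up names used only for illustration.

use std::ops::{Deref, DerefMut};

/// Stand-in for the wrapped `Connection` type.
struct Conn {
    name: String,
}

impl Conn {
    fn describe(&self) -> &str {
        &self.name
    }
}

/// Wrapper that adds extra bookkeeping (like `last_timer` and
/// `active_attempt` in `ServerConnectionState`) while still exposing the
/// inner value's methods through Deref/DerefMut.
struct StateWrapper {
    c: Conn,
    ticks: u32,
}

impl Deref for StateWrapper {
    type Target = Conn;
    fn deref(&self) -> &Self::Target {
        &self.c
    }
}

impl DerefMut for StateWrapper {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.c
    }
}

fn main() {
    let mut s = StateWrapper {
        c: Conn { name: "conn-1".into() },
        ticks: 0,
    };
    s.ticks += 1;
    // Methods of `Conn` are reachable directly on the wrapper via Deref.
    println!("{} ({} ticks)", s.describe(), s.ticks);
}
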
server.rs
} impl DerefMut for ServerConnectionState { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.c } } /// A `AttemptKey` is used to disambiguate connection attempts. /// Multiple connection attempts with the same key won't produce multiple connections. #[derive(Clone, Debug, Hash, PartialEq, Eq)] struct AttemptKey { // Using the remote address is sufficient for disambiguation, // until we support multiple local socket addresses. remote_address: SocketAddr, odcid: ConnectionId, } /// A `ServerZeroRttChecker` is a simple wrapper around a single checker. /// It uses `RefCell` so that the wrapped checker can be shared between /// multiple connections created by the server. #[derive(Clone, Debug)] struct ServerZeroRttChecker { checker: Rc<RefCell<Box<dyn ZeroRttChecker>>>, } impl ServerZeroRttChecker { pub fn new(checker: Box<dyn ZeroRttChecker>) -> Self { Self { checker: Rc::new(RefCell::new(checker)), } } } impl ZeroRttChecker for ServerZeroRttChecker { fn check(&self, token: &[u8]) -> ZeroRttCheckResult { self.checker.borrow().check(token) } } /// `InitialDetails` holds important information for processing `Initial` packets. struct InitialDetails { src_cid: ConnectionId, dst_cid: ConnectionId, token: Vec<u8>, version: Version, } impl InitialDetails { fn new(packet: &PublicPacket) -> Self { Self { src_cid: ConnectionId::from(packet.scid()), dst_cid: ConnectionId::from(packet.dcid()), token: packet.token().to_vec(), version: packet.version().unwrap(), } } } struct EchConfig { config: u8, public_name: String, sk: PrivateKey, pk: PublicKey, encoded: Vec<u8>, } impl EchConfig { fn new(config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey) -> Res<Self> { let encoded = encode_ech_config(config, public_name, pk)?; Ok(Self { config, public_name: String::from(public_name), sk: sk.clone(), pk: pk.clone(), encoded, }) } } pub struct Server { /// The names of certificates. certs: Vec<String>, /// The ALPN values that the server supports. protocols: Vec<String>, /// The cipher suites that the server supports. ciphers: Vec<Cipher>, /// Anti-replay configuration for 0-RTT. anti_replay: AntiReplay, /// A function for determining if 0-RTT can be accepted. zero_rtt_checker: ServerZeroRttChecker, /// A connection ID generator. cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, /// Connection parameters. conn_params: ConnectionParameters, /// Active connection attempts, keyed by `AttemptKey`. Initial packets with /// the same key are routed to the connection that was first accepted. /// This is cleared out when the connection is closed or established. active_attempts: HashMap<AttemptKey, StateRef>, /// All connections, keyed by ConnectionId. connections: ConnectionTableRef, /// The connections that have new events. active: HashSet<ActiveConnectionRef>, /// The set of connections that need immediate processing. waiting: VecDeque<StateRef>, /// Outstanding timers for connections. timers: Timer<StateRef>, /// Address validation logic, which determines whether we send a Retry. address_validation: Rc<RefCell<AddressValidation>>, /// Directory to create qlog traces in qlog_dir: Option<PathBuf>, /// Encrypted client hello (ECH) configuration. ech_config: Option<EchConfig>, } impl Server { /// Construct a new server. /// * `now` is the time that the server is instantiated. /// * `certs` is a list of the certificates that should be configured. /// * `protocols` is the preference list of ALPN values. /// * `anti_replay` is an anti-replay context. 
/// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This /// will be passed the value of the `extra` argument that was passed to /// `Connection::send_ticket` to see if it is OK. /// * `cid_generator` is responsible for generating connection IDs and parsing them; /// connection IDs produced by the manager cannot be zero-length. pub fn new( now: Instant, certs: &[impl AsRef<str>], protocols: &[impl AsRef<str>], anti_replay: AntiReplay, zero_rtt_checker: Box<dyn ZeroRttChecker>, cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, conn_params: ConnectionParameters, ) -> Res<Self> { let validation = AddressValidation::new(now, ValidateAddress::Never)?; Ok(Self { certs: certs.iter().map(|x| String::from(x.as_ref())).collect(), protocols: protocols.iter().map(|x| String::from(x.as_ref())).collect(), ciphers: Vec::new(), anti_replay, zero_rtt_checker: ServerZeroRttChecker::new(zero_rtt_checker), cid_generator, conn_params, active_attempts: HashMap::default(), connections: Rc::default(), active: HashSet::default(), waiting: VecDeque::default(), timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY), address_validation: Rc::new(RefCell::new(validation)), qlog_dir: None, ech_config: None, }) } /// Set or clear directory to create logs of connection events in QLOG format. pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) { self.qlog_dir = dir; } /// Set the policy for address validation. pub fn set_validation(&mut self, v: ValidateAddress) { self.address_validation.borrow_mut().set_validation(v); } /// Set the cipher suites that should be used. Set an empty value to use /// default values. pub fn set_ciphers(&mut self, ciphers: impl AsRef<[Cipher]>) { self.ciphers = Vec::from(ciphers.as_ref()); } pub fn enable_ech( &mut self, config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey, ) -> Res<()> { self.ech_config = Some(EchConfig::new(config, public_name, sk, pk)?); Ok(()) } pub fn ech_config(&self) -> &[u8] { self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded) } fn remove_timer(&mut self, c: &StateRef) { let last = c.borrow().last_timer; self.timers.remove(last, |t| Rc::ptr_eq(t, c)); } fn process_connection( &mut self, c: StateRef, dgram: Option<Datagram>, now: Instant, ) -> Option<Datagram> { qtrace!([self], "Process connection {:?}", c); let out = c.borrow_mut().process(dgram, now); match out { Output::Datagram(_) => { qtrace!([self], "Sending packet, added to waiting connections"); self.waiting.push_back(Rc::clone(&c)); } Output::Callback(delay) => { let next = now + delay; if next!= c.borrow().last_timer { qtrace!([self], "Change timer to {:?}", next); self.remove_timer(&c); c.borrow_mut().last_timer = next; self.timers.add(next, Rc::clone(&c)); } } _ => { self.remove_timer(&c); } } if c.borrow().has_events() { qtrace!([self], "Connection active: {:?}", c); self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) }); } if *c.borrow().state() > State::Handshaking { // Remove any active connection attempt now that this is no longer handshaking. 
if let Some(k) = c.borrow_mut().active_attempt.take() { self.active_attempts.remove(&k); } } if matches!(c.borrow().state(), State::Closed(_)) { c.borrow_mut().set_qlog(NeqoQlog::disabled()); self.connections .borrow_mut() .retain(|_, v|!Rc::ptr_eq(v, &c)); } out.dgram() } fn connection(&self, cid: &ConnectionIdRef) -> Option<StateRef> { self.connections.borrow().get(&cid[..]).map(Rc::clone) } fn handle_initial( &mut self, initial: InitialDetails, dgram: Datagram, now: Instant, ) -> Option<Datagram> { qdebug!([self], "Handle initial"); let res = self .address_validation .borrow() .validate(&initial.token, dgram.source(), now); match res { AddressValidationResult::Invalid => None, AddressValidationResult::Pass => self.connection_attempt(initial, dgram, None, now), AddressValidationResult::ValidRetry(orig_dcid) => { self.connection_attempt(initial, dgram, Some(orig_dcid), now) } AddressValidationResult::Validate => { qinfo!([self], "Send retry for {:?}", initial.dst_cid); let res = self.address_validation.borrow().generate_retry_token( &initial.dst_cid, dgram.source(), now, ); let token = if let Ok(t) = res { t } else { qerror!([self], "unable to generate token, dropping packet"); return None; }; if let Some(new_dcid) = self.cid_generator.borrow_mut().generate_cid() { let packet = PacketBuilder::retry( initial.version, &initial.src_cid, &new_dcid, &token, &initial.dst_cid, ); if let Ok(p) = packet { let retry = Datagram::new(dgram.destination(), dgram.source(), p); Some(retry) } else { qerror!([self], "unable to encode retry, dropping packet"); None } } else { qerror!([self], "no connection ID for retry, dropping packet"); None } } } } fn connection_attempt( &mut self, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: orig_dcid.as_ref().unwrap_or(&initial.dst_cid).clone(), }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle Initial for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { self.accept_connection(attempt_key, initial, dgram, orig_dcid, now) } } fn create_qlog_trace(&self, attempt_key: &AttemptKey) -> NeqoQlog { if let Some(qlog_dir) = &self.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); qlog_path.push(format!("{}.qlog", attempt_key.odcid)); // The original DCID is chosen by the client. Using create_new() // prevents attackers from overwriting existing logs. 
match OpenOptions::new() .write(true) .create_new(true) .open(&qlog_path) { Ok(f) => { qinfo!("Qlog output to {}", qlog_path.display()); let streamer = ::qlog::QlogStreamer::new( qlog::QLOG_VERSION.to_string(), Some("Neqo server qlog".to_string()), Some("Neqo server qlog".to_string()), None, std::time::Instant::now(), common::qlog::new_trace(Role::Server), Box::new(f), ); let n_qlog = NeqoQlog::enabled(streamer, qlog_path); match n_qlog { Ok(nql) => nql, Err(e) => { // Keep going but w/o qlogging qerror!("NeqoQlog error: {}", e); NeqoQlog::disabled() } } } Err(e) => { qerror!( "Could not open file {} for qlog output: {}", qlog_path.display(), e ); NeqoQlog::disabled() } } } else { NeqoQlog::disabled() } } fn setup_connection( &mut self, c: &mut Connection, attempt_key: &AttemptKey, initial: InitialDetails, orig_dcid: Option<ConnectionId>, ) { let zcheck = self.zero_rtt_checker.clone(); if c.server_enable_0rtt(&self.anti_replay, zcheck).is_err() { qwarn!([self], "Unable to enable 0-RTT"); } if let Some(odcid) = orig_dcid { // There was a retry, so set the connection IDs for. c.set_retry_cids(odcid, initial.src_cid, initial.dst_cid); } c.set_validation(Rc::clone(&self.address_validation)); c.set_qlog(self.create_qlog_trace(attempt_key)); if let Some(cfg) = &self.ech_config { if c.server_enable_ech(cfg.config, &cfg.public_name, &cfg.sk, &cfg.pk) .is_err() { qwarn!([self], "Unable to enable ECH"); } } } fn accept_connection( &mut self, attempt_key: AttemptKey, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { qinfo!([self], "Accept connection {:?}", attempt_key); // The internal connection ID manager that we use is not used directly. // Instead, wrap it so that we can save connection IDs. let cid_mgr = Rc::new(RefCell::new(ServerConnectionIdGenerator { c: Weak::new(), cid_generator: Rc::clone(&self.cid_generator), connections: Rc::clone(&self.connections), saved_cids: Vec::new(), })); let mut params = self.conn_params.clone(); params.get_versions_mut().set_initial(initial.version); let sconn = Connection::new_server( &self.certs, &self.protocols, Rc::clone(&cid_mgr) as _, params, ); if let Ok(mut c) = sconn { self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); let c = Rc::new(RefCell::new(ServerConnectionState { c, last_timer: now, active_attempt: Some(attempt_key.clone()), })); cid_mgr.borrow_mut().set_connection(Rc::clone(&c)); let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c)); debug_assert!(previous_attempt.is_none()); self.process_connection(c, Some(dgram), now) } else { qwarn!([self], "Unable to create connection"); None } } /// Handle 0-RTT packets that were sent with the client's choice of connection ID. /// Most 0-RTT will arrive this way. A client can usually send 1-RTT after it /// receives a connection ID from the server. fn handle_0rtt( &mut self, dgram: Datagram, dcid: ConnectionId, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: dcid, }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle 0-RTT for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { qdebug!([self], "Dropping 0-RTT for unknown connection"); None } } fn process_input(&mut self, dgram: Datagram, now: Instant) -> Option<Datagram> { qtrace!("Process datagram: {}", hex(&dgram[..])); // This is only looking at the first packet header in the datagram. 
// All packets in the datagram are routed to the same connection. let res = PublicPacket::decode(&dgram[..], self.cid_generator.borrow().as_decoder()); let (packet, _remainder) = match res { Ok(res) => res, _ => { qtrace!([self], "Discarding {:?}", dgram); return None; } }; // Finding an existing connection. Should be the most common case. if let Some(c) = self.connection(packet.dcid()) { return self.process_connection(c, Some(dgram), now); } if
{ &self.c }
identifier_body
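
`create_qlog_trace` in this row opens the per-connection qlog file with `create_new(true)` because the file name is derived from the client-chosen original DCID; failing instead of overwriting stops one client from clobbering another's trace. The sketch below shows that open-or-skip behaviour with plain `std::fs`; the path, file contents, and the `open_trace` helper are assumptions for the example, not the server's actual qlog plumbing.

use std::fs::OpenOptions;
use std::io::Write;

/// Open a trace file, refusing to overwrite an existing one. Mirrors the
/// fallback in the original code: on error, logging is simply disabled.
fn open_trace(path: &std::path::Path) -> Option<std::fs::File> {
    match OpenOptions::new().write(true).create_new(true).open(path) {
        Ok(f) => Some(f),
        Err(e) => {
            // Keep going without a trace, like NeqoQlog::disabled().
            eprintln!("could not open {} for trace output: {}", path.display(), e);
            None
        }
    }
}

fn main() {
    let path = std::env::temp_dir().join("example-odcid.qlog");
    if let Some(mut f) = open_trace(&path) {
        let _ = writeln!(f, "trace placeholder");
        println!("wrote {}", path.display());
    }
    // A second attempt fails because the file already exists.
    assert!(open_trace(&path).is_none());
    let _ = std::fs::remove_file(&path);
}
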
server.rs
{ // Using the remote address is sufficient for disambiguation, // until we support multiple local socket addresses. remote_address: SocketAddr, odcid: ConnectionId, } /// A `ServerZeroRttChecker` is a simple wrapper around a single checker. /// It uses `RefCell` so that the wrapped checker can be shared between /// multiple connections created by the server. #[derive(Clone, Debug)] struct ServerZeroRttChecker { checker: Rc<RefCell<Box<dyn ZeroRttChecker>>>, } impl ServerZeroRttChecker { pub fn new(checker: Box<dyn ZeroRttChecker>) -> Self { Self { checker: Rc::new(RefCell::new(checker)), } } } impl ZeroRttChecker for ServerZeroRttChecker { fn check(&self, token: &[u8]) -> ZeroRttCheckResult { self.checker.borrow().check(token) } } /// `InitialDetails` holds important information for processing `Initial` packets. struct InitialDetails { src_cid: ConnectionId, dst_cid: ConnectionId, token: Vec<u8>, version: Version, } impl InitialDetails { fn new(packet: &PublicPacket) -> Self { Self { src_cid: ConnectionId::from(packet.scid()), dst_cid: ConnectionId::from(packet.dcid()), token: packet.token().to_vec(), version: packet.version().unwrap(), } } } struct EchConfig { config: u8, public_name: String, sk: PrivateKey, pk: PublicKey, encoded: Vec<u8>, } impl EchConfig { fn new(config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey) -> Res<Self> { let encoded = encode_ech_config(config, public_name, pk)?; Ok(Self { config, public_name: String::from(public_name), sk: sk.clone(), pk: pk.clone(), encoded, }) } } pub struct Server { /// The names of certificates. certs: Vec<String>, /// The ALPN values that the server supports. protocols: Vec<String>, /// The cipher suites that the server supports. ciphers: Vec<Cipher>, /// Anti-replay configuration for 0-RTT. anti_replay: AntiReplay, /// A function for determining if 0-RTT can be accepted. zero_rtt_checker: ServerZeroRttChecker, /// A connection ID generator. cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, /// Connection parameters. conn_params: ConnectionParameters, /// Active connection attempts, keyed by `AttemptKey`. Initial packets with /// the same key are routed to the connection that was first accepted. /// This is cleared out when the connection is closed or established. active_attempts: HashMap<AttemptKey, StateRef>, /// All connections, keyed by ConnectionId. connections: ConnectionTableRef, /// The connections that have new events. active: HashSet<ActiveConnectionRef>, /// The set of connections that need immediate processing. waiting: VecDeque<StateRef>, /// Outstanding timers for connections. timers: Timer<StateRef>, /// Address validation logic, which determines whether we send a Retry. address_validation: Rc<RefCell<AddressValidation>>, /// Directory to create qlog traces in qlog_dir: Option<PathBuf>, /// Encrypted client hello (ECH) configuration. ech_config: Option<EchConfig>, } impl Server { /// Construct a new server. /// * `now` is the time that the server is instantiated. /// * `certs` is a list of the certificates that should be configured. /// * `protocols` is the preference list of ALPN values. /// * `anti_replay` is an anti-replay context. /// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This /// will be passed the value of the `extra` argument that was passed to /// `Connection::send_ticket` to see if it is OK. /// * `cid_generator` is responsible for generating connection IDs and parsing them; /// connection IDs produced by the manager cannot be zero-length. 
pub fn new( now: Instant, certs: &[impl AsRef<str>], protocols: &[impl AsRef<str>], anti_replay: AntiReplay, zero_rtt_checker: Box<dyn ZeroRttChecker>, cid_generator: Rc<RefCell<dyn ConnectionIdGenerator>>, conn_params: ConnectionParameters, ) -> Res<Self> { let validation = AddressValidation::new(now, ValidateAddress::Never)?; Ok(Self { certs: certs.iter().map(|x| String::from(x.as_ref())).collect(), protocols: protocols.iter().map(|x| String::from(x.as_ref())).collect(), ciphers: Vec::new(), anti_replay, zero_rtt_checker: ServerZeroRttChecker::new(zero_rtt_checker), cid_generator, conn_params, active_attempts: HashMap::default(), connections: Rc::default(), active: HashSet::default(), waiting: VecDeque::default(), timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY), address_validation: Rc::new(RefCell::new(validation)), qlog_dir: None, ech_config: None, }) } /// Set or clear directory to create logs of connection events in QLOG format. pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) { self.qlog_dir = dir; } /// Set the policy for address validation. pub fn set_validation(&mut self, v: ValidateAddress) { self.address_validation.borrow_mut().set_validation(v); } /// Set the cipher suites that should be used. Set an empty value to use /// default values. pub fn set_ciphers(&mut self, ciphers: impl AsRef<[Cipher]>) { self.ciphers = Vec::from(ciphers.as_ref()); } pub fn enable_ech( &mut self, config: u8, public_name: &str, sk: &PrivateKey, pk: &PublicKey, ) -> Res<()> { self.ech_config = Some(EchConfig::new(config, public_name, sk, pk)?); Ok(()) } pub fn ech_config(&self) -> &[u8] { self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded) } fn remove_timer(&mut self, c: &StateRef) { let last = c.borrow().last_timer; self.timers.remove(last, |t| Rc::ptr_eq(t, c)); } fn process_connection( &mut self, c: StateRef, dgram: Option<Datagram>, now: Instant, ) -> Option<Datagram> { qtrace!([self], "Process connection {:?}", c); let out = c.borrow_mut().process(dgram, now); match out { Output::Datagram(_) => { qtrace!([self], "Sending packet, added to waiting connections"); self.waiting.push_back(Rc::clone(&c)); } Output::Callback(delay) => { let next = now + delay; if next!= c.borrow().last_timer { qtrace!([self], "Change timer to {:?}", next); self.remove_timer(&c); c.borrow_mut().last_timer = next; self.timers.add(next, Rc::clone(&c)); } } _ => { self.remove_timer(&c); } } if c.borrow().has_events() { qtrace!([self], "Connection active: {:?}", c); self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) }); } if *c.borrow().state() > State::Handshaking { // Remove any active connection attempt now that this is no longer handshaking. 
if let Some(k) = c.borrow_mut().active_attempt.take() { self.active_attempts.remove(&k); } } if matches!(c.borrow().state(), State::Closed(_)) { c.borrow_mut().set_qlog(NeqoQlog::disabled()); self.connections .borrow_mut() .retain(|_, v|!Rc::ptr_eq(v, &c)); } out.dgram() } fn connection(&self, cid: &ConnectionIdRef) -> Option<StateRef> { self.connections.borrow().get(&cid[..]).map(Rc::clone) } fn handle_initial( &mut self, initial: InitialDetails, dgram: Datagram, now: Instant, ) -> Option<Datagram> { qdebug!([self], "Handle initial"); let res = self .address_validation .borrow() .validate(&initial.token, dgram.source(), now); match res { AddressValidationResult::Invalid => None, AddressValidationResult::Pass => self.connection_attempt(initial, dgram, None, now), AddressValidationResult::ValidRetry(orig_dcid) => { self.connection_attempt(initial, dgram, Some(orig_dcid), now) } AddressValidationResult::Validate => { qinfo!([self], "Send retry for {:?}", initial.dst_cid); let res = self.address_validation.borrow().generate_retry_token( &initial.dst_cid, dgram.source(), now, ); let token = if let Ok(t) = res { t } else { qerror!([self], "unable to generate token, dropping packet"); return None; }; if let Some(new_dcid) = self.cid_generator.borrow_mut().generate_cid() { let packet = PacketBuilder::retry( initial.version, &initial.src_cid, &new_dcid, &token, &initial.dst_cid, ); if let Ok(p) = packet { let retry = Datagram::new(dgram.destination(), dgram.source(), p); Some(retry) } else { qerror!([self], "unable to encode retry, dropping packet"); None } } else { qerror!([self], "no connection ID for retry, dropping packet"); None } } } } fn connection_attempt( &mut self, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: orig_dcid.as_ref().unwrap_or(&initial.dst_cid).clone(), }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle Initial for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { self.accept_connection(attempt_key, initial, dgram, orig_dcid, now) } } fn create_qlog_trace(&self, attempt_key: &AttemptKey) -> NeqoQlog { if let Some(qlog_dir) = &self.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); qlog_path.push(format!("{}.qlog", attempt_key.odcid)); // The original DCID is chosen by the client. Using create_new() // prevents attackers from overwriting existing logs. 
match OpenOptions::new() .write(true) .create_new(true) .open(&qlog_path) { Ok(f) => { qinfo!("Qlog output to {}", qlog_path.display()); let streamer = ::qlog::QlogStreamer::new( qlog::QLOG_VERSION.to_string(), Some("Neqo server qlog".to_string()), Some("Neqo server qlog".to_string()), None, std::time::Instant::now(), common::qlog::new_trace(Role::Server), Box::new(f), ); let n_qlog = NeqoQlog::enabled(streamer, qlog_path); match n_qlog { Ok(nql) => nql, Err(e) => { // Keep going but w/o qlogging qerror!("NeqoQlog error: {}", e); NeqoQlog::disabled() } } } Err(e) => { qerror!( "Could not open file {} for qlog output: {}", qlog_path.display(), e ); NeqoQlog::disabled() } } } else { NeqoQlog::disabled() } } fn setup_connection( &mut self, c: &mut Connection, attempt_key: &AttemptKey, initial: InitialDetails, orig_dcid: Option<ConnectionId>, ) { let zcheck = self.zero_rtt_checker.clone(); if c.server_enable_0rtt(&self.anti_replay, zcheck).is_err() { qwarn!([self], "Unable to enable 0-RTT"); } if let Some(odcid) = orig_dcid { // There was a retry, so set the connection IDs for. c.set_retry_cids(odcid, initial.src_cid, initial.dst_cid); } c.set_validation(Rc::clone(&self.address_validation)); c.set_qlog(self.create_qlog_trace(attempt_key)); if let Some(cfg) = &self.ech_config { if c.server_enable_ech(cfg.config, &cfg.public_name, &cfg.sk, &cfg.pk) .is_err() { qwarn!([self], "Unable to enable ECH"); } } } fn accept_connection( &mut self, attempt_key: AttemptKey, initial: InitialDetails, dgram: Datagram, orig_dcid: Option<ConnectionId>, now: Instant, ) -> Option<Datagram> { qinfo!([self], "Accept connection {:?}", attempt_key); // The internal connection ID manager that we use is not used directly. // Instead, wrap it so that we can save connection IDs. let cid_mgr = Rc::new(RefCell::new(ServerConnectionIdGenerator { c: Weak::new(), cid_generator: Rc::clone(&self.cid_generator), connections: Rc::clone(&self.connections), saved_cids: Vec::new(), })); let mut params = self.conn_params.clone(); params.get_versions_mut().set_initial(initial.version); let sconn = Connection::new_server( &self.certs, &self.protocols, Rc::clone(&cid_mgr) as _, params, ); if let Ok(mut c) = sconn { self.setup_connection(&mut c, &attempt_key, initial, orig_dcid); let c = Rc::new(RefCell::new(ServerConnectionState { c, last_timer: now, active_attempt: Some(attempt_key.clone()), })); cid_mgr.borrow_mut().set_connection(Rc::clone(&c)); let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c)); debug_assert!(previous_attempt.is_none()); self.process_connection(c, Some(dgram), now) } else { qwarn!([self], "Unable to create connection"); None } } /// Handle 0-RTT packets that were sent with the client's choice of connection ID. /// Most 0-RTT will arrive this way. A client can usually send 1-RTT after it /// receives a connection ID from the server. fn handle_0rtt( &mut self, dgram: Datagram, dcid: ConnectionId, now: Instant, ) -> Option<Datagram> { let attempt_key = AttemptKey { remote_address: dgram.source(), odcid: dcid, }; if let Some(c) = self.active_attempts.get(&attempt_key) { qdebug!( [self], "Handle 0-RTT for existing connection attempt {:?}", attempt_key ); let c = Rc::clone(c); self.process_connection(c, Some(dgram), now) } else { qdebug!([self], "Dropping 0-RTT for unknown connection"); None } } fn process_input(&mut self, dgram: Datagram, now: Instant) -> Option<Datagram> { qtrace!("Process datagram: {}", hex(&dgram[..])); // This is only looking at the first packet header in the datagram. 
// All packets in the datagram are routed to the same connection. let res = PublicPacket::decode(&dgram[..], self.cid_generator.borrow().as_decoder()); let (packet, _remainder) = match res { Ok(res) => res, _ => { qtrace!([self], "Discarding {:?}", dgram); return None; } }; // Finding an existing connection. Should be the most common case. if let Some(c) = self.connection(packet.dcid()) { return self.process_connection(c, Some(dgram), now); } if packet.packet_type() == PacketType::Short { // TODO send a stateless reset here. qtrace!([self], "Short header packet for an unknown connection"); return None; } if packet.packet_type() == PacketType::OtherVersion
AttemptKey
identifier_name
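
The identifier filled in for this row is `AttemptKey`, the `Hash`/`Eq` struct that keys `active_attempts` so a retransmitted Initial maps back to the same connection attempt instead of spawning a second connection. Here is a minimal sketch of that keyed-lookup idea; `Key` and the `u32` state value are placeholders for the real `AttemptKey` and `StateRef`.

use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

/// Stand-in for `AttemptKey`: the remote address plus the original
/// destination connection ID is enough to tell connection attempts apart.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct Key {
    remote_address: SocketAddr,
    odcid: Vec<u8>,
}

fn main() {
    // Maps an attempt key to per-attempt state (an id here; a shared
    // `StateRef` in the real server).
    let mut attempts: HashMap<Key, u32> = HashMap::new();

    let key = Key {
        remote_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4433),
        odcid: vec![0x0a, 0x0b, 0x0c],
    };

    // The first Initial with this key creates a new attempt...
    let state = attempts.entry(key.clone()).or_insert(1);
    println!("attempt state: {}", state);
    assert_eq!(attempts.len(), 1);

    // ...and a retransmitted Initial with the same key is routed to the
    // existing attempt instead of creating a second one.
    attempts.entry(key).or_insert(2);
    assert_eq!(attempts.len(), 1);
}
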
spacetime.rs
// // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #![allow(dead_code)] //! Most bundles can mutate the EinsteinDB spacetime by transacting assertions: //! //! - they can add (and, eventually, retract and alter) recognized causetIds using the `:edb/causetid` //! attribute; //! //! - they can add (and, eventually, retract and alter) schemaReplicant attributes using various `:edb/*` //! attributes; //! //! - eventually, they will be able to add (and possibly retract) solitonId partitions using a EinsteinDB //! equivalent (perhaps :edb/partition or :edb.partition/start) to Causetic's `:edb.install/partition` //! attribute. //! //! This module recognizes, validates, applies, and reports on these mutations. use failure::ResultExt; use std::collections::{BTreeMap, BTreeSet}; use std::collections::btree_map::Entry; use add_retract_alter_set::{ AddRetractAlterSet, }; use edbn::symbols; use causetids; use causetq_pull_promises::errors::{ DbErrorKind, Result, }; use allegrosql_promises::{ attribute, SolitonId, MinkowskiType, MinkowskiValueType, }; use causetq_allegrosql::{ SchemaReplicant, AttributeMap, }; use schemaReplicant::{ AttributeBuilder, AttributeValidation, }; use types::{ EAV, }; /// An alteration to an attribute. #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)] pub enum AttributeAlteration { /// From http://blog.Causetic.com/2014/01/schemaReplicant-alteration.html: /// - rename attributes /// - rename your own programmatic causetIdities (uses of :edb/causetid) /// - add or remove indexes Index, /// - add or remove uniqueness constraints Unique, /// - change attribute cardinality Cardinality, /// - change whether history is retained for an attribute NoHistory, /// - change whether an attribute is treated as a component IsComponent, } /// An alteration to an causetid. #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)] pub enum CausetIdAlteration { CausetId(symbols::Keyword), } /// Summarizes changes to spacetime such as a a `SchemaReplicant` and (in the future) a `PartitionMap`. #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)] pub struct SpacetimeReport { // SolitonIds that were not present in the original `AttributeMap` that was mutated. pub attributes_installed: BTreeSet<SolitonId>, // SolitonIds that were present in the original `AttributeMap` that was mutated, together with a // representation of the mutations that were applied. pub attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>>, // CausetIds that were installed into the `AttributeMap`. pub causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration>, } impl SpacetimeReport { pub fn attributes_did_change(&self) -> bool { !(self.attributes_installed.is_empty() && self.attributes_altered.is_empty()) } } /// Update an 'AttributeMap' in place given two sets of causetid and attribute retractions, which /// together contain enough information to reason about a "schemaReplicant retraction". 
/// /// SchemaReplicant may only be retracted if all of its necessary attributes are being retracted: /// - :edb/causetid, :edb/valueType, :edb/cardinality. /// /// Note that this is currently incomplete/flawed: /// - we're allowing optional attributes to not be retracted and dangle afterwards /// /// Returns a set of attribute retractions which do not involve schemaReplicant-defining attributes. fn update_attribute_map_from_schemaReplicant_retractions(attribute_map: &mut AttributeMap, retractions: Vec<EAV>, causetId_retractions: &BTreeMap<SolitonId, symbols::Keyword>) -> Result<Vec<EAV>> { // Process retractions of schemaReplicant attributes first. It's allowed to retract a schemaReplicant attribute // if all of the schemaReplicant-defining schemaReplicant attributes are being retracted. // A defining set of attributes is :edb/causetid, :edb/valueType, :edb/cardinality. let mut filtered_retractions = vec![]; let mut suspect_retractions = vec![]; // Filter out sets of schemaReplicant altering retractions. let mut eas = BTreeMap::new(); for (e, a, v) in retractions.into_iter() { if causetids::is_a_schemaReplicant_attribute(a) { eas.entry(e).or_insert(vec![]).push(a); suspect_retractions.push((e, a, v)); } else { filtered_retractions.push((e, a, v)); } } // TODO (see https://github.com/whtcorpsinc/edb/issues/796). // Retraction of causetIds is allowed, but if an causetid names a schemaReplicant attribute, then we should enforce // retraction of all of the associated schemaReplicant attributes. // Unfortunately, our current in-memory schemaReplicant representation (namely, how we define an Attribute) is not currently // rich enough: it lacks distinction between presence and absence, and instead assumes default values. // Currently, in order to do this enforcement correctly, we'd need to inspect 'causets'. // Here is an incorrect way to enforce this. It's incorrect because it prevents us from retracting non-"schemaReplicant naming" causetIds. // for retracted_e in causetId_retractions.keys() { // if!eas.contains_key(retracted_e) { // bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting :edb/causetid of a schemaReplicant without retracting its defining attributes is not permitted."))); // } // } for (e, a, v) in suspect_retractions.into_iter() { let attributes = eas.get(&e).unwrap(); // Found a set of retractions which negate a schemaReplicant. if attributes.contains(&causetids::DB_CARDINALITY) && attributes.contains(&causetids::DB_VALUE_TYPE) { // Ensure that corresponding :edb/causetid is also being retracted at the same time. if causetId_retractions.contains_key(&e) { // Remove attributes corresponding to retracted attribute. attribute_map.remove(&e); } else { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting defining attributes of a schemaReplicant without retracting its :edb/causetid is not permitted."))); } } else { filtered_retractions.push((e, a, v)); } } Ok(filtered_retractions) } /// Update a `AttributeMap` in place from the given `[e a typed_value]` triples. /// /// This is suiBlock for producing a `AttributeMap` from the `schemaReplicant` materialized view, which does not /// contain install and alter markers. /// /// Returns a report summarizing the mutations that were applied. pub fn update_attribute_map_from_causetid_triples(attribute_map: &mut AttributeMap, assertions: Vec<EAV>, retractions: Vec<EAV>) -> Result<SpacetimeReport> { fn
(attribute_id: SolitonId, existing: &AttributeMap) -> AttributeBuilder { existing.get(&attribute_id) .map(AttributeBuilder::to_modify_attribute) .unwrap_or_else(AttributeBuilder::default) } // Group mutations by impacted solitonId. let mut builders: BTreeMap<SolitonId, AttributeBuilder> = BTreeMap::new(); // For retractions, we start with an attribute builder that's pre-populated with the existing // attribute values. That allows us to check existing values and unset them. for (solitonId, attr, ref value) in retractions { let builder = builders.entry(solitonId).or_insert_with(|| attribute_builder_to_modify(solitonId, attribute_map)); match attr { // You can only retract :edb/unique, :edb/isComponent; all others must be altered instead // of retracted, or are not allowed to change. causetids::DB_IS_COMPONENT => { match value { &MinkowskiType::Boolean(v) if builder.component == Some(v) => { builder.component(false); }, v => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/isComponent with the wrong value {:?}.", v))); }, } }, causetids::DB_UNIQUE => { match *value { MinkowskiType::Ref(u) => { match u { causetids::DB_UNIQUE_VALUE if builder.unique == Some(Some(attribute::Unique::Value)) => { builder.non_unique(); }, causetids::DB_UNIQUE_CausetIDITY if builder.unique == Some(Some(attribute::Unique::CausetIdity)) => { builder.non_unique(); }, v => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/unique with the wrong value {}.", v))); }, } }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [:edb/retract _ :edb/unique :edb.unique/_] but got [:edb/retract {} :edb/unique {:?}]", solitonId, value))) } }, causetids::DB_VALUE_TYPE | causetids::DB_CARDINALITY | causetids::DB_INDEX | causetids::DB_FULLTEXT | causetids::DB_NO_HISTORY => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting attribute {} for instanton {} not permitted.", attr, solitonId))); }, _ => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId))) } } } for (solitonId, attr, ref value) in assertions.into_iter() { // For assertions, we can start with an empty attribute builder. let builder = builders.entry(solitonId).or_insert_with(Default::default); // TODO: improve error messages throughout. match attr { causetids::DB_VALUE_TYPE => { match *value { MinkowskiType::Ref(causetids::DB_TYPE_BOOLEAN) => { builder.value_type(MinkowskiValueType::Boolean); }, MinkowskiType::Ref(causetids::DB_TYPE_DOUBLE) => { builder.value_type(MinkowskiValueType::Double); }, MinkowskiType::Ref(causetids::DB_TYPE_INSTANT) => { builder.value_type(MinkowskiValueType::Instant); }, MinkowskiType::Ref(causetids::DB_TYPE_KEYWORD) => { builder.value_type(MinkowskiValueType::Keyword); }, MinkowskiType::Ref(causetids::DB_TYPE_LONG) => { builder.value_type(MinkowskiValueType::Long); }, MinkowskiType::Ref(causetids::DB_TYPE_REF) => { builder.value_type(MinkowskiValueType::Ref); }, MinkowskiType::Ref(causetids::DB_TYPE_STRING) => { builder.value_type(MinkowskiValueType::String); }, MinkowskiType::Ref(causetids::DB_TYPE_UUID) => { builder.value_type(MinkowskiValueType::Uuid); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/valueType :edb.type/*] but got [... 
:edb/valueType {:?}] for solitonId {} and attribute {}", value, solitonId, attr))) } }, causetids::DB_CARDINALITY => { match *value { MinkowskiType::Ref(causetids::DB_CARDINALITY_MANY) => { builder.multival(true); }, MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE) => { builder.multival(false); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/cardinality :edb.cardinality/many|:edb.cardinality/one] but got [... :edb/cardinality {:?}]", value))) } }, causetids::DB_UNIQUE => { match *value { MinkowskiType::Ref(causetids::DB_UNIQUE_VALUE) => { builder.unique(attribute::Unique::Value); }, MinkowskiType::Ref(causetids::DB_UNIQUE_CausetIDITY) => { builder.unique(attribute::Unique::CausetIdity); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/unique :edb.unique/value|:edb.unique/causetIdity] but got [... :edb/unique {:?}]", value))) } }, causetids::DB_INDEX => { match *value { MinkowskiType::Boolean(x) => { builder.index(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/index true|false] but got [... :edb/index {:?}]", value))) } }, causetids::DB_FULLTEXT => { match *value { MinkowskiType::Boolean(x) => { builder.fulltext(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/fulltext true|false] but got [... :edb/fulltext {:?}]", value))) } }, causetids::DB_IS_COMPONENT => { match *value { MinkowskiType::Boolean(x) => { builder.component(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/isComponent true|false] but got [... :edb/isComponent {:?}]", value))) } }, causetids::DB_NO_HISTORY => { match *value { MinkowskiType::Boolean(x) => { builder.no_history(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/noHistory true|false] but got [... :edb/noHistory {:?}]", value))) } }, _ => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId))) } } }; let mut attributes_installed: BTreeSet<SolitonId> = BTreeSet::default(); let mut attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>> = BTreeMap::default(); for (solitonId, builder) in builders.into_iter() { match attribute_map.entry(solitonId) { Entry::Vacant(entry) => { // Validate once… builder.validate_install_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for new attribute with solitonId {} is not valid", solitonId)))?; // … and twice, now we have the Attribute. let a = builder.build(); a.validate(|| solitonId.to_string())?; entry.insert(a); attributes_installed.insert(solitonId); }, Entry::Occupied(mut entry) => { builder.validate_alter_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for existing attribute with solitonId {} is not valid", solitonId)))?; let mutations = builder.mutate(entry.get_mut()); attributes_altered.insert(solitonId, mutations); }, } } Ok(SpacetimeReport { attributes_installed: attributes_installed, attributes_altered: attributes_altered, causetIds_altered: BTreeMap::default(), }) } /// Update a `SchemaReplicant` in place from the given `[e a typed_value added]` quadruples. /// /// This layer enforces that causetid assertions of the form [solitonId :edb/causetid...] (as distinct from /// attribute assertions) are present and correct. /// /// This is suiBlock for mutating a `SchemaReplicant` from an applied transaction. 
/// /// Returns a report summarizing the mutations that were applied. pub fn update_schemaReplicant_from_causetid_quadruples<U>(schemaReplicant: &mut SchemaReplicant, assertions: U) -> Result<SpacetimeReport> where U: IntoIterator<Item=(SolitonId, SolitonId, MinkowskiType, bool)> { // Group attribute assertions into asserted, retracted, and updated. We assume all our // attribute assertions are :edb/cardinality :edb.cardinality/one (so they'll only be added or // retracted at most once), which means all attribute alterations are simple changes from an old // value to a new value. let mut attribute_set: AddRetractAlterSet<(SolitonId, SolitonId), MinkowskiType> = AddRetractAlterSet::default(); let mut causetId_set: AddRetractAlterSet<SolitonId, symbols::Keyword> = AddRetractAlterSet::default(); for (e, a, typed_value, added) in assertions.into_iter() { // Here we handle :edb/causetid assertions. if a == causetids::DB_CausetID { if let MinkowskiType::Keyword(ref keyword) = typed_value { causetId_set.witness(e, keyword.as_ref().clone(), added); continue } else { // Something is terribly wrong: the schemaReplicant ensures we have a keyword. unreachable!(); } } attribute_set.witness((e, a), typed_value, added); } // Collect triples. let retracted_triples = attribute_set.retracted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value)); let asserted_triples = attribute_set.asserted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value)); let altered_triples = attribute_set.altered.into_iter().map(|((e, a), (_old_value, new_value))| (e, a, new_value)); // First we process retractions which remove schemaReplicant. // This operation consumes our current list of attribute retractions, producing a filtered one. let non_schemaReplicant_retractions = update_attribute_map_from_schemaReplicant_retractions(&mut schemaReplicant.attribute_map, retracted_triples.collect(), &causetId_set.retracted)?; // Now we process all other retractions. let report = update_attribute_map_from_causetid_triples(&mut schemaReplicant.attribute_map, asserted_triples.chain(altered_triples).collect(), non_schemaReplicant_retractions)?; let mut causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration> = BTreeMap::new(); // Asserted, altered, or retracted :edb/causetIds update the relevant causetids. for (solitonId, causetid) in causetId_set.asserted { schemaReplicant.causetid_map.insert(solitonId, causetid.clone()); schemaReplicant.causetId_map.insert(causetid.clone(), solitonId); causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(causetid.clone())); } for (solitonId, (old_causetId, new_causetId)) in causetId_set.altered { schemaReplicant.causetid_map.insert(solitonId, new_causetId.clone()); // Overwrite existing. schemaReplicant.causetId_map.remove(&old_causetId); // Remove old. schemaReplicant.causetId_map.insert(new_causetId.clone(), solitonId); // Insert new. 
causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(new_causetId.clone())); } for (solitonId, causetid) in &causetId_set.retracted { schemaReplicant.causetid_map.remove(solitonId); schemaReplicant.causetId_map.remove(causetid); causetIds_altered.insert(*solitonId, CausetIdAlteration::CausetId(causetid.clone())); } // Component attributes need to change if either: // - a component attribute changed // - a schemaReplicant attribute that was a component was retracted // These two checks are a rather heavy-handed way of keeping schemaReplicant's // component_attributes up-to-date: most of the time we'll rebuild it // even though it's not necessary (e.g. a schemaReplicant attribute that's _not_ // a component was removed, or a non-component related attribute changed). if report.attributes_did_change() || causetId_set.retracted.len() > 0 { schemaReplicant.update_component_attributes(); } Ok(SpacetimeReport { causetIds_altered: causetIds_alte
attribute_builder_to_modify
identifier_name
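// The spacetime.rs row above groups schema mutations by the entity they affect
// before validating and applying them (one AttributeBuilder per solitonId, created
// lazily via the map's entry API). A minimal, self-contained sketch of that
// grouping pattern using only std types -- the `Builder` struct and the attribute
// name strings here are hypothetical stand-ins, not the crate's real API:
use std::collections::BTreeMap;

type EntityId = i64;

#[derive(Default, Debug)]
struct Builder {
    index: Option<bool>,
    fulltext: Option<bool>,
}

fn group_assertions(assertions: &[(EntityId, &str, bool)]) -> BTreeMap<EntityId, Builder> {
    let mut builders: BTreeMap<EntityId, Builder> = BTreeMap::new();
    for &(e, attr, value) in assertions {
        // One builder per impacted entity, created on first use.
        let b = builders.entry(e).or_insert_with(Default::default);
        match attr {
            ":db/index" => b.index = Some(value),
            ":db/fulltext" => b.fulltext = Some(value),
            _ => { /* a real implementation would reject unrecognized attributes */ }
        }
    }
    builders
}

fn main() {
    let grouped = group_assertions(&[(100, ":db/index", true), (100, ":db/fulltext", false)]);
    assert_eq!(grouped.len(), 1); // both assertions landed on the same builder
    println!("{:?}", grouped);
}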
spacetime.rs
INC // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #![allow(dead_code)] //! Most bundles can mutate the EinsteinDB spacetime by transacting assertions: //! //! - they can add (and, eventually, retract and alter) recognized causetIds using the `:edb/causetid` //! attribute; //! //! - they can add (and, eventually, retract and alter) schemaReplicant attributes using various `:edb/*` //! attributes; //! //! - eventually, they will be able to add (and possibly retract) solitonId partitions using a EinsteinDB //! equivalent (perhaps :edb/partition or :edb.partition/start) to Causetic's `:edb.install/partition` //! attribute. //! //! This module recognizes, validates, applies, and reports on these mutations. use failure::ResultExt; use std::collections::{BTreeMap, BTreeSet}; use std::collections::btree_map::Entry; use add_retract_alter_set::{ AddRetractAlterSet, }; use edbn::symbols; use causetids; use causetq_pull_promises::errors::{ DbErrorKind, Result, }; use allegrosql_promises::{ attribute, SolitonId, MinkowskiType, MinkowskiValueType, }; use causetq_allegrosql::{ SchemaReplicant, AttributeMap, }; use schemaReplicant::{ AttributeBuilder, AttributeValidation, }; use types::{ EAV, }; /// An alteration to an attribute. #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)] pub enum AttributeAlteration { /// From http://blog.Causetic.com/2014/01/schemaReplicant-alteration.html: /// - rename attributes /// - rename your own programmatic causetIdities (uses of :edb/causetid) /// - add or remove indexes Index, /// - add or remove uniqueness constraints Unique, /// - change attribute cardinality Cardinality, /// - change whether history is retained for an attribute NoHistory, /// - change whether an attribute is treated as a component IsComponent, } /// An alteration to an causetid. #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)] pub enum CausetIdAlteration { CausetId(symbols::Keyword), } /// Summarizes changes to spacetime such as a a `SchemaReplicant` and (in the future) a `PartitionMap`. #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)] pub struct SpacetimeReport { // SolitonIds that were not present in the original `AttributeMap` that was mutated. pub attributes_installed: BTreeSet<SolitonId>, // SolitonIds that were present in the original `AttributeMap` that was mutated, together with a // representation of the mutations that were applied. pub attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>>, // CausetIds that were installed into the `AttributeMap`. pub causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration>, } impl SpacetimeReport { pub fn attributes_did_change(&self) -> bool { !(self.attributes_installed.is_empty() && self.attributes_altered.is_empty()) } } /// Update an 'AttributeMap' in place given two sets of causetid and attribute retractions, which /// together contain enough information to reason about a "schemaReplicant retraction". 
/// /// SchemaReplicant may only be retracted if all of its necessary attributes are being retracted: /// - :edb/causetid, :edb/valueType, :edb/cardinality. /// /// Note that this is currently incomplete/flawed: /// - we're allowing optional attributes to not be retracted and dangle afterwards /// /// Returns a set of attribute retractions which do not involve schemaReplicant-defining attributes. fn update_attribute_map_from_schemaReplicant_retractions(attribute_map: &mut AttributeMap, retractions: Vec<EAV>, causetId_retractions: &BTreeMap<SolitonId, symbols::Keyword>) -> Result<Vec<EAV>> { // Process retractions of schemaReplicant attributes first. It's allowed to retract a schemaReplicant attribute // if all of the schemaReplicant-defining schemaReplicant attributes are being retracted. // A defining set of attributes is :edb/causetid, :edb/valueType, :edb/cardinality. let mut filtered_retractions = vec![]; let mut suspect_retractions = vec![]; // Filter out sets of schemaReplicant altering retractions. let mut eas = BTreeMap::new(); for (e, a, v) in retractions.into_iter() { if causetids::is_a_schemaReplicant_attribute(a) { eas.entry(e).or_insert(vec![]).push(a); suspect_retractions.push((e, a, v)); } else { filtered_retractions.push((e, a, v)); } } // TODO (see https://github.com/whtcorpsinc/edb/issues/796). // Retraction of causetIds is allowed, but if an causetid names a schemaReplicant attribute, then we should enforce // retraction of all of the associated schemaReplicant attributes. // Unfortunately, our current in-memory schemaReplicant representation (namely, how we define an Attribute) is not currently // rich enough: it lacks distinction between presence and absence, and instead assumes default values. // Currently, in order to do this enforcement correctly, we'd need to inspect 'causets'. // Here is an incorrect way to enforce this. It's incorrect because it prevents us from retracting non-"schemaReplicant naming" causetIds. // for retracted_e in causetId_retractions.keys() { // if!eas.contains_key(retracted_e) { // bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting :edb/causetid of a schemaReplicant without retracting its defining attributes is not permitted."))); // } // } for (e, a, v) in suspect_retractions.into_iter() { let attributes = eas.get(&e).unwrap(); // Found a set of retractions which negate a schemaReplicant. if attributes.contains(&causetids::DB_CARDINALITY) && attributes.contains(&causetids::DB_VALUE_TYPE) { // Ensure that corresponding :edb/causetid is also being retracted at the same time. if causetId_retractions.contains_key(&e) { // Remove attributes corresponding to retracted attribute. attribute_map.remove(&e); } else { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting defining attributes of a schemaReplicant without retracting its :edb/causetid is not permitted."))); } } else { filtered_retractions.push((e, a, v)); } } Ok(filtered_retractions) } /// Update a `AttributeMap` in place from the given `[e a typed_value]` triples. ///
fn attribute_builder_to_modify(attribute_id: SolitonId, existing: &AttributeMap) -> AttributeBuilder { existing.get(&attribute_id) .map(AttributeBuilder::to_modify_attribute) .unwrap_or_else(AttributeBuilder::default) } // Group mutations by impacted solitonId. let mut builders: BTreeMap<SolitonId, AttributeBuilder> = BTreeMap::new(); // For retractions, we start with an attribute builder that's pre-populated with the existing // attribute values. That allows us to check existing values and unset them. for (solitonId, attr, ref value) in retractions { let builder = builders.entry(solitonId).or_insert_with(|| attribute_builder_to_modify(solitonId, attribute_map)); match attr { // You can only retract :edb/unique, :edb/isComponent; all others must be altered instead // of retracted, or are not allowed to change. causetids::DB_IS_COMPONENT => { match value { &MinkowskiType::Boolean(v) if builder.component == Some(v) => { builder.component(false); }, v => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/isComponent with the wrong value {:?}.", v))); }, } }, causetids::DB_UNIQUE => { match *value { MinkowskiType::Ref(u) => { match u { causetids::DB_UNIQUE_VALUE if builder.unique == Some(Some(attribute::Unique::Value)) => { builder.non_unique(); }, causetids::DB_UNIQUE_CausetIDITY if builder.unique == Some(Some(attribute::Unique::CausetIdity)) => { builder.non_unique(); }, v => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/unique with the wrong value {}.", v))); }, } }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [:edb/retract _ :edb/unique :edb.unique/_] but got [:edb/retract {} :edb/unique {:?}]", solitonId, value))) } }, causetids::DB_VALUE_TYPE | causetids::DB_CARDINALITY | causetids::DB_INDEX | causetids::DB_FULLTEXT | causetids::DB_NO_HISTORY => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting attribute {} for instanton {} not permitted.", attr, solitonId))); }, _ => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId))) } } } for (solitonId, attr, ref value) in assertions.into_iter() { // For assertions, we can start with an empty attribute builder. let builder = builders.entry(solitonId).or_insert_with(Default::default); // TODO: improve error messages throughout. match attr { causetids::DB_VALUE_TYPE => { match *value { MinkowskiType::Ref(causetids::DB_TYPE_BOOLEAN) => { builder.value_type(MinkowskiValueType::Boolean); }, MinkowskiType::Ref(causetids::DB_TYPE_DOUBLE) => { builder.value_type(MinkowskiValueType::Double); }, MinkowskiType::Ref(causetids::DB_TYPE_INSTANT) => { builder.value_type(MinkowskiValueType::Instant); }, MinkowskiType::Ref(causetids::DB_TYPE_KEYWORD) => { builder.value_type(MinkowskiValueType::Keyword); }, MinkowskiType::Ref(causetids::DB_TYPE_LONG) => { builder.value_type(MinkowskiValueType::Long); }, MinkowskiType::Ref(causetids::DB_TYPE_REF) => { builder.value_type(MinkowskiValueType::Ref); }, MinkowskiType::Ref(causetids::DB_TYPE_STRING) => { builder.value_type(MinkowskiValueType::String); }, MinkowskiType::Ref(causetids::DB_TYPE_UUID) => { builder.value_type(MinkowskiValueType::Uuid); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/valueType :edb.type/*] but got [... 
:edb/valueType {:?}] for solitonId {} and attribute {}", value, solitonId, attr))) } }, causetids::DB_CARDINALITY => { match *value { MinkowskiType::Ref(causetids::DB_CARDINALITY_MANY) => { builder.multival(true); }, MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE) => { builder.multival(false); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/cardinality :edb.cardinality/many|:edb.cardinality/one] but got [... :edb/cardinality {:?}]", value))) } }, causetids::DB_UNIQUE => { match *value { MinkowskiType::Ref(causetids::DB_UNIQUE_VALUE) => { builder.unique(attribute::Unique::Value); }, MinkowskiType::Ref(causetids::DB_UNIQUE_CausetIDITY) => { builder.unique(attribute::Unique::CausetIdity); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/unique :edb.unique/value|:edb.unique/causetIdity] but got [... :edb/unique {:?}]", value))) } }, causetids::DB_INDEX => { match *value { MinkowskiType::Boolean(x) => { builder.index(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/index true|false] but got [... :edb/index {:?}]", value))) } }, causetids::DB_FULLTEXT => { match *value { MinkowskiType::Boolean(x) => { builder.fulltext(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/fulltext true|false] but got [... :edb/fulltext {:?}]", value))) } }, causetids::DB_IS_COMPONENT => { match *value { MinkowskiType::Boolean(x) => { builder.component(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/isComponent true|false] but got [... :edb/isComponent {:?}]", value))) } }, causetids::DB_NO_HISTORY => { match *value { MinkowskiType::Boolean(x) => { builder.no_history(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/noHistory true|false] but got [... :edb/noHistory {:?}]", value))) } }, _ => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId))) } } }; let mut attributes_installed: BTreeSet<SolitonId> = BTreeSet::default(); let mut attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>> = BTreeMap::default(); for (solitonId, builder) in builders.into_iter() { match attribute_map.entry(solitonId) { Entry::Vacant(entry) => { // Validate once… builder.validate_install_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for new attribute with solitonId {} is not valid", solitonId)))?; // … and twice, now we have the Attribute. let a = builder.build(); a.validate(|| solitonId.to_string())?; entry.insert(a); attributes_installed.insert(solitonId); }, Entry::Occupied(mut entry) => { builder.validate_alter_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for existing attribute with solitonId {} is not valid", solitonId)))?; let mutations = builder.mutate(entry.get_mut()); attributes_altered.insert(solitonId, mutations); }, } } Ok(SpacetimeReport { attributes_installed: attributes_installed, attributes_altered: attributes_altered, causetIds_altered: BTreeMap::default(), }) } /// Update a `SchemaReplicant` in place from the given `[e a typed_value added]` quadruples. /// /// This layer enforces that causetid assertions of the form [solitonId :edb/causetid...] (as distinct from /// attribute assertions) are present and correct. /// /// This is suiBlock for mutating a `SchemaReplicant` from an applied transaction. 
/// /// Returns a report summarizing the mutations that were applied. pub fn update_schemaReplicant_from_causetid_quadruples<U>(schemaReplicant: &mut SchemaReplicant, assertions: U) -> Result<SpacetimeReport> where U: IntoIterator<Item=(SolitonId, SolitonId, MinkowskiType, bool)> { // Group attribute assertions into asserted, retracted, and updated. We assume all our // attribute assertions are :edb/cardinality :edb.cardinality/one (so they'll only be added or // retracted at most once), which means all attribute alterations are simple changes from an old // value to a new value. let mut attribute_set: AddRetractAlterSet<(SolitonId, SolitonId), MinkowskiType> = AddRetractAlterSet::default(); let mut causetId_set: AddRetractAlterSet<SolitonId, symbols::Keyword> = AddRetractAlterSet::default(); for (e, a, typed_value, added) in assertions.into_iter() { // Here we handle :edb/causetid assertions. if a == causetids::DB_CausetID { if let MinkowskiType::Keyword(ref keyword) = typed_value { causetId_set.witness(e, keyword.as_ref().clone(), added); continue } else { // Something is terribly wrong: the schemaReplicant ensures we have a keyword. unreachable!(); } } attribute_set.witness((e, a), typed_value, added); } // Collect triples. let retracted_triples = attribute_set.retracted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value)); let asserted_triples = attribute_set.asserted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value)); let altered_triples = attribute_set.altered.into_iter().map(|((e, a), (_old_value, new_value))| (e, a, new_value)); // First we process retractions which remove schemaReplicant. // This operation consumes our current list of attribute retractions, producing a filtered one. let non_schemaReplicant_retractions = update_attribute_map_from_schemaReplicant_retractions(&mut schemaReplicant.attribute_map, retracted_triples.collect(), &causetId_set.retracted)?; // Now we process all other retractions. let report = update_attribute_map_from_causetid_triples(&mut schemaReplicant.attribute_map, asserted_triples.chain(altered_triples).collect(), non_schemaReplicant_retractions)?; let mut causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration> = BTreeMap::new(); // Asserted, altered, or retracted :edb/causetIds update the relevant causetids. for (solitonId, causetid) in causetId_set.asserted { schemaReplicant.causetid_map.insert(solitonId, causetid.clone()); schemaReplicant.causetId_map.insert(causetid.clone(), solitonId); causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(causetid.clone())); } for (solitonId, (old_causetId, new_causetId)) in causetId_set.altered { schemaReplicant.causetid_map.insert(solitonId, new_causetId.clone()); // Overwrite existing. schemaReplicant.causetId_map.remove(&old_causetId); // Remove old. schemaReplicant.causetId_map.insert(new_causetId.clone(), solitonId); // Insert new. 
causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(new_causetId.clone())); } for (solitonId, causetid) in &causetId_set.retracted { schemaReplicant.causetid_map.remove(solitonId); schemaReplicant.causetId_map.remove(causetid); causetIds_altered.insert(*solitonId, CausetIdAlteration::CausetId(causetid.clone())); } // Component attributes need to change if either: // - a component attribute changed // - a schemaReplicant attribute that was a component was retracted // These two checks are a rather heavy-handed way of keeping schemaReplicant's // component_attributes up-to-date: most of the time we'll rebuild it // even though it's not necessary (e.g. a schemaReplicant attribute that's _not_ // a component was removed, or a non-component related attribute changed). if report.attributes_did_change() || causetId_set.retracted.len() > 0 { schemaReplicant.update_component_attributes(); } Ok(SpacetimeReport { causetIds_altered: causetIds_altered,
/// This is suitable for producing an `AttributeMap` from the `schemaReplicant` materialized view, which does not /// contain install and alter markers. /// /// Returns a report summarizing the mutations that were applied. pub fn update_attribute_map_from_causetid_triples(attribute_map: &mut AttributeMap, assertions: Vec<EAV>, retractions: Vec<EAV>) -> Result<SpacetimeReport> {
random_line_split
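// The retraction handling in the row above only removes an attribute definition
// outright when the "defining" schema attributes (:edb/valueType and
// :edb/cardinality) are retracted together with the entity's :edb/causetid.
// A standalone sketch of that decision rule, with plain integers standing in for
// the real entids -- the constants below are illustrative, not the crate's causetids:
use std::collections::{BTreeMap, BTreeSet};

const DB_VALUE_TYPE: i64 = 40;
const DB_CARDINALITY: i64 = 41;

/// Returns the entities whose attribute definition may be removed outright.
fn fully_retracted(
    retracted_attrs: &BTreeMap<i64, Vec<i64>>, // entity -> retracted schema attributes
    ident_retractions: &BTreeSet<i64>,         // entities whose ident is also retracted
) -> Vec<i64> {
    retracted_attrs
        .iter()
        .filter(|(e, attrs)| {
            attrs.contains(&DB_VALUE_TYPE)
                && attrs.contains(&DB_CARDINALITY)
                && ident_retractions.contains(*e)
        })
        .map(|(e, _)| *e)
        .collect()
}

fn main() {
    let mut retracted = BTreeMap::new();
    retracted.insert(65540, vec![DB_VALUE_TYPE, DB_CARDINALITY]);
    let idents = BTreeSet::from([65540_i64]);
    assert_eq!(fully_retracted(&retracted, &idents), vec![65540]);
}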
spacetime.rs
// // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #![allow(dead_code)] //! Most bundles can mutate the EinsteinDB spacetime by transacting assertions: //! //! - they can add (and, eventually, retract and alter) recognized causetIds using the `:edb/causetid` //! attribute; //! //! - they can add (and, eventually, retract and alter) schemaReplicant attributes using various `:edb/*` //! attributes; //! //! - eventually, they will be able to add (and possibly retract) solitonId partitions using a EinsteinDB //! equivalent (perhaps :edb/partition or :edb.partition/start) to Causetic's `:edb.install/partition` //! attribute. //! //! This module recognizes, validates, applies, and reports on these mutations. use failure::ResultExt; use std::collections::{BTreeMap, BTreeSet}; use std::collections::btree_map::Entry; use add_retract_alter_set::{ AddRetractAlterSet, }; use edbn::symbols; use causetids; use causetq_pull_promises::errors::{ DbErrorKind, Result, }; use allegrosql_promises::{ attribute, SolitonId, MinkowskiType, MinkowskiValueType, }; use causetq_allegrosql::{ SchemaReplicant, AttributeMap, }; use schemaReplicant::{ AttributeBuilder, AttributeValidation, }; use types::{ EAV, }; /// An alteration to an attribute. #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)] pub enum AttributeAlteration { /// From http://blog.Causetic.com/2014/01/schemaReplicant-alteration.html: /// - rename attributes /// - rename your own programmatic causetIdities (uses of :edb/causetid) /// - add or remove indexes Index, /// - add or remove uniqueness constraints Unique, /// - change attribute cardinality Cardinality, /// - change whether history is retained for an attribute NoHistory, /// - change whether an attribute is treated as a component IsComponent, } /// An alteration to an causetid. #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)] pub enum CausetIdAlteration { CausetId(symbols::Keyword), } /// Summarizes changes to spacetime such as a a `SchemaReplicant` and (in the future) a `PartitionMap`. #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)] pub struct SpacetimeReport { // SolitonIds that were not present in the original `AttributeMap` that was mutated. pub attributes_installed: BTreeSet<SolitonId>, // SolitonIds that were present in the original `AttributeMap` that was mutated, together with a // representation of the mutations that were applied. pub attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>>, // CausetIds that were installed into the `AttributeMap`. pub causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration>, } impl SpacetimeReport { pub fn attributes_did_change(&self) -> bool { !(self.attributes_installed.is_empty() && self.attributes_altered.is_empty()) } } /// Update an 'AttributeMap' in place given two sets of causetid and attribute retractions, which /// together contain enough information to reason about a "schemaReplicant retraction". 
/// /// SchemaReplicant may only be retracted if all of its necessary attributes are being retracted: /// - :edb/causetid, :edb/valueType, :edb/cardinality. /// /// Note that this is currently incomplete/flawed: /// - we're allowing optional attributes to not be retracted and dangle afterwards /// /// Returns a set of attribute retractions which do not involve schemaReplicant-defining attributes. fn update_attribute_map_from_schemaReplicant_retractions(attribute_map: &mut AttributeMap, retractions: Vec<EAV>, causetId_retractions: &BTreeMap<SolitonId, symbols::Keyword>) -> Result<Vec<EAV>> { // Process retractions of schemaReplicant attributes first. It's allowed to retract a schemaReplicant attribute // if all of the schemaReplicant-defining schemaReplicant attributes are being retracted. // A defining set of attributes is :edb/causetid, :edb/valueType, :edb/cardinality. let mut filtered_retractions = vec![]; let mut suspect_retractions = vec![]; // Filter out sets of schemaReplicant altering retractions. let mut eas = BTreeMap::new(); for (e, a, v) in retractions.into_iter() { if causetids::is_a_schemaReplicant_attribute(a) { eas.entry(e).or_insert(vec![]).push(a); suspect_retractions.push((e, a, v)); } else { filtered_retractions.push((e, a, v)); } } // TODO (see https://github.com/whtcorpsinc/edb/issues/796). // Retraction of causetIds is allowed, but if an causetid names a schemaReplicant attribute, then we should enforce // retraction of all of the associated schemaReplicant attributes. // Unfortunately, our current in-memory schemaReplicant representation (namely, how we define an Attribute) is not currently // rich enough: it lacks distinction between presence and absence, and instead assumes default values. // Currently, in order to do this enforcement correctly, we'd need to inspect 'causets'. // Here is an incorrect way to enforce this. It's incorrect because it prevents us from retracting non-"schemaReplicant naming" causetIds. // for retracted_e in causetId_retractions.keys() { // if!eas.contains_key(retracted_e) { // bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting :edb/causetid of a schemaReplicant without retracting its defining attributes is not permitted."))); // } // } for (e, a, v) in suspect_retractions.into_iter() { let attributes = eas.get(&e).unwrap(); // Found a set of retractions which negate a schemaReplicant. if attributes.contains(&causetids::DB_CARDINALITY) && attributes.contains(&causetids::DB_VALUE_TYPE) { // Ensure that corresponding :edb/causetid is also being retracted at the same time. if causetId_retractions.contains_key(&e) { // Remove attributes corresponding to retracted attribute. attribute_map.remove(&e); } else { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting defining attributes of a schemaReplicant without retracting its :edb/causetid is not permitted."))); } } else { filtered_retractions.push((e, a, v)); } } Ok(filtered_retractions) } /// Update a `AttributeMap` in place from the given `[e a typed_value]` triples. /// /// This is suiBlock for producing a `AttributeMap` from the `schemaReplicant` materialized view, which does not /// contain install and alter markers. /// /// Returns a report summarizing the mutations that were applied. pub fn update_attribute_map_from_causetid_triples(attribute_map: &mut AttributeMap, assertions: Vec<EAV>, retractions: Vec<EAV>) -> Result<SpacetimeReport>
builder.component(false); }, v => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/isComponent with the wrong value {:?}.", v))); }, } }, causetids::DB_UNIQUE => { match *value { MinkowskiType::Ref(u) => { match u { causetids::DB_UNIQUE_VALUE if builder.unique == Some(Some(attribute::Unique::Value)) => { builder.non_unique(); }, causetids::DB_UNIQUE_CausetIDITY if builder.unique == Some(Some(attribute::Unique::CausetIdity)) => { builder.non_unique(); }, v => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/unique with the wrong value {}.", v))); }, } }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [:edb/retract _ :edb/unique :edb.unique/_] but got [:edb/retract {} :edb/unique {:?}]", solitonId, value))) } }, causetids::DB_VALUE_TYPE | causetids::DB_CARDINALITY | causetids::DB_INDEX | causetids::DB_FULLTEXT | causetids::DB_NO_HISTORY => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting attribute {} for instanton {} not permitted.", attr, solitonId))); }, _ => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId))) } } } for (solitonId, attr, ref value) in assertions.into_iter() { // For assertions, we can start with an empty attribute builder. let builder = builders.entry(solitonId).or_insert_with(Default::default); // TODO: improve error messages throughout. match attr { causetids::DB_VALUE_TYPE => { match *value { MinkowskiType::Ref(causetids::DB_TYPE_BOOLEAN) => { builder.value_type(MinkowskiValueType::Boolean); }, MinkowskiType::Ref(causetids::DB_TYPE_DOUBLE) => { builder.value_type(MinkowskiValueType::Double); }, MinkowskiType::Ref(causetids::DB_TYPE_INSTANT) => { builder.value_type(MinkowskiValueType::Instant); }, MinkowskiType::Ref(causetids::DB_TYPE_KEYWORD) => { builder.value_type(MinkowskiValueType::Keyword); }, MinkowskiType::Ref(causetids::DB_TYPE_LONG) => { builder.value_type(MinkowskiValueType::Long); }, MinkowskiType::Ref(causetids::DB_TYPE_REF) => { builder.value_type(MinkowskiValueType::Ref); }, MinkowskiType::Ref(causetids::DB_TYPE_STRING) => { builder.value_type(MinkowskiValueType::String); }, MinkowskiType::Ref(causetids::DB_TYPE_UUID) => { builder.value_type(MinkowskiValueType::Uuid); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/valueType :edb.type/*] but got [... :edb/valueType {:?}] for solitonId {} and attribute {}", value, solitonId, attr))) } }, causetids::DB_CARDINALITY => { match *value { MinkowskiType::Ref(causetids::DB_CARDINALITY_MANY) => { builder.multival(true); }, MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE) => { builder.multival(false); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/cardinality :edb.cardinality/many|:edb.cardinality/one] but got [... :edb/cardinality {:?}]", value))) } }, causetids::DB_UNIQUE => { match *value { MinkowskiType::Ref(causetids::DB_UNIQUE_VALUE) => { builder.unique(attribute::Unique::Value); }, MinkowskiType::Ref(causetids::DB_UNIQUE_CausetIDITY) => { builder.unique(attribute::Unique::CausetIdity); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/unique :edb.unique/value|:edb.unique/causetIdity] but got [... :edb/unique {:?}]", value))) } }, causetids::DB_INDEX => { match *value { MinkowskiType::Boolean(x) => { builder.index(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... 
:edb/index true|false] but got [... :edb/index {:?}]", value))) } }, causetids::DB_FULLTEXT => { match *value { MinkowskiType::Boolean(x) => { builder.fulltext(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/fulltext true|false] but got [... :edb/fulltext {:?}]", value))) } }, causetids::DB_IS_COMPONENT => { match *value { MinkowskiType::Boolean(x) => { builder.component(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/isComponent true|false] but got [... :edb/isComponent {:?}]", value))) } }, causetids::DB_NO_HISTORY => { match *value { MinkowskiType::Boolean(x) => { builder.no_history(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/noHistory true|false] but got [... :edb/noHistory {:?}]", value))) } }, _ => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId))) } } }; let mut attributes_installed: BTreeSet<SolitonId> = BTreeSet::default(); let mut attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>> = BTreeMap::default(); for (solitonId, builder) in builders.into_iter() { match attribute_map.entry(solitonId) { Entry::Vacant(entry) => { // Validate once… builder.validate_install_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for new attribute with solitonId {} is not valid", solitonId)))?; // … and twice, now we have the Attribute. let a = builder.build(); a.validate(|| solitonId.to_string())?; entry.insert(a); attributes_installed.insert(solitonId); }, Entry::Occupied(mut entry) => { builder.validate_alter_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for existing attribute with solitonId {} is not valid", solitonId)))?; let mutations = builder.mutate(entry.get_mut()); attributes_altered.insert(solitonId, mutations); }, } } Ok(SpacetimeReport { attributes_installed: attributes_installed, attributes_altered: attributes_altered, causetIds_altered: BTreeMap::default(), }) } // / Update a `SchemaReplicant` in place from the given `[e a typed_value added]` quadruples. /// /// This layer enforces that causetid assertions of the form [solitonId :edb/causetid...] (as distinct from /// attribute assertions) are present and correct. /// /// This is suiBlock for mutating a `SchemaReplicant` from an applied transaction. /// /// Returns a report summarizing the mutations that were applied. pub fn update_schemaReplicant_from_causetid_quadruples<U>(schemaReplicant: &mut SchemaReplicant, assertions: U) -> Result<SpacetimeReport> where U: IntoIterator<Item=(SolitonId, SolitonId, MinkowskiType, bool)> { // Group attribute assertions into asserted, retracted, and updated. We assume all our // attribute assertions are :edb/cardinality :edb.cardinality/one (so they'll only be added or // retracted at most once), which means all attribute alterations are simple changes from an old // value to a new value. let mut attribute_set: AddRetractAlterSet<(SolitonId, SolitonId), MinkowskiType> = AddRetractAlterSet::default(); let mut causetId_set: AddRetractAlterSet<SolitonId, symbols::Keyword> = AddRetractAlterSet::default(); for (e, a, typed_value, added) in assertions.into_iter() { // Here we handle :edb/causetid assertions. 
if a == causetids::DB_CausetID { if let MinkowskiType::Keyword(ref keyword) = typed_value { causetId_set.witness(e, keyword.as_ref().clone(), added); continue } else { // Something is terribly wrong: the schemaReplicant ensures we have a keyword. unreachable!(); } } attribute_set.witness((e, a), typed_value, added); } // Collect triples. let retracted_triples = attribute_set.retracted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value)); let asserted_triples = attribute_set.asserted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value)); let altered_triples = attribute_set.altered.into_iter().map(|((e, a), (_old_value, new_value))| (e, a, new_value)); // First we process retractions which remove schemaReplicant. // This operation consumes our current list of attribute retractions, producing a filtered one. let non_schemaReplicant_retractions = update_attribute_map_from_schemaReplicant_retractions(&mut schemaReplicant.attribute_map, retracted_triples.collect(), &causetId_set.retracted)?; // Now we process all other retractions. let report = update_attribute_map_from_causetid_triples(&mut schemaReplicant.attribute_map, asserted_triples.chain(altered_triples).collect(), non_schemaReplicant_retractions)?; let mut causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration> = BTreeMap::new(); // Asserted, altered, or retracted :edb/causetIds update the relevant causetids. for (solitonId, causetid) in causetId_set.asserted { schemaReplicant.causetid_map.insert(solitonId, causetid.clone()); schemaReplicant.causetId_map.insert(causetid.clone(), solitonId); causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(causetid.clone())); } for (solitonId, (old_causetId, new_causetId)) in causetId_set.altered { schemaReplicant.causetid_map.insert(solitonId, new_causetId.clone()); // Overwrite existing. schemaReplicant.causetId_map.remove(&old_causetId); // Remove old. schemaReplicant.causetId_map.insert(new_causetId.clone(), solitonId); // Insert new. causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(new_causetId.clone())); } for (solitonId, causetid) in &causetId_set.retracted { schemaReplicant.causetid_map.remove(solitonId); schemaReplicant.causetId_map.remove(causetid); causetIds_altered.insert(*solitonId, CausetIdAlteration::CausetId(causetid.clone())); } // Component attributes need to change if either: // - a component attribute changed // - a schemaReplicant attribute that was a component was retracted // These two checks are a rather heavy-handed way of keeping schemaReplicant's // component_attributes up-to-date: most of the time we'll rebuild it // even though it's not necessary (e.g. a schemaReplicant attribute that's _not_ // a component was removed, or a non-component related attribute changed). if report.attributes_did_change() || causetId_set.retracted.len() > 0 { schemaReplicant.update_component_attributes(); } Ok(SpacetimeReport { causetIds_altered: causetIds_
{ fn attribute_builder_to_modify(attribute_id: SolitonId, existing: &AttributeMap) -> AttributeBuilder { existing.get(&attribute_id) .map(AttributeBuilder::to_modify_attribute) .unwrap_or_else(AttributeBuilder::default) } // Group mutations by impacted solitonId. let mut builders: BTreeMap<SolitonId, AttributeBuilder> = BTreeMap::new(); // For retractions, we start with an attribute builder that's pre-populated with the existing // attribute values. That allows us to check existing values and unset them. for (solitonId, attr, ref value) in retractions { let builder = builders.entry(solitonId).or_insert_with(|| attribute_builder_to_modify(solitonId, attribute_map)); match attr { // You can only retract :edb/unique, :edb/isComponent; all others must be altered instead // of retracted, or are not allowed to change. causetids::DB_IS_COMPONENT => { match value { &MinkowskiType::Boolean(v) if builder.component == Some(v) => {
identifier_body
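// `update_schemaReplicant_from_causetid_quadruples` in the row above leans on an
// add/retract/alter set: each (key, value, added) datom is witnessed once, and a key
// that is both retracted and asserted within the same transaction becomes an
// "alteration" from the old value to the new one. A minimal sketch of that
// bookkeeping -- not the crate's AddRetractAlterSet, just an illustration:
use std::collections::BTreeMap;

#[derive(Default, Debug)]
struct AddRetractAlter<K: Ord, V> {
    asserted: BTreeMap<K, V>,
    retracted: BTreeMap<K, V>,
    altered: BTreeMap<K, (V, V)>, // key -> (old, new)
}

impl<K: Ord, V> AddRetractAlter<K, V> {
    fn witness(&mut self, key: K, value: V, added: bool) {
        if added {
            // An earlier retraction of the same key turns this into an alteration.
            if let Some(old) = self.retracted.remove(&key) {
                self.altered.insert(key, (old, value));
            } else {
                self.asserted.insert(key, value);
            }
        } else if let Some(new) = self.asserted.remove(&key) {
            // A later assertion already seen: record old -> new.
            self.altered.insert(key, (value, new));
        } else {
            self.retracted.insert(key, value);
        }
    }
}

fn main() {
    let mut set = AddRetractAlter::default();
    set.witness((100, 8), "one", false); // retract the old cardinality
    set.witness((100, 8), "many", true); // assert the new cardinality
    assert!(set.altered.contains_key(&(100, 8))); // classified as an alteration
}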
spacetime.rs
// // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #![allow(dead_code)] //! Most bundles can mutate the EinsteinDB spacetime by transacting assertions: //! //! - they can add (and, eventually, retract and alter) recognized causetIds using the `:edb/causetid` //! attribute; //! //! - they can add (and, eventually, retract and alter) schemaReplicant attributes using various `:edb/*` //! attributes; //! //! - eventually, they will be able to add (and possibly retract) solitonId partitions using a EinsteinDB //! equivalent (perhaps :edb/partition or :edb.partition/start) to Causetic's `:edb.install/partition` //! attribute. //! //! This module recognizes, validates, applies, and reports on these mutations. use failure::ResultExt; use std::collections::{BTreeMap, BTreeSet}; use std::collections::btree_map::Entry; use add_retract_alter_set::{ AddRetractAlterSet, }; use edbn::symbols; use causetids; use causetq_pull_promises::errors::{ DbErrorKind, Result, }; use allegrosql_promises::{ attribute, SolitonId, MinkowskiType, MinkowskiValueType, }; use causetq_allegrosql::{ SchemaReplicant, AttributeMap, }; use schemaReplicant::{ AttributeBuilder, AttributeValidation, }; use types::{ EAV, }; /// An alteration to an attribute. #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)] pub enum AttributeAlteration { /// From http://blog.Causetic.com/2014/01/schemaReplicant-alteration.html: /// - rename attributes /// - rename your own programmatic causetIdities (uses of :edb/causetid) /// - add or remove indexes Index, /// - add or remove uniqueness constraints Unique, /// - change attribute cardinality Cardinality, /// - change whether history is retained for an attribute NoHistory, /// - change whether an attribute is treated as a component IsComponent, } /// An alteration to an causetid. #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)] pub enum CausetIdAlteration { CausetId(symbols::Keyword), } /// Summarizes changes to spacetime such as a a `SchemaReplicant` and (in the future) a `PartitionMap`. #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)] pub struct SpacetimeReport { // SolitonIds that were not present in the original `AttributeMap` that was mutated. pub attributes_installed: BTreeSet<SolitonId>, // SolitonIds that were present in the original `AttributeMap` that was mutated, together with a // representation of the mutations that were applied. pub attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>>, // CausetIds that were installed into the `AttributeMap`. pub causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration>, } impl SpacetimeReport { pub fn attributes_did_change(&self) -> bool { !(self.attributes_installed.is_empty() && self.attributes_altered.is_empty()) } } /// Update an 'AttributeMap' in place given two sets of causetid and attribute retractions, which /// together contain enough information to reason about a "schemaReplicant retraction". 
/// /// SchemaReplicant may only be retracted if all of its necessary attributes are being retracted: /// - :edb/causetid, :edb/valueType, :edb/cardinality. /// /// Note that this is currently incomplete/flawed: /// - we're allowing optional attributes to not be retracted and dangle afterwards /// /// Returns a set of attribute retractions which do not involve schemaReplicant-defining attributes. fn update_attribute_map_from_schemaReplicant_retractions(attribute_map: &mut AttributeMap, retractions: Vec<EAV>, causetId_retractions: &BTreeMap<SolitonId, symbols::Keyword>) -> Result<Vec<EAV>> { // Process retractions of schemaReplicant attributes first. It's allowed to retract a schemaReplicant attribute // if all of the schemaReplicant-defining schemaReplicant attributes are being retracted. // A defining set of attributes is :edb/causetid, :edb/valueType, :edb/cardinality. let mut filtered_retractions = vec![]; let mut suspect_retractions = vec![]; // Filter out sets of schemaReplicant altering retractions. let mut eas = BTreeMap::new(); for (e, a, v) in retractions.into_iter() { if causetids::is_a_schemaReplicant_attribute(a) { eas.entry(e).or_insert(vec![]).push(a); suspect_retractions.push((e, a, v)); } else { filtered_retractions.push((e, a, v)); } } // TODO (see https://github.com/whtcorpsinc/edb/issues/796). // Retraction of causetIds is allowed, but if an causetid names a schemaReplicant attribute, then we should enforce // retraction of all of the associated schemaReplicant attributes. // Unfortunately, our current in-memory schemaReplicant representation (namely, how we define an Attribute) is not currently // rich enough: it lacks distinction between presence and absence, and instead assumes default values. // Currently, in order to do this enforcement correctly, we'd need to inspect 'causets'. // Here is an incorrect way to enforce this. It's incorrect because it prevents us from retracting non-"schemaReplicant naming" causetIds. // for retracted_e in causetId_retractions.keys() { // if!eas.contains_key(retracted_e) { // bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting :edb/causetid of a schemaReplicant without retracting its defining attributes is not permitted."))); // } // } for (e, a, v) in suspect_retractions.into_iter() { let attributes = eas.get(&e).unwrap(); // Found a set of retractions which negate a schemaReplicant. if attributes.contains(&causetids::DB_CARDINALITY) && attributes.contains(&causetids::DB_VALUE_TYPE) { // Ensure that corresponding :edb/causetid is also being retracted at the same time. if causetId_retractions.contains_key(&e) { // Remove attributes corresponding to retracted attribute. attribute_map.remove(&e); } else { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting defining attributes of a schemaReplicant without retracting its :edb/causetid is not permitted."))); } } else { filtered_retractions.push((e, a, v)); } } Ok(filtered_retractions) } /// Update a `AttributeMap` in place from the given `[e a typed_value]` triples. /// /// This is suiBlock for producing a `AttributeMap` from the `schemaReplicant` materialized view, which does not /// contain install and alter markers. /// /// Returns a report summarizing the mutations that were applied. 
pub fn update_attribute_map_from_causetid_triples(attribute_map: &mut AttributeMap, assertions: Vec<EAV>, retractions: Vec<EAV>) -> Result<SpacetimeReport> { fn attribute_builder_to_modify(attribute_id: SolitonId, existing: &AttributeMap) -> AttributeBuilder { existing.get(&attribute_id) .map(AttributeBuilder::to_modify_attribute) .unwrap_or_else(AttributeBuilder::default) } // Group mutations by impacted solitonId. let mut builders: BTreeMap<SolitonId, AttributeBuilder> = BTreeMap::new(); // For retractions, we start with an attribute builder that's pre-populated with the existing // attribute values. That allows us to check existing values and unset them. for (solitonId, attr, ref value) in retractions { let builder = builders.entry(solitonId).or_insert_with(|| attribute_builder_to_modify(solitonId, attribute_map)); match attr { // You can only retract :edb/unique, :edb/isComponent; all others must be altered instead // of retracted, or are not allowed to change. causetids::DB_IS_COMPONENT => { match value { &MinkowskiType::Boolean(v) if builder.component == Some(v) => { builder.component(false); }, v => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/isComponent with the wrong value {:?}.", v))); }, } }, causetids::DB_UNIQUE => { match *value { MinkowskiType::Ref(u) => { match u { causetids::DB_UNIQUE_VALUE if builder.unique == Some(Some(attribute::Unique::Value)) => { builder.non_unique(); }, causetids::DB_UNIQUE_CausetIDITY if builder.unique == Some(Some(attribute::Unique::CausetIdity)) => { builder.non_unique(); }, v => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Attempted to retract :edb/unique with the wrong value {}.", v))); }, } }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [:edb/retract _ :edb/unique :edb.unique/_] but got [:edb/retract {} :edb/unique {:?}]", solitonId, value))) } }, causetids::DB_VALUE_TYPE | causetids::DB_CARDINALITY | causetids::DB_INDEX | causetids::DB_FULLTEXT | causetids::DB_NO_HISTORY => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Retracting attribute {} for instanton {} not permitted.", attr, solitonId))); }, _ => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId))) } } } for (solitonId, attr, ref value) in assertions.into_iter() { // For assertions, we can start with an empty attribute builder. let builder = builders.entry(solitonId).or_insert_with(Default::default); // TODO: improve error messages throughout. match attr { causetids::DB_VALUE_TYPE => { match *value { MinkowskiType::Ref(causetids::DB_TYPE_BOOLEAN) => { builder.value_type(MinkowskiValueType::Boolean); }, MinkowskiType::Ref(causetids::DB_TYPE_DOUBLE) => { builder.value_type(MinkowskiValueType::Double); }, MinkowskiType::Ref(causetids::DB_TYPE_INSTANT) => { builder.value_type(MinkowskiValueType::Instant); }, MinkowskiType::Ref(causetids::DB_TYPE_KEYWORD) => { builder.value_type(MinkowskiValueType::Keyword); }, MinkowskiType::Ref(causetids::DB_TYPE_LONG) => { builder.value_type(MinkowskiValueType::Long); }, MinkowskiType::Ref(causetids::DB_TYPE_REF) => { builder.value_type(MinkowskiValueType::Ref); }, MinkowskiType::Ref(causetids::DB_TYPE_STRING) => { builder.value_type(MinkowskiValueType::String); }, MinkowskiType::Ref(causetids::DB_TYPE_UUID) => { builder.value_type(MinkowskiValueType::Uuid); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/valueType :edb.type/*] but got [... 
:edb/valueType {:?}] for solitonId {} and attribute {}", value, solitonId, attr))) } }, causetids::DB_CARDINALITY => { match *value { MinkowskiType::Ref(causetids::DB_CARDINALITY_MANY) => { builder.multival(true); }, MinkowskiType::Ref(causetids::DB_CARDINALITY_ONE) => { builder.multival(false); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/cardinality :edb.cardinality/many|:edb.cardinality/one] but got [... :edb/cardinality {:?}]", value))) } }, causetids::DB_UNIQUE => { match *value { MinkowskiType::Ref(causetids::DB_UNIQUE_VALUE) => { builder.unique(attribute::Unique::Value); }, MinkowskiType::Ref(causetids::DB_UNIQUE_CausetIDITY) => { builder.unique(attribute::Unique::CausetIdity); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/unique :edb.unique/value|:edb.unique/causetIdity] but got [... :edb/unique {:?}]", value))) } }, causetids::DB_INDEX => { match *value { MinkowskiType::Boolean(x) => { builder.index(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/index true|false] but got [... :edb/index {:?}]", value))) } }, causetids::DB_FULLTEXT => { match *value { MinkowskiType::Boolean(x) => { builder.fulltext(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/fulltext true|false] but got [... :edb/fulltext {:?}]", value))) } }, causetids::DB_IS_COMPONENT => { match *value { MinkowskiType::Boolean(x) => { builder.component(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/isComponent true|false] but got [... :edb/isComponent {:?}]", value))) } }, causetids::DB_NO_HISTORY => { match *value { MinkowskiType::Boolean(x) => { builder.no_history(x); }, _ => bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Expected [... :edb/noHistory true|false] but got [... :edb/noHistory {:?}]", value))) } }, _ => { bail!(DbErrorKind::BadSchemaReplicantAssertion(format!("Do not recognize attribute {} for solitonId {}", attr, solitonId))) } } }; let mut attributes_installed: BTreeSet<SolitonId> = BTreeSet::default(); let mut attributes_altered: BTreeMap<SolitonId, Vec<AttributeAlteration>> = BTreeMap::default(); for (solitonId, builder) in builders.into_iter() { match attribute_map.entry(solitonId) { Entry::Vacant(entry) => { // Validate once… builder.validate_install_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for new attribute with solitonId {} is not valid", solitonId)))?; // … and twice, now we have the Attribute. let a = builder.build(); a.validate(|| solitonId.to_string())?; entry.insert(a); attributes_installed.insert(solitonId); }, Entry::Occupied(mut entry) => {
} } Ok(SpacetimeReport { attributes_installed: attributes_installed, attributes_altered: attributes_altered, causetIds_altered: BTreeMap::default(), }) } /// Update a `SchemaReplicant` in place from the given `[e a typed_value added]` quadruples. /// /// This layer enforces that causetid assertions of the form [solitonId :edb/causetid...] (as distinct from /// attribute assertions) are present and correct. /// /// This is suiBlock for mutating a `SchemaReplicant` from an applied transaction. /// /// Returns a report summarizing the mutations that were applied. pub fn update_schemaReplicant_from_causetid_quadruples<U>(schemaReplicant: &mut SchemaReplicant, assertions: U) -> Result<SpacetimeReport> where U: IntoIterator<Item=(SolitonId, SolitonId, MinkowskiType, bool)> { // Group attribute assertions into asserted, retracted, and updated. We assume all our // attribute assertions are :edb/cardinality :edb.cardinality/one (so they'll only be added or // retracted at most once), which means all attribute alterations are simple changes from an old // value to a new value. let mut attribute_set: AddRetractAlterSet<(SolitonId, SolitonId), MinkowskiType> = AddRetractAlterSet::default(); let mut causetId_set: AddRetractAlterSet<SolitonId, symbols::Keyword> = AddRetractAlterSet::default(); for (e, a, typed_value, added) in assertions.into_iter() { // Here we handle :edb/causetid assertions. if a == causetids::DB_CausetID { if let MinkowskiType::Keyword(ref keyword) = typed_value { causetId_set.witness(e, keyword.as_ref().clone(), added); continue } else { // Something is terribly wrong: the schemaReplicant ensures we have a keyword. unreachable!(); } } attribute_set.witness((e, a), typed_value, added); } // Collect triples. let retracted_triples = attribute_set.retracted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value)); let asserted_triples = attribute_set.asserted.into_iter().map(|((e, a), typed_value)| (e, a, typed_value)); let altered_triples = attribute_set.altered.into_iter().map(|((e, a), (_old_value, new_value))| (e, a, new_value)); // First we process retractions which remove schemaReplicant. // This operation consumes our current list of attribute retractions, producing a filtered one. let non_schemaReplicant_retractions = update_attribute_map_from_schemaReplicant_retractions(&mut schemaReplicant.attribute_map, retracted_triples.collect(), &causetId_set.retracted)?; // Now we process all other retractions. let report = update_attribute_map_from_causetid_triples(&mut schemaReplicant.attribute_map, asserted_triples.chain(altered_triples).collect(), non_schemaReplicant_retractions)?; let mut causetIds_altered: BTreeMap<SolitonId, CausetIdAlteration> = BTreeMap::new(); // Asserted, altered, or retracted :edb/causetIds update the relevant causetids. for (solitonId, causetid) in causetId_set.asserted { schemaReplicant.causetid_map.insert(solitonId, causetid.clone()); schemaReplicant.causetId_map.insert(causetid.clone(), solitonId); causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(causetid.clone())); } for (solitonId, (old_causetId, new_causetId)) in causetId_set.altered { schemaReplicant.causetid_map.insert(solitonId, new_causetId.clone()); // Overwrite existing. schemaReplicant.causetId_map.remove(&old_causetId); // Remove old. schemaReplicant.causetId_map.insert(new_causetId.clone(), solitonId); // Insert new. 
causetIds_altered.insert(solitonId, CausetIdAlteration::CausetId(new_causetId.clone())); } for (solitonId, causetid) in &causetId_set.retracted { schemaReplicant.causetid_map.remove(solitonId); schemaReplicant.causetId_map.remove(causetid); causetIds_altered.insert(*solitonId, CausetIdAlteration::CausetId(causetid.clone())); } // Component attributes need to change if either: // - a component attribute changed // - a schemaReplicant attribute that was a component was retracted // These two checks are a rather heavy-handed way of keeping schemaReplicant's // component_attributes up-to-date: most of the time we'll rebuild it // even though it's not necessary (e.g. a schemaReplicant attribute that's _not_ // a component was removed, or a non-component related attribute changed). if report.attributes_did_change() || causetId_set.retracted.len() > 0 { schemaReplicant.update_component_attributes(); } Ok(SpacetimeReport { causetIds_altered: causetIds_altered
builder.validate_alter_attribute().context(DbErrorKind::BadSchemaReplicantAssertion(format!("SchemaReplicant alteration for existing attribute with solitonId {} is not valid", solitonId)))?; let mutations = builder.mutate(entry.get_mut()); attributes_altered.insert(solitonId, mutations); },
conditional_block
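The grouping step in the schema-update routine above relies on an `AddRetractAlterSet` whose `witness` method pairs a retraction and an assertion of the same key into a single alteration. A minimal sketch of that witnessing pattern, using plain `BTreeMap`s and illustrative key/value types rather than the project's actual implementation:

```rust
use std::collections::BTreeMap;

/// Accumulates assertions and retractions, pairing matched ones into alterations.
#[derive(Debug, Default)]
struct AddRetractAlterSet<K: Ord, V> {
    asserted: BTreeMap<K, V>,
    retracted: BTreeMap<K, V>,
    altered: BTreeMap<K, (V, V)>, // key -> (old value, new value)
}

impl<K: Ord, V> AddRetractAlterSet<K, V> {
    /// Record one datom. If the same key was already seen with the opposite
    /// polarity, fold the pair into an alteration instead.
    fn witness(&mut self, key: K, value: V, added: bool) {
        if added {
            if let Some(old) = self.retracted.remove(&key) {
                self.altered.insert(key, (old, value));
            } else {
                self.asserted.insert(key, value);
            }
        } else if let Some(new) = self.asserted.remove(&key) {
            self.altered.insert(key, (value, new));
        } else {
            self.retracted.insert(key, value);
        }
    }
}

fn main() {
    let mut set: AddRetractAlterSet<u64, &str> = AddRetractAlterSet::default();
    set.witness(10, ":edb/old-ident", false); // retraction...
    set.witness(10, ":edb/new-ident", true);  // ...plus assertion => one alteration
    set.witness(11, ":edb/fresh", true);      // plain assertion
    println!("{:?}", set);
}
```

Either witnessing order (retract-then-assert or assert-then-retract) collapses to the same `(old, new)` pair, which is what lets the caller treat cardinality-one attribute changes as simple value updates.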
init.rs
// Copyright 2019-2023 Tauri Programme within The Commons Conservancy // SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: MIT use super::{get_app, Target}; use crate::helpers::{config::get as get_tauri_config, template::JsonMap}; use crate::Result; use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError}; use tauri_mobile::{ android::{ config::Config as AndroidConfig, env::Env as AndroidEnv, target::Target as AndroidTarget, }, config::app::App, dot_cargo, target::TargetTrait as _, util::{ self, cli::{Report, TextWrapper}, }, }; use std::{ env::{current_dir, var, var_os}, path::PathBuf, }; pub fn command( target: Target, ci: bool, reinstall_deps: bool, skip_targets_install: bool, ) -> Result<()> { let wrapper = TextWrapper::with_splitter(textwrap::termwidth(), textwrap::NoHyphenation); exec( target, &wrapper, ci || var_os("CI").is_some(), reinstall_deps, skip_targets_install, ) .map_err(|e| anyhow::anyhow!("{:#}", e))?; Ok(()) } pub fn configure_cargo( app: &App, android: Option<(&mut AndroidEnv, &AndroidConfig)>, ) -> Result<()> { if let Some((env, config)) = android { for target in AndroidTarget::all().values() { let config = target.generate_cargo_config(config, env)?; let target_var_name = target.triple.replace('-', "_").to_uppercase(); if let Some(linker) = config.linker { env.base.insert_env_var( format!("CARGO_TARGET_{target_var_name}_LINKER"), linker.into(), ); } env.base.insert_env_var( format!("CARGO_TARGET_{target_var_name}_RUSTFLAGS"), config.rustflags.join(" ").into(), ); } } let mut dot_cargo = dot_cargo::DotCargo::load(app)?; // Mysteriously, builds that don't specify `--target` seem to fight over // the build cache with builds that use `--target`! This means that // alternating between i.e. `cargo run` and `cargo apple run` would // result in clean builds being made each time you switched... which is // pretty nightmarish. Specifying `build.target` in `.cargo/config` // fortunately has the same effect as specifying `--target`, so now we can // `cargo run` with peace of mind! 
// // This behavior could be explained here: // https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags dot_cargo.set_default_target(util::host_target_triple()?); dot_cargo.write(app).map_err(Into::into) } pub fn exec( target: Target, wrapper: &TextWrapper, #[allow(unused_variables)] non_interactive: bool, #[allow(unused_variables)] reinstall_deps: bool, skip_targets_install: bool, ) -> Result<App> { let current_dir = current_dir()?; let tauri_config = get_tauri_config(None)?; let tauri_config_guard = tauri_config.lock().unwrap(); let tauri_config_ = tauri_config_guard.as_ref().unwrap(); let app = get_app(tauri_config_); let (handlebars, mut map) = handlebars(&app); let mut args = std::env::args_os(); let mut binary = args .next() .map(|bin| { let path = PathBuf::from(&bin); if path.exists() { let absolute_path = util::prefix_path(&current_dir, path); return absolute_path.into(); } bin }) .unwrap_or_else(|| std::ffi::OsString::from("cargo")); let mut build_args = Vec::new(); for arg in args { let path = PathBuf::from(&arg); if path.exists() { let absolute_path = util::prefix_path(&current_dir, path); build_args.push(absolute_path.to_string_lossy().into_owned()); continue; } build_args.push(arg.to_string_lossy().into_owned()); if arg == "android" || arg == "ios" { break; } } build_args.push(target.ide_build_script_name().into()); let binary_path = PathBuf::from(&binary); let bin_stem = binary_path.file_stem().unwrap().to_string_lossy(); let r = regex::Regex::new("(nodejs|node)\\-?([1-9]*)*$").unwrap(); if r.is_match(&bin_stem) { if let Some(npm_execpath) = var_os("npm_execpath").map(PathBuf::from) { let manager_stem = npm_execpath.file_stem().unwrap().to_os_string(); let is_npm = manager_stem == "npm-cli"; let is_npx = manager_stem == "npx-cli"; binary = if is_npm { "npm".into() } else if is_npx { "npx".into() } else { manager_stem }; if!(build_args.is_empty() || is_npx) { // remove script path, we'll use `npm_lifecycle_event` instead build_args.remove(0); } if is_npm { build_args.insert(0, "--".into()); } if!is_npx { build_args.insert(0, var("npm_lifecycle_event").unwrap()); } if is_npm { build_args.insert(0, "run".into()); } } } map.insert("tauri-binary", binary.to_string_lossy()); map.insert("tauri-binary-args", &build_args); map.insert("tauri-binary-args-str", build_args.join(" ")); let app = match target { // Generate Android Studio project Target::Android => match AndroidEnv::new() { Ok(_env) => { let app = get_app(tauri_config_); let (config, metadata) = super::android::get_config(&app, tauri_config_, &Default::default()); map.insert("android", &config); super::android::project::gen( &config, &metadata, (handlebars, map), wrapper, skip_targets_install, )?; app } Err(err) => { if err.sdk_or_ndk_issue() { Report::action_request( " to initialize Android environment; Android support won't be usable until you fix the issue below and re-run `tauri android init`!", err, ) .print(wrapper); app } else { return Err(err.into()); } } }, #[cfg(target_os = "macos")] // Generate Xcode project Target::Ios => { let (config, metadata) = super::ios::get_config(&app, tauri_config_, &Default::default()); map.insert("apple", &config); super::ios::project::gen( &config, &metadata, (handlebars, map), wrapper, non_interactive, reinstall_deps, skip_targets_install, )?; app } }; Report::victory( "Project generated successfully!", "Make cool apps! 
🌻 🐕 🎉", ) .print(wrapper); Ok(app) } fn handlebars(app: &App) -> (Handlebars<'static>, JsonMap) { let mut h = Handlebars::new(); h.register_escape_fn(handlebars::no_escape); h.register_helper("html-escape", Box::new(html_escape)); h.register_helper("join", Box::new(join)); h.register_helper("quote-and-join", Box::new(quote_and_join)); h.register_helper( "quote-and-join-colon-prefix", Box::new(quote_and_join_colon_prefix), ); h.register_helper("snake-case", Box::new(snake_case)); h.register_helper("reverse-domain", Box::new(reverse_domain)); h.register_helper( "reverse-domain-snake-case", Box::new(reverse_domain_snake_case), ); // don't mix these up or very bad things will happen to all of us h.register_helper("prefix-path", Box::new(prefix_path)); h.register_helper("unprefix-path", Box::new(unprefix_path)); let mut map = JsonMap::default(); map.insert("app", app); (h, map) } fn get_str<'a>(helper: &'a Helper) -> &'a str { helper .param(0) .and_then(|v| v.value().as_str()) .unwrap_or("") } fn get_str_array(helper: &Helper, formatter: impl Fn(&str) -> String) -> Option<Vec<String>> { helper.param(0).and_then(|v| { v.value().as_array().and_then(|arr| { arr .iter() .map(|val| { val.as_str().map( #[allow(clippy::redundant_closure)] |s| formatter(s), ) }) .collect() }) }) } fn html_escape( helper: &Helper, _: &Handlebars, _ctx: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write(&handlebars::html_escape(get_str(helper))) .map_err(Into::into) } fn join( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( &get_str_array(helper, |s| s.to_string()) .ok_or_else(|| RenderError::new("`join` helper wasn't given an array"))? .join(", "), ) .map_err(Into::into) } fn quote_and_join( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( &get_str_array(helper, |s| format!("{s:?}")) .ok_or_else(|| RenderError::new("`quote-and-join` helper wasn't given an array"))? .join(", "), ) .map_err(Into::into) } fn quote_and_join_colon_prefix( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( &get_str_array(helper, |s| format!("{:?}", format!(":{s}"))) .ok_or_else(|| { RenderError::new("`quote-and-join-colon-prefix` helper wasn't given an array") })? .join(", "), ) .map_err(Into::into) } fn snake_case( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { use heck::ToSnekCase as _; out .write(&get_str(helper).to_snek_case()) .map_err(Into::into) } fn reverse_domain( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write(&util::reverse_domain(get_str(helper))) .map_err(Into::into) } fn reverse_domain_snake_case( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { use heck::ToSnekCase as _; out .write(&util::reverse_domain(get_str(helper)).to_snek_case()) .map_err(Into::into) } fn app_root(ctx: &Context) -> Result<&str, RenderError> { let app_root = ctx .data() .get("app") .ok_or_else(|| RenderError::new("`app` missing from template data."))? 
.get("root-dir") .ok_or_else(|| RenderError::new("`app.root-dir` missing from template data."))?; app_root .as_str() .ok_or_else(|| RenderError::new("`app.root-dir` contained invalid UTF-8.")) } fn prefix_path( helper: &Helper, _: &Handlebars, ctx: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( util::prefix_path(app_root(ctx)?, get_str(helper)) .to_str() .ok_or_else(|| { RenderError::new( "Either the `app.root-dir` or the specified path contained invalid UTF-8.", ) })?, ) .map_err(Into::into) } fn unprefix_
r: &Helper, _: &Handlebars, ctx: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( util::unprefix_path(app_root(ctx)?, get_str(helper)) .map_err(|_| { RenderError::new("Attempted to unprefix a path that wasn't in the app root dir.") })? .to_str() .ok_or_else(|| { RenderError::new( "Either the `app.root-dir` or the specified path contained invalid UTF-8.", ) })?, ) .map_err(Into::into) }
path( helpe
identifier_name
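For context on the template side, the helpers registered in `handlebars()` above are ordinary handlebars-rust helpers: a function with the `(Helper, Handlebars, Context, RenderContext, Output)` signature, boxed and registered under a name. A small self-contained sketch of a `join`-style helper being registered and exercised, assuming the same handlebars 4.x API this file imports plus `serde_json` for the test data:

```rust
use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError};
use serde_json::json;

// A `join` helper along the lines of the one above: expects an array parameter
// and writes its string elements separated by ", ".
fn join(
    helper: &Helper,
    _: &Handlebars,
    _: &Context,
    _: &mut RenderContext,
    out: &mut dyn Output,
) -> HelperResult {
    let items = helper
        .param(0)
        .and_then(|v| v.value().as_array().cloned())
        .ok_or_else(|| RenderError::new("`join` helper wasn't given an array"))?;
    let joined = items
        .iter()
        .filter_map(|v| v.as_str().map(str::to_string))
        .collect::<Vec<_>>()
        .join(", ");
    out.write(&joined).map_err(Into::into)
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut h = Handlebars::new();
    h.register_escape_fn(handlebars::no_escape);
    h.register_helper("join", Box::new(join));

    let rendered = h.render_template(
        "args: {{join args}}",
        &json!({ "args": ["android", "android-studio-script"] }),
    )?;
    assert_eq!(rendered, "args: android, android-studio-script");
    Ok(())
}
```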
init.rs
// Copyright 2019-2023 Tauri Programme within The Commons Conservancy // SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: MIT use super::{get_app, Target}; use crate::helpers::{config::get as get_tauri_config, template::JsonMap}; use crate::Result; use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError}; use tauri_mobile::{ android::{ config::Config as AndroidConfig, env::Env as AndroidEnv, target::Target as AndroidTarget, }, config::app::App, dot_cargo, target::TargetTrait as _, util::{ self, cli::{Report, TextWrapper}, }, }; use std::{ env::{current_dir, var, var_os}, path::PathBuf, }; pub fn command( target: Target, ci: bool, reinstall_deps: bool, skip_targets_install: bool, ) -> Result<()> { let wrapper = TextWrapper::with_splitter(textwrap::termwidth(), textwrap::NoHyphenation); exec( target, &wrapper, ci || var_os("CI").is_some(), reinstall_deps, skip_targets_install, ) .map_err(|e| anyhow::anyhow!("{:#}", e))?; Ok(()) } pub fn configure_cargo( app: &App, android: Option<(&mut AndroidEnv, &AndroidConfig)>, ) -> Result<()> { if let Some((env, config)) = android { for target in AndroidTarget::all().values() { let config = target.generate_cargo_config(config, env)?; let target_var_name = target.triple.replace('-', "_").to_uppercase(); if let Some(linker) = config.linker { env.base.insert_env_var( format!("CARGO_TARGET_{target_var_name}_LINKER"), linker.into(), ); } env.base.insert_env_var( format!("CARGO_TARGET_{target_var_name}_RUSTFLAGS"), config.rustflags.join(" ").into(), ); } } let mut dot_cargo = dot_cargo::DotCargo::load(app)?; // Mysteriously, builds that don't specify `--target` seem to fight over // the build cache with builds that use `--target`! This means that // alternating between i.e. `cargo run` and `cargo apple run` would // result in clean builds being made each time you switched... which is // pretty nightmarish. Specifying `build.target` in `.cargo/config` // fortunately has the same effect as specifying `--target`, so now we can // `cargo run` with peace of mind! // // This behavior could be explained here: // https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags dot_cargo.set_default_target(util::host_target_triple()?); dot_cargo.write(app).map_err(Into::into) }
#[allow(unused_variables)] non_interactive: bool, #[allow(unused_variables)] reinstall_deps: bool, skip_targets_install: bool, ) -> Result<App> { let current_dir = current_dir()?; let tauri_config = get_tauri_config(None)?; let tauri_config_guard = tauri_config.lock().unwrap(); let tauri_config_ = tauri_config_guard.as_ref().unwrap(); let app = get_app(tauri_config_); let (handlebars, mut map) = handlebars(&app); let mut args = std::env::args_os(); let mut binary = args .next() .map(|bin| { let path = PathBuf::from(&bin); if path.exists() { let absolute_path = util::prefix_path(&current_dir, path); return absolute_path.into(); } bin }) .unwrap_or_else(|| std::ffi::OsString::from("cargo")); let mut build_args = Vec::new(); for arg in args { let path = PathBuf::from(&arg); if path.exists() { let absolute_path = util::prefix_path(&current_dir, path); build_args.push(absolute_path.to_string_lossy().into_owned()); continue; } build_args.push(arg.to_string_lossy().into_owned()); if arg == "android" || arg == "ios" { break; } } build_args.push(target.ide_build_script_name().into()); let binary_path = PathBuf::from(&binary); let bin_stem = binary_path.file_stem().unwrap().to_string_lossy(); let r = regex::Regex::new("(nodejs|node)\\-?([1-9]*)*$").unwrap(); if r.is_match(&bin_stem) { if let Some(npm_execpath) = var_os("npm_execpath").map(PathBuf::from) { let manager_stem = npm_execpath.file_stem().unwrap().to_os_string(); let is_npm = manager_stem == "npm-cli"; let is_npx = manager_stem == "npx-cli"; binary = if is_npm { "npm".into() } else if is_npx { "npx".into() } else { manager_stem }; if!(build_args.is_empty() || is_npx) { // remove script path, we'll use `npm_lifecycle_event` instead build_args.remove(0); } if is_npm { build_args.insert(0, "--".into()); } if!is_npx { build_args.insert(0, var("npm_lifecycle_event").unwrap()); } if is_npm { build_args.insert(0, "run".into()); } } } map.insert("tauri-binary", binary.to_string_lossy()); map.insert("tauri-binary-args", &build_args); map.insert("tauri-binary-args-str", build_args.join(" ")); let app = match target { // Generate Android Studio project Target::Android => match AndroidEnv::new() { Ok(_env) => { let app = get_app(tauri_config_); let (config, metadata) = super::android::get_config(&app, tauri_config_, &Default::default()); map.insert("android", &config); super::android::project::gen( &config, &metadata, (handlebars, map), wrapper, skip_targets_install, )?; app } Err(err) => { if err.sdk_or_ndk_issue() { Report::action_request( " to initialize Android environment; Android support won't be usable until you fix the issue below and re-run `tauri android init`!", err, ) .print(wrapper); app } else { return Err(err.into()); } } }, #[cfg(target_os = "macos")] // Generate Xcode project Target::Ios => { let (config, metadata) = super::ios::get_config(&app, tauri_config_, &Default::default()); map.insert("apple", &config); super::ios::project::gen( &config, &metadata, (handlebars, map), wrapper, non_interactive, reinstall_deps, skip_targets_install, )?; app } }; Report::victory( "Project generated successfully!", "Make cool apps! 
🌻 🐕 🎉", ) .print(wrapper); Ok(app) } fn handlebars(app: &App) -> (Handlebars<'static>, JsonMap) { let mut h = Handlebars::new(); h.register_escape_fn(handlebars::no_escape); h.register_helper("html-escape", Box::new(html_escape)); h.register_helper("join", Box::new(join)); h.register_helper("quote-and-join", Box::new(quote_and_join)); h.register_helper( "quote-and-join-colon-prefix", Box::new(quote_and_join_colon_prefix), ); h.register_helper("snake-case", Box::new(snake_case)); h.register_helper("reverse-domain", Box::new(reverse_domain)); h.register_helper( "reverse-domain-snake-case", Box::new(reverse_domain_snake_case), ); // don't mix these up or very bad things will happen to all of us h.register_helper("prefix-path", Box::new(prefix_path)); h.register_helper("unprefix-path", Box::new(unprefix_path)); let mut map = JsonMap::default(); map.insert("app", app); (h, map) } fn get_str<'a>(helper: &'a Helper) -> &'a str { helper .param(0) .and_then(|v| v.value().as_str()) .unwrap_or("") } fn get_str_array(helper: &Helper, formatter: impl Fn(&str) -> String) -> Option<Vec<String>> { helper.param(0).and_then(|v| { v.value().as_array().and_then(|arr| { arr .iter() .map(|val| { val.as_str().map( #[allow(clippy::redundant_closure)] |s| formatter(s), ) }) .collect() }) }) } fn html_escape( helper: &Helper, _: &Handlebars, _ctx: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write(&handlebars::html_escape(get_str(helper))) .map_err(Into::into) } fn join( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( &get_str_array(helper, |s| s.to_string()) .ok_or_else(|| RenderError::new("`join` helper wasn't given an array"))? .join(", "), ) .map_err(Into::into) } fn quote_and_join( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( &get_str_array(helper, |s| format!("{s:?}")) .ok_or_else(|| RenderError::new("`quote-and-join` helper wasn't given an array"))? .join(", "), ) .map_err(Into::into) } fn quote_and_join_colon_prefix( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( &get_str_array(helper, |s| format!("{:?}", format!(":{s}"))) .ok_or_else(|| { RenderError::new("`quote-and-join-colon-prefix` helper wasn't given an array") })? .join(", "), ) .map_err(Into::into) } fn snake_case( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { use heck::ToSnekCase as _; out .write(&get_str(helper).to_snek_case()) .map_err(Into::into) } fn reverse_domain( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write(&util::reverse_domain(get_str(helper))) .map_err(Into::into) } fn reverse_domain_snake_case( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { use heck::ToSnekCase as _; out .write(&util::reverse_domain(get_str(helper)).to_snek_case()) .map_err(Into::into) } fn app_root(ctx: &Context) -> Result<&str, RenderError> { let app_root = ctx .data() .get("app") .ok_or_else(|| RenderError::new("`app` missing from template data."))? 
.get("root-dir") .ok_or_else(|| RenderError::new("`app.root-dir` missing from template data."))?; app_root .as_str() .ok_or_else(|| RenderError::new("`app.root-dir` contained invalid UTF-8.")) } fn prefix_path( helper: &Helper, _: &Handlebars, ctx: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( util::prefix_path(app_root(ctx)?, get_str(helper)) .to_str() .ok_or_else(|| { RenderError::new( "Either the `app.root-dir` or the specified path contained invalid UTF-8.", ) })?, ) .map_err(Into::into) } fn unprefix_path( helper: &Helper, _: &Handlebars, ctx: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( util::unprefix_path(app_root(ctx)?, get_str(helper)) .map_err(|_| { RenderError::new("Attempted to unprefix a path that wasn't in the app root dir.") })? .to_str() .ok_or_else(|| { RenderError::new( "Either the `app.root-dir` or the specified path contained invalid UTF-8.", ) })?, ) .map_err(Into::into) }
pub fn exec( target: Target, wrapper: &TextWrapper,
random_line_split
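The binary-detection block in `exec` is worth calling out: when the CLI is launched through a Node.js wrapper, `std::env::args_os()` reports the node binary rather than the package manager, so the code matches the bin stem against a node-ish pattern and then recovers the real manager from `npm_execpath`. A small sketch of just the stem check, using the same regex as above (the `looks_like_node` helper name is made up for illustration):

```rust
use regex::Regex;

/// Mirrors the check in `exec`: does the invoking binary's file stem look like
/// a Node.js runtime (`node`, `nodejs`, `node-18`, ...)? If so, the command to
/// re-run is reconstructed from `npm_execpath` / `npm_lifecycle_event` instead.
fn looks_like_node(bin_stem: &str) -> bool {
    let re = Regex::new(r"(nodejs|node)\-?([1-9]*)*$").unwrap();
    re.is_match(bin_stem)
}

fn main() {
    for stem in ["node", "nodejs", "node-18", "npm", "cargo"] {
        println!("{stem:>8} -> {}", looks_like_node(stem));
    }
    assert!(looks_like_node("node"));
    assert!(!looks_like_node("cargo"));
}
```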
init.rs
// Copyright 2019-2023 Tauri Programme within The Commons Conservancy // SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: MIT use super::{get_app, Target}; use crate::helpers::{config::get as get_tauri_config, template::JsonMap}; use crate::Result; use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError}; use tauri_mobile::{ android::{ config::Config as AndroidConfig, env::Env as AndroidEnv, target::Target as AndroidTarget, }, config::app::App, dot_cargo, target::TargetTrait as _, util::{ self, cli::{Report, TextWrapper}, }, }; use std::{ env::{current_dir, var, var_os}, path::PathBuf, }; pub fn command( target: Target, ci: bool, reinstall_deps: bool, skip_targets_install: bool, ) -> Result<()> { let wrapper = TextWrapper::with_splitter(textwrap::termwidth(), textwrap::NoHyphenation); exec( target, &wrapper, ci || var_os("CI").is_some(), reinstall_deps, skip_targets_install, ) .map_err(|e| anyhow::anyhow!("{:#}", e))?; Ok(()) } pub fn configure_cargo( app: &App, android: Option<(&mut AndroidEnv, &AndroidConfig)>, ) -> Result<()> { if let Some((env, config)) = android { for target in AndroidTarget::all().values() { let config = target.generate_cargo_config(config, env)?; let target_var_name = target.triple.replace('-', "_").to_uppercase(); if let Some(linker) = config.linker { env.base.insert_env_var( format!("CARGO_TARGET_{target_var_name}_LINKER"), linker.into(), ); } env.base.insert_env_var( format!("CARGO_TARGET_{target_var_name}_RUSTFLAGS"), config.rustflags.join(" ").into(), ); } } let mut dot_cargo = dot_cargo::DotCargo::load(app)?; // Mysteriously, builds that don't specify `--target` seem to fight over // the build cache with builds that use `--target`! This means that // alternating between i.e. `cargo run` and `cargo apple run` would // result in clean builds being made each time you switched... which is // pretty nightmarish. Specifying `build.target` in `.cargo/config` // fortunately has the same effect as specifying `--target`, so now we can // `cargo run` with peace of mind! 
// // This behavior could be explained here: // https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags dot_cargo.set_default_target(util::host_target_triple()?); dot_cargo.write(app).map_err(Into::into) } pub fn exec( target: Target, wrapper: &TextWrapper, #[allow(unused_variables)] non_interactive: bool, #[allow(unused_variables)] reinstall_deps: bool, skip_targets_install: bool, ) -> Result<App> { let current_dir = current_dir()?; let tauri_config = get_tauri_config(None)?; let tauri_config_guard = tauri_config.lock().unwrap(); let tauri_config_ = tauri_config_guard.as_ref().unwrap(); let app = get_app(tauri_config_); let (handlebars, mut map) = handlebars(&app); let mut args = std::env::args_os(); let mut binary = args .next() .map(|bin| { let path = PathBuf::from(&bin); if path.exists() { let absolute_path = util::prefix_path(&current_dir, path); return absolute_path.into(); } bin }) .unwrap_or_else(|| std::ffi::OsString::from("cargo")); let mut build_args = Vec::new(); for arg in args { let path = PathBuf::from(&arg); if path.exists() { let absolute_path = util::prefix_path(&current_dir, path); build_args.push(absolute_path.to_string_lossy().into_owned()); continue; } build_args.push(arg.to_string_lossy().into_owned()); if arg == "android" || arg == "ios" { break; } } build_args.push(target.ide_build_script_name().into()); let binary_path = PathBuf::from(&binary); let bin_stem = binary_path.file_stem().unwrap().to_string_lossy(); let r = regex::Regex::new("(nodejs|node)\\-?([1-9]*)*$").unwrap(); if r.is_match(&bin_stem)
build_args.insert(0, var("npm_lifecycle_event").unwrap()); } if is_npm { build_args.insert(0, "run".into()); } } } map.insert("tauri-binary", binary.to_string_lossy()); map.insert("tauri-binary-args", &build_args); map.insert("tauri-binary-args-str", build_args.join(" ")); let app = match target { // Generate Android Studio project Target::Android => match AndroidEnv::new() { Ok(_env) => { let app = get_app(tauri_config_); let (config, metadata) = super::android::get_config(&app, tauri_config_, &Default::default()); map.insert("android", &config); super::android::project::gen( &config, &metadata, (handlebars, map), wrapper, skip_targets_install, )?; app } Err(err) => { if err.sdk_or_ndk_issue() { Report::action_request( " to initialize Android environment; Android support won't be usable until you fix the issue below and re-run `tauri android init`!", err, ) .print(wrapper); app } else { return Err(err.into()); } } }, #[cfg(target_os = "macos")] // Generate Xcode project Target::Ios => { let (config, metadata) = super::ios::get_config(&app, tauri_config_, &Default::default()); map.insert("apple", &config); super::ios::project::gen( &config, &metadata, (handlebars, map), wrapper, non_interactive, reinstall_deps, skip_targets_install, )?; app } }; Report::victory( "Project generated successfully!", "Make cool apps! 🌻 🐕 🎉", ) .print(wrapper); Ok(app) } fn handlebars(app: &App) -> (Handlebars<'static>, JsonMap) { let mut h = Handlebars::new(); h.register_escape_fn(handlebars::no_escape); h.register_helper("html-escape", Box::new(html_escape)); h.register_helper("join", Box::new(join)); h.register_helper("quote-and-join", Box::new(quote_and_join)); h.register_helper( "quote-and-join-colon-prefix", Box::new(quote_and_join_colon_prefix), ); h.register_helper("snake-case", Box::new(snake_case)); h.register_helper("reverse-domain", Box::new(reverse_domain)); h.register_helper( "reverse-domain-snake-case", Box::new(reverse_domain_snake_case), ); // don't mix these up or very bad things will happen to all of us h.register_helper("prefix-path", Box::new(prefix_path)); h.register_helper("unprefix-path", Box::new(unprefix_path)); let mut map = JsonMap::default(); map.insert("app", app); (h, map) } fn get_str<'a>(helper: &'a Helper) -> &'a str { helper .param(0) .and_then(|v| v.value().as_str()) .unwrap_or("") } fn get_str_array(helper: &Helper, formatter: impl Fn(&str) -> String) -> Option<Vec<String>> { helper.param(0).and_then(|v| { v.value().as_array().and_then(|arr| { arr .iter() .map(|val| { val.as_str().map( #[allow(clippy::redundant_closure)] |s| formatter(s), ) }) .collect() }) }) } fn html_escape( helper: &Helper, _: &Handlebars, _ctx: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write(&handlebars::html_escape(get_str(helper))) .map_err(Into::into) } fn join( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( &get_str_array(helper, |s| s.to_string()) .ok_or_else(|| RenderError::new("`join` helper wasn't given an array"))? .join(", "), ) .map_err(Into::into) } fn quote_and_join( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( &get_str_array(helper, |s| format!("{s:?}")) .ok_or_else(|| RenderError::new("`quote-and-join` helper wasn't given an array"))? 
.join(", "), ) .map_err(Into::into) } fn quote_and_join_colon_prefix( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( &get_str_array(helper, |s| format!("{:?}", format!(":{s}"))) .ok_or_else(|| { RenderError::new("`quote-and-join-colon-prefix` helper wasn't given an array") })? .join(", "), ) .map_err(Into::into) } fn snake_case( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { use heck::ToSnekCase as _; out .write(&get_str(helper).to_snek_case()) .map_err(Into::into) } fn reverse_domain( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write(&util::reverse_domain(get_str(helper))) .map_err(Into::into) } fn reverse_domain_snake_case( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { use heck::ToSnekCase as _; out .write(&util::reverse_domain(get_str(helper)).to_snek_case()) .map_err(Into::into) } fn app_root(ctx: &Context) -> Result<&str, RenderError> { let app_root = ctx .data() .get("app") .ok_or_else(|| RenderError::new("`app` missing from template data."))? .get("root-dir") .ok_or_else(|| RenderError::new("`app.root-dir` missing from template data."))?; app_root .as_str() .ok_or_else(|| RenderError::new("`app.root-dir` contained invalid UTF-8.")) } fn prefix_path( helper: &Helper, _: &Handlebars, ctx: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( util::prefix_path(app_root(ctx)?, get_str(helper)) .to_str() .ok_or_else(|| { RenderError::new( "Either the `app.root-dir` or the specified path contained invalid UTF-8.", ) })?, ) .map_err(Into::into) } fn unprefix_path( helper: &Helper, _: &Handlebars, ctx: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( util::unprefix_path(app_root(ctx)?, get_str(helper)) .map_err(|_| { RenderError::new("Attempted to unprefix a path that wasn't in the app root dir.") })? .to_str() .ok_or_else(|| { RenderError::new( "Either the `app.root-dir` or the specified path contained invalid UTF-8.", ) })?, ) .map_err(Into::into) }
{ if let Some(npm_execpath) = var_os("npm_execpath").map(PathBuf::from) { let manager_stem = npm_execpath.file_stem().unwrap().to_os_string(); let is_npm = manager_stem == "npm-cli"; let is_npx = manager_stem == "npx-cli"; binary = if is_npm { "npm".into() } else if is_npx { "npx".into() } else { manager_stem }; if !(build_args.is_empty() || is_npx) { // remove script path, we'll use `npm_lifecycle_event` instead build_args.remove(0); } if is_npm { build_args.insert(0, "--".into()); } if !is_npx {
conditional_block
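One small detail in `configure_cargo` that is easy to miss: the per-target env var names Cargo reads are derived mechanically from the target triple (dashes mapped to underscores, then upper-cased) before being spliced into `CARGO_TARGET_<TRIPLE>_LINKER` and `..._RUSTFLAGS`. A sketch of that derivation with an example Android triple; the helper function here is illustrative, not part of the file:

```rust
/// Derive the env var names Cargo expects for a given target triple,
/// the same way `configure_cargo` does it.
fn cargo_target_env_vars(triple: &str) -> (String, String) {
    let target_var_name = triple.replace('-', "_").to_uppercase();
    (
        format!("CARGO_TARGET_{target_var_name}_LINKER"),
        format!("CARGO_TARGET_{target_var_name}_RUSTFLAGS"),
    )
}

fn main() {
    let (linker, rustflags) = cargo_target_env_vars("aarch64-linux-android");
    assert_eq!(linker, "CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER");
    assert_eq!(rustflags, "CARGO_TARGET_AARCH64_LINUX_ANDROID_RUSTFLAGS");
}
```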
init.rs
// Copyright 2019-2023 Tauri Programme within The Commons Conservancy // SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: MIT use super::{get_app, Target}; use crate::helpers::{config::get as get_tauri_config, template::JsonMap}; use crate::Result; use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError}; use tauri_mobile::{ android::{ config::Config as AndroidConfig, env::Env as AndroidEnv, target::Target as AndroidTarget, }, config::app::App, dot_cargo, target::TargetTrait as _, util::{ self, cli::{Report, TextWrapper}, }, }; use std::{ env::{current_dir, var, var_os}, path::PathBuf, }; pub fn command( target: Target, ci: bool, reinstall_deps: bool, skip_targets_install: bool, ) -> Result<()> { let wrapper = TextWrapper::with_splitter(textwrap::termwidth(), textwrap::NoHyphenation); exec( target, &wrapper, ci || var_os("CI").is_some(), reinstall_deps, skip_targets_install, ) .map_err(|e| anyhow::anyhow!("{:#}", e))?; Ok(()) } pub fn configure_cargo( app: &App, android: Option<(&mut AndroidEnv, &AndroidConfig)>, ) -> Result<()> { if let Some((env, config)) = android { for target in AndroidTarget::all().values() { let config = target.generate_cargo_config(config, env)?; let target_var_name = target.triple.replace('-', "_").to_uppercase(); if let Some(linker) = config.linker { env.base.insert_env_var( format!("CARGO_TARGET_{target_var_name}_LINKER"), linker.into(), ); } env.base.insert_env_var( format!("CARGO_TARGET_{target_var_name}_RUSTFLAGS"), config.rustflags.join(" ").into(), ); } } let mut dot_cargo = dot_cargo::DotCargo::load(app)?; // Mysteriously, builds that don't specify `--target` seem to fight over // the build cache with builds that use `--target`! This means that // alternating between i.e. `cargo run` and `cargo apple run` would // result in clean builds being made each time you switched... which is // pretty nightmarish. Specifying `build.target` in `.cargo/config` // fortunately has the same effect as specifying `--target`, so now we can // `cargo run` with peace of mind! 
// // This behavior could be explained here: // https://doc.rust-lang.org/cargo/reference/config.html#buildrustflags dot_cargo.set_default_target(util::host_target_triple()?); dot_cargo.write(app).map_err(Into::into) } pub fn exec( target: Target, wrapper: &TextWrapper, #[allow(unused_variables)] non_interactive: bool, #[allow(unused_variables)] reinstall_deps: bool, skip_targets_install: bool, ) -> Result<App> { let current_dir = current_dir()?; let tauri_config = get_tauri_config(None)?; let tauri_config_guard = tauri_config.lock().unwrap(); let tauri_config_ = tauri_config_guard.as_ref().unwrap(); let app = get_app(tauri_config_); let (handlebars, mut map) = handlebars(&app); let mut args = std::env::args_os(); let mut binary = args .next() .map(|bin| { let path = PathBuf::from(&bin); if path.exists() { let absolute_path = util::prefix_path(&current_dir, path); return absolute_path.into(); } bin }) .unwrap_or_else(|| std::ffi::OsString::from("cargo")); let mut build_args = Vec::new(); for arg in args { let path = PathBuf::from(&arg); if path.exists() { let absolute_path = util::prefix_path(&current_dir, path); build_args.push(absolute_path.to_string_lossy().into_owned()); continue; } build_args.push(arg.to_string_lossy().into_owned()); if arg == "android" || arg == "ios" { break; } } build_args.push(target.ide_build_script_name().into()); let binary_path = PathBuf::from(&binary); let bin_stem = binary_path.file_stem().unwrap().to_string_lossy(); let r = regex::Regex::new("(nodejs|node)\\-?([1-9]*)*$").unwrap(); if r.is_match(&bin_stem) { if let Some(npm_execpath) = var_os("npm_execpath").map(PathBuf::from) { let manager_stem = npm_execpath.file_stem().unwrap().to_os_string(); let is_npm = manager_stem == "npm-cli"; let is_npx = manager_stem == "npx-cli"; binary = if is_npm { "npm".into() } else if is_npx { "npx".into() } else { manager_stem }; if!(build_args.is_empty() || is_npx) { // remove script path, we'll use `npm_lifecycle_event` instead build_args.remove(0); } if is_npm { build_args.insert(0, "--".into()); } if!is_npx { build_args.insert(0, var("npm_lifecycle_event").unwrap()); } if is_npm { build_args.insert(0, "run".into()); } } } map.insert("tauri-binary", binary.to_string_lossy()); map.insert("tauri-binary-args", &build_args); map.insert("tauri-binary-args-str", build_args.join(" ")); let app = match target { // Generate Android Studio project Target::Android => match AndroidEnv::new() { Ok(_env) => { let app = get_app(tauri_config_); let (config, metadata) = super::android::get_config(&app, tauri_config_, &Default::default()); map.insert("android", &config); super::android::project::gen( &config, &metadata, (handlebars, map), wrapper, skip_targets_install, )?; app } Err(err) => { if err.sdk_or_ndk_issue() { Report::action_request( " to initialize Android environment; Android support won't be usable until you fix the issue below and re-run `tauri android init`!", err, ) .print(wrapper); app } else { return Err(err.into()); } } }, #[cfg(target_os = "macos")] // Generate Xcode project Target::Ios => { let (config, metadata) = super::ios::get_config(&app, tauri_config_, &Default::default()); map.insert("apple", &config); super::ios::project::gen( &config, &metadata, (handlebars, map), wrapper, non_interactive, reinstall_deps, skip_targets_install, )?; app } }; Report::victory( "Project generated successfully!", "Make cool apps! 
🌻 🐕 🎉", ) .print(wrapper); Ok(app) } fn handlebars(app: &App) -> (Handlebars<'static>, JsonMap) { let mut h = Handlebars::new(); h.register_escape_fn(handlebars::no_escape); h.register_helper("html-escape", Box::new(html_escape)); h.register_helper("join", Box::new(join)); h.register_helper("quote-and-join", Box::new(quote_and_join)); h.register_helper( "quote-and-join-colon-prefix", Box::new(quote_and_join_colon_prefix), ); h.register_helper("snake-case", Box::new(snake_case)); h.register_helper("reverse-domain", Box::new(reverse_domain)); h.register_helper( "reverse-domain-snake-case", Box::new(reverse_domain_snake_case), ); // don't mix these up or very bad things will happen to all of us h.register_helper("prefix-path", Box::new(prefix_path)); h.register_helper("unprefix-path", Box::new(unprefix_path)); let mut map = JsonMap::default(); map.insert("app", app); (h, map) } fn get_str<'a>(helper: &'a Helper) -> &'a str { helper .param(0) .and_then(|v| v.value().as_str()) .unwrap_or("") } fn get_str_array(helper: &Helper, formatter: impl Fn(&str) -> String) -> Option<Vec<String>> { helper.param(0).and_then(|v| { v.value().as_array().and_then(|arr| { arr .iter() .map(|val| { val.as_str().map( #[allow(clippy::redundant_closure)] |s| formatter(s), ) }) .collect() }) }) } fn html_escape( helper: &Helper, _: &Handlebars, _ctx: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out
( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( &get_str_array(helper, |s| s.to_string()) .ok_or_else(|| RenderError::new("`join` helper wasn't given an array"))? .join(", "), ) .map_err(Into::into) } fn quote_and_join( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( &get_str_array(helper, |s| format!("{s:?}")) .ok_or_else(|| RenderError::new("`quote-and-join` helper wasn't given an array"))? .join(", "), ) .map_err(Into::into) } fn quote_and_join_colon_prefix( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( &get_str_array(helper, |s| format!("{:?}", format!(":{s}"))) .ok_or_else(|| { RenderError::new("`quote-and-join-colon-prefix` helper wasn't given an array") })? .join(", "), ) .map_err(Into::into) } fn snake_case( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { use heck::ToSnekCase as _; out .write(&get_str(helper).to_snek_case()) .map_err(Into::into) } fn reverse_domain( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write(&util::reverse_domain(get_str(helper))) .map_err(Into::into) } fn reverse_domain_snake_case( helper: &Helper, _: &Handlebars, _: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { use heck::ToSnekCase as _; out .write(&util::reverse_domain(get_str(helper)).to_snek_case()) .map_err(Into::into) } fn app_root(ctx: &Context) -> Result<&str, RenderError> { let app_root = ctx .data() .get("app") .ok_or_else(|| RenderError::new("`app` missing from template data."))? .get("root-dir") .ok_or_else(|| RenderError::new("`app.root-dir` missing from template data."))?; app_root .as_str() .ok_or_else(|| RenderError::new("`app.root-dir` contained invalid UTF-8.")) } fn prefix_path( helper: &Helper, _: &Handlebars, ctx: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( util::prefix_path(app_root(ctx)?, get_str(helper)) .to_str() .ok_or_else(|| { RenderError::new( "Either the `app.root-dir` or the specified path contained invalid UTF-8.", ) })?, ) .map_err(Into::into) } fn unprefix_path( helper: &Helper, _: &Handlebars, ctx: &Context, _: &mut RenderContext, out: &mut dyn Output, ) -> HelperResult { out .write( util::unprefix_path(app_root(ctx)?, get_str(helper)) .map_err(|_| { RenderError::new("Attempted to unprefix a path that wasn't in the app root dir.") })? .to_str() .ok_or_else(|| { RenderError::new( "Either the `app.root-dir` or the specified path contained invalid UTF-8.", ) })?, ) .map_err(Into::into) }
.write(&handlebars::html_escape(get_str(helper))) .map_err(Into::into) } fn join
identifier_body
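The `prefix-path` / `unprefix-path` helpers above delegate to `tauri_mobile::util`, but the underlying idea is just joining a relative template path onto `app.root-dir` and stripping it back off. A rough std-only approximation for illustration; the real `prefix_path`/`unprefix_path` may handle edge cases differently:

```rust
use std::path::{Path, PathBuf};

/// Roughly what the `prefix-path` template helper does: make a path absolute
/// relative to the app root (absolute paths pass through unchanged).
fn prefix_path(root: &Path, path: &Path) -> PathBuf {
    if path.is_absolute() {
        path.to_path_buf()
    } else {
        root.join(path)
    }
}

/// Roughly what `unprefix-path` does: strip the app root back off, failing
/// (here: `None`) if the path isn't inside the root.
fn unprefix_path(root: &Path, path: &Path) -> Option<PathBuf> {
    path.strip_prefix(root).ok().map(Path::to_path_buf)
}

fn main() {
    let root = Path::new("/home/dev/my-app");
    let abs = prefix_path(root, Path::new("src-tauri/tauri.conf.json"));
    assert_eq!(abs, Path::new("/home/dev/my-app/src-tauri/tauri.conf.json"));
    assert_eq!(
        unprefix_path(root, &abs).as_deref(),
        Some(Path::new("src-tauri/tauri.conf.json"))
    );
}
```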
consensus.rs
// Copyright 2020 The Grin Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! All the rules required for a cryptocurrency to have reach consensus across //! the whole network are complex and hard to completely isolate. Some can be //! simple parameters (like block reward), others complex algorithms (like //! Merkle sum trees or reorg rules). However, as long as they're simple //! enough, consensus-relevant constants and short functions should be kept //! here. // Proof of existence: // txid: d043f5cc3e9e135e0bafb010521813668d5bc86eef27c0e30232287fd7f5a85f // document hash: 9b6372224719c5531e0ee1fcc36e7c9e29def9edd22e61aa60c014927191e58a use crate::core::block::HeaderVersion; use crate::core::hash::{Hash, ZERO_HASH}; use crate::global; use crate::pow::Difficulty; use std::cmp::{max, min}; /// A grin is divisible to 10^9, following the SI prefixes pub const GRIN_BASE: u64 = 1_000_000_000; /// Milligrin, a thousand of a grin pub const MILLI_GRIN: u64 = GRIN_BASE / 1_000; /// Microgrin, a thousand of a milligrin pub const MICRO_GRIN: u64 = MILLI_GRIN / 1_000; /// Nanogrin, smallest unit, takes a billion to make a grin pub const NANO_GRIN: u64 = 1; /// Block interval, in seconds, the network will tune its next_target for. Note /// that we may reduce this value in the future as we get more data on mining /// with Cuckoo Cycle, networks improve and block propagation is optimized /// (adjusting the reward accordingly). pub const BLOCK_TIME_SEC: u64 = 60; /// Start at BTC block 717,000 (snapshot) which should occur around /// Jan 3, 2022. This block will reward 6.25 BTC. /// We allocate the remaining of the 6.25 blocks to our /// "long tail" which will last 1000 years and start with 3.25. /// So initial reward is 0.3125 BCMWs for 1,224,600 more blocks. /// This is due to the 1 minute blocks instead of the 10 minute of BTC. /// This is approximately Bitcoin's halving schedule, until /// the 8th halving, after which the long tail will distribute the /// remainder of the BCMWs over 1000 years. At block 717,000 there will be /// 19,246,875 BTC. /// Note that pre-launch we may recalibrate these numbers /// a little. The goal will be to get exactly 21m BCMWs, have /// a 1000 year long tail, and do a snapshot on January 3, 2022. /// Snapshot includes 18,918,750,000,000,000 NanoBCMWs /// Gensis reward is 0. 
pub const REWARD0: u64 = 0; /// First reward 1,224,600 blocks pub const REWARD1: u64 = 312_500_000; // 382,687,500,000,000 NanoBCMWs /// Second reward for 2,100,000 blocks pub const REWARD2: u64 = 156_250_000; // 328,125,000,000,000 NanoBCMWs /// Third reward for 2,100,000 blocks pub const REWARD3: u64 = 78_125_000; // 164,062,500,000,000 NanoBCMWs /// Fourth reward for 2,100,000 blocks pub const REWARD4: u64 = 39_062_500; // 82,031,250,000,000 NanoBCMWs /// Fifth reward for 2,100,000 blocks pub const REWARD5: u64 = 19_531_250; // 41,015,625,000,000 NanoBCMWs /// Sixth reward for 2,100,000 blocks pub const REWARD6: u64 = 9_675_625; // 20,507,812,500,000 NanoBCMWs /// Seventh reward for 2,100,000 blocks pub const REWARD7: u64 = 4_882_812; // 10,253,905,200,000 NanoBCMWs /// Eigth reward for 525,600,000 blocks pub const REWARD8: u64 = 2_000_000; // 105,120,000,0000,000 NanoBCMWs /// Actual block reward for a given total fee amount pub fn reward(fee: u64, height: u64) -> u64 { calc_block_reward(height).saturating_add(fee) } fn get_epoch_start(num: u64) -> u64 { if num == 1 { 1 } else if num == 2 { 1_224_600 } else if num == 3 { 3_324_600 } else if num == 4 { 5_424_600 } else if num == 5 { 7_524_600 } else if num == 6 { 9_624_600 } else if num == 7 { 11_724_600 } else if num == 8 { 13_824_600 } else if num == 9 { 539_424_600 } else { // shouldn't get here. 0 } } /// Calculate block reward based on height pub fn calc_block_reward(height: u64) -> u64 { if height == 0 { // reward for genesis block REWARD0 } else if height <= get_epoch_start(2) { REWARD1 } else if height <= get_epoch_start(3) { REWARD2 } else if height <= get_epoch_start(4) { REWARD3 } else if height <= get_epoch_start(5) { REWARD4 } else if height <= get_epoch_start(6) { REWARD5 } else if height <= get_epoch_start(7) { REWARD6 } else if height <= get_epoch_start(8) { REWARD7 } else if height <= get_epoch_start(9) { REWARD8 } else { 0 // no reward after this. 
} } fn get_overage_offset_start_epoch(num: u64) -> u64 { if num == 1 { REWARD0 } else if num == 2 { get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 3 { get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 4 { get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 5 { get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 6 { get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 7 { get_epoch_start(7) * REWARD6 + get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 8 { get_epoch_start(8) * REWARD7 + get_epoch_start(7) * REWARD6 + get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 9 { get_epoch_start(9) * REWARD8 + get_epoch_start(8) * REWARD7 + get_epoch_start(7) * REWARD6 + get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else { // should not get here 1 } } /// Calculate block overage based on height and claimed BTCUtxos pub fn calc_block_overage(height: u64) -> u64 { if height == 0 { 0 } else if height <= get_epoch_start(2) { (REWARD1 * height) + get_overage_offset_start_epoch(1) } else if height <= get_epoch_start(3) { (REWARD2 * (height - get_epoch_start(2))) + get_overage_offset_start_epoch(2) } else if height <= get_epoch_start(4) { (REWARD3 * (height - get_epoch_start(3))) + get_overage_offset_start_epoch(3) } else if height <= get_epoch_start(5) { (REWARD4 * (height - get_epoch_start(4))) + get_overage_offset_start_epoch(4) } else if height <= get_epoch_start(6) { (REWARD5 * (height - get_epoch_start(5))) + get_overage_offset_start_epoch(5) } else if height <= get_epoch_start(7) { (REWARD6 * (height - get_epoch_start(6))) + get_overage_offset_start_epoch(6) } else if height <= get_epoch_start(8) { (REWARD7 * (height - get_epoch_start(7))) + get_overage_offset_start_epoch(7) } else if height <= get_epoch_start(9) { (REWARD8 * (height - get_epoch_start(8))) + get_overage_offset_start_epoch(8) } else { // we exit here. Up to future generations to decide // how to handle. std::process::exit(0); } } /// an hour in seconds pub const HOUR_SEC: u64 = 60 * 60; /// Nominal height for standard time intervals, hour is 60 blocks pub const HOUR_HEIGHT: u64 = HOUR_SEC / BLOCK_TIME_SEC; /// A day is 1440 blocks pub const DAY_HEIGHT: u64 = 24 * HOUR_HEIGHT; /// A week is 10_080 blocks pub const WEEK_HEIGHT: u64 = 7 * DAY_HEIGHT; /// A year is 524_160 blocks pub const YEAR_HEIGHT: u64 = 52 * WEEK_HEIGHT; /// Number of blocks before a coinbase matures and can be spent pub const COINBASE_MATURITY: u64 = DAY_HEIGHT; /// We use all C29d from the start pub fn secondary_pow_ratio(_height: u64) -> u64 { 100 } /// Cuckoo-cycle proof size (cycle length) pub const PROOFSIZE: usize = 42; /// Default Cuckatoo Cycle edge_bits, used for mining and validating. pub const DEFAULT_MIN_EDGE_BITS: u8 = 31; /// Cuckaroo* proof-of-work edge_bits, meant to be ASIC resistant. 
pub const SECOND_POW_EDGE_BITS: u8 = 29; /// Original reference edge_bits to compute difficulty factors for higher /// Cuckoo graph sizes, changing this would hard fork pub const BASE_EDGE_BITS: u8 = 24; /// Default number of blocks in the past when cross-block cut-through will start /// happening. Needs to be long enough to not overlap with a long reorg. /// Rational /// behind the value is the longest bitcoin fork was about 30 blocks, so 5h. We /// add an order of magnitude to be safe and round to 7x24h of blocks to make it /// easier to reason about. pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32; /// Default number of blocks in the past to determine the height where we request /// a txhashset (and full blocks from). Needs to be long enough to not overlap with /// a long reorg. /// Rational behind the value is the longest bitcoin fork was about 30 blocks, so 5h. /// We add an order of magnitude to be safe and round to 2x24h of blocks to make it /// easier to reason about. pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32; /// Weight of an input when counted against the max block weight capacity pub const INPUT_WEIGHT: u64 = 1; /// Weight of an output when counted against the max block weight capacity pub const OUTPUT_WEIGHT: u64 = 21; /// Weight of a kernel when counted against the max block weight capacity pub const KERNEL_WEIGHT: u64 = 3; /// Total maximum block weight. At current sizes, this means a maximum /// theoretical size of: /// * `(674 + 33 + 1) * (40_000 / 21) = 1_348_571` for a block with only outputs /// * `(1 + 8 + 8 + 33 + 64) * (40_000 / 3) = 1_520_000` for a block with only kernels /// * `(1 + 33) * 40_000 = 1_360_000` for a block with only inputs /// /// Regardless of the relative numbers of inputs/outputs/kernels in a block the maximum /// block size is around 1.5MB /// For a block full of "average" txs (2 inputs, 2 outputs, 1 kernel) we have - /// `(1 * 2) + (21 * 2) + (3 * 1) = 47` (weight per tx) /// `40_000 / 47 = 851` (txs per block) /// pub const MAX_BLOCK_WEIGHT: u64 = 40_000; /// Fork every 6 months. pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2; /// Testnet first hard fork height, set to happen around 2019-06-20 pub const TESTNET_FIRST_HARD_FORK: u64 = 185_040; /// Testnet second hard fork height, set to happen around 2019-12-19 pub const TESTNET_SECOND_HARD_FORK: u64 = 298_080; /// Testnet second hard fork height, set to happen around 2020-06-20 pub const TESTNET_THIRD_HARD_FORK: u64 = 552_960; /// Testnet second hard fork height, set to happen around 2020-12-8 pub const TESTNET_FOURTH_HARD_FORK: u64 = 642_240; /// Fork every 3 blocks pub const TESTING_HARD_FORK_INTERVAL: u64 = 3; /// Compute possible block version at a given height, /// currently no hard forks. pub fn header_version(_height: u64) -> HeaderVersion { HeaderVersion(1) } /// Check whether the block version is valid at a given height, implements /// 6 months interval scheduled hard forks for the first 2 years. 
pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool { version == header_version(height) } /// Number of blocks used to calculate difficulty adjustment by Damped Moving Average pub const DMA_WINDOW: u64 = HOUR_HEIGHT; /// Difficulty adjustment half life (actually, 60s * number of 0s-blocks to raise diff by factor e) is 4 hours pub const WTEMA_HALF_LIFE: u64 = 4 * HOUR_SEC; /// Average time span of the DMA difficulty adjustment window pub const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC; /// Clamp factor to use for DMA difficulty adjustment /// Limit value to within this factor of goal pub const CLAMP_FACTOR: u64 = 2; /// Dampening factor to use for DMA difficulty adjustment pub const DMA_DAMP_FACTOR: u64 = 3; /// Dampening factor to use for AR scale calculation. pub const AR_SCALE_DAMP_FACTOR: u64 = 13; /// Compute weight of a graph as number of siphash bits defining the graph /// The height dependence allows a 30-week linear transition from C31+ to C32+ starting after 1 year pub fn graph_weight(height: u64, edge_bits: u8) -> u64 { let mut xpr_edge_bits = edge_bits as u64; let expiry_height = YEAR_HEIGHT; if edge_bits == 31 && height >= expiry_height { xpr_edge_bits = xpr_edge_bits.saturating_sub(1 + (height - expiry_height) / WEEK_HEIGHT); } // For C31 xpr_edge_bits reaches 0 at height YEAR_HEIGHT + 30 * WEEK_HEIGHT // 30 weeks after Jan 15, 2020 would be Aug 12, 2020 (2u64 << (edge_bits - global::base_edge_bits()) as u64) * xpr_edge_bits } /// minimum solution difficulty after HardFork4 when PoW becomes primary only Cuckatoo32+ pub const C32_GRAPH_WEIGHT: u64 = (2u64 << (32 - BASE_EDGE_BITS) as u64) * 32; // 16384 /// Minimum difficulty, enforced in Damped Moving Average diff retargetting /// avoids getting stuck when trying to increase difficulty subject to dampening pub const MIN_DMA_DIFFICULTY: u64 = DMA_DAMP_FACTOR; /// Minimum scaling factor for AR pow, enforced in diff retargetting /// avoids getting stuck when trying to increase ar_scale subject to dampening pub const MIN_AR_SCALE: u64 = AR_SCALE_DAMP_FACTOR; /// unit difficulty, equal to graph_weight(SECOND_POW_EDGE_BITS) pub const UNIT_DIFFICULTY: u64 = ((2 as u64) << (SECOND_POW_EDGE_BITS - BASE_EDGE_BITS)) * (SECOND_POW_EDGE_BITS as u64); /// The initial difficulty at launch. This should be over-estimated /// and difficulty should come down at launch rather than up /// Currently grossly over-estimated at 10% of current /// ethereum GPUs (assuming 1GPU can solve a block at diff 1 in one block interval) pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY; /// Minimal header information required for the Difficulty calculation to /// take place #[derive(Clone, Debug, Eq, PartialEq)] pub struct HeaderInfo { /// Block hash, ZERO_HASH when this is a sythetic entry. 
pub block_hash: Hash, /// Timestamp of the header, 1 when not used (returned info) pub timestamp: u64, /// Network difficulty or next difficulty to use pub difficulty: Difficulty, /// Network secondary PoW factor or factor to use pub secondary_scaling: u32, /// Whether the header is a secondary proof of work pub is_secondary: bool, } impl HeaderInfo { /// Default constructor pub fn new( block_hash: Hash, timestamp: u64, difficulty: Difficulty, secondary_scaling: u32, is_secondary: bool, ) -> HeaderInfo { HeaderInfo { block_hash, timestamp, difficulty, secondary_scaling, is_secondary, } } /// Constructor from a timestamp and difficulty, setting a default secondary /// PoW factor pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo { HeaderInfo { block_hash: ZERO_HASH, timestamp, difficulty, secondary_scaling: global::initial_graph_weight(), is_secondary: true, } } /// Constructor from a difficulty and secondary factor, setting a default /// timestamp pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo { HeaderInfo { block_hash: ZERO_HASH, timestamp: 1, difficulty, secondary_scaling, is_secondary: true, } } } /// Move value linearly toward a goal pub fn damp(actual: u64, goal: u64, damp_factor: u64) -> u64 { (actual + (damp_factor - 1) * goal) / damp_factor } /// limit value to be within some factor from a goal pub fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 { max(goal / clamp_factor, min(actual, goal * clamp_factor)) } /// Computes the proof-of-work difficulty that the next block should comply with. /// Takes an iterator over past block headers information, from latest /// (highest height) to oldest (lowest height). /// Uses either the old dma DAA or, starting from HF4, the new wtema DAA pub fn next_difficulty<T>(height: u64, cursor: T) -> HeaderInfo where T: IntoIterator<Item = HeaderInfo>, { if header_version(height) < HeaderVersion(5) { next_dma_difficulty(height, cursor) } else { next_wtema_difficulty(height, cursor) } } /// Difficulty calculation based on a Damped Moving Average /// of difficulty over a window of DMA_WINDOW blocks. /// The corresponding timespan is calculated /// by using the difference between the timestamps at the beginning /// and the end of the window, with a damping toward the target block time. pub fn next_dma_difficulty<T>(height: u64, cursor: T) -> HeaderInfo where T: IntoIterator<Item = HeaderInfo>, { // Create vector of difficulty data running from earliest // to latest, and pad with simulated pre-genesis data to allow earlier // adjustment if there isn't enough window data length will be // DMA_WINDOW + 1 (for initial block time bound) let diff_data = global::difficulty_data_to_vector(cursor); // First, get the ratio of secondary PoW vs primary, skipping initial header let sec_pow_scaling = secondary_pow_scaling(height, &diff_data[1..]); // Get the timestamp delta across the window let ts_delta: u64 = diff_data[DMA_WINDOW as usize].timestamp - diff_data[0].timestamp; // Get the difficulty sum of the last DMA_WINDOW elements let diff_sum: u64 = diff_data .iter() .skip(1) .map(|dd| dd.difficulty.to_num()) .sum(); // adjust time delta toward goal subject to dampening and clamping let adj_ts = clamp( damp(ts_delta, BLOCK_TIME_WINDOW, DMA_DAMP_FACTOR), BLOCK_TIME_WINDOW, CLAMP_FACTOR, ); // minimum difficulty avoids getting stuck due to dampening
} /// Difficulty calculation based on a Weighted Target Exponential Moving Average /// of difficulty, using the ratio of the last block time over the target block time. pub fn next_wtema_difficulty<T>(_height: u64, cursor: T) -> HeaderInfo where T: IntoIterator<Item = HeaderInfo>, { let mut last_headers = cursor.into_iter(); // last two headers let last_header = last_headers.next().unwrap(); let prev_header = last_headers.next().unwrap(); let last_block_time: u64 = last_header.timestamp - prev_header.timestamp; let last_diff = last_header.difficulty.to_num(); // wtema difficulty update let next_diff = last_diff * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - BLOCK_TIME_SEC + last_block_time); // mainnet minimum difficulty at graph_weight(32) ensures difficulty increase on 59s block // since 16384 * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - 1) > 16384 let difficulty = max(Difficulty::min_wtema(), Difficulty::from_num(next_diff)); HeaderInfo::from_diff_scaling(difficulty, 0) // no more secondary PoW } /// Count, in units of 1/100 (a percent), the number of "secondary" (AR) blocks in the provided window of blocks. pub fn ar_count(_height: u64, diff_data: &[HeaderInfo]) -> u64 { 100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64 } /// The secondary proof-of-work factor is calculated along the same lines as in next_dma_difficulty, /// as an adjustment on the deviation against the ideal value. /// Factor by which the secondary proof of work difficulty will be adjusted pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 { // Get the scaling factor sum of the last DMA_WINDOW elements let scale_sum: u64 = diff_data.iter().map(|dd| dd.secondary_scaling as u64).sum(); // compute ideal 2nd_pow_fraction in pct and across window let target_pct = secondary_pow_ratio(height); let target_count = DMA_WINDOW * target_pct; // Get the secondary count across the window, adjusting count toward goal // subject to dampening and clamping. 
let adj_count = clamp( damp( ar_count(height, diff_data), target_count, AR_SCALE_DAMP_FACTOR, ), target_count, CLAMP_FACTOR, ); let scale = scale_sum * target_pct / max(1, adj_count); // minimum AR scale avoids getting stuck due to dampening max(MIN_AR_SCALE, scale) as u32 } #[cfg(test)] mod test { use super::*; #[test] fn test_graph_weight() { global::set_local_chain_type(global::ChainTypes::Mainnet); // initial weights assert_eq!(graph_weight(1, 31), 256 * 31); assert_eq!(graph_weight(1, 32), 512 * 32); assert_eq!(graph_weight(1, 33), 1024 * 33); // one year in, 31 starts going down, the rest stays the same assert_eq!(graph_weight(YEAR_HEIGHT, 31), 256 * 30); assert_eq!(graph_weight(YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(YEAR_HEIGHT, 33), 1024 * 33); // 31 loses one factor per week assert_eq!(graph_weight(YEAR_HEIGHT + WEEK_HEIGHT, 31), 256 * 29); assert_eq!(graph_weight(YEAR_HEIGHT + 2 * WEEK_HEIGHT, 31), 256 * 28); assert_eq!(graph_weight(YEAR_HEIGHT + 32 * WEEK_HEIGHT, 31), 0); // 2 years in, 31 still at 0, 32 starts decreasing assert_eq!(graph_weight(2 * YEAR_HEIGHT, 31), 0); assert_eq!(graph_weight(2 * YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(2 * YEAR_HEIGHT, 33), 1024 * 33); // 32 phaseout on hold assert_eq!( graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 32), C32_GRAPH_WEIGHT ); assert_eq!(graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 31), 0); assert_eq!( graph_weight(2 * YEAR_HEIGHT + 30 * WEEK_HEIGHT, 32), C32_GRAPH_WEIGHT ); assert_eq!( graph_weight(2 * YEAR_HEIGHT + 31 * WEEK_HEIGHT, 32), C32_GRAPH_WEIGHT ); // 3 years in, nothing changes assert_eq!(graph_weight(3 * YEAR_HEIGHT, 31), 0); assert_eq!(graph_weight(3 * YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(3 * YEAR_HEIGHT, 33), 1024 * 33); // 4 years in, still on hold assert_eq!(graph_weight(4 * YEAR_HEIGHT, 31), 0); assert_eq!(graph_weight(4 * YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33); assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33); } }
let difficulty = max(MIN_DMA_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts); HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling)
random_line_split
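The DMA step completed by the snippet above (damp the observed window timespan, clamp it, then scale the window's difficulty sum by target time over adjusted time) can be exercised in isolation. Below is a minimal, self-contained sketch of that arithmetic: the constants are copied from this file, while the sample timespan and difficulty sum are made-up illustration values.

use std::cmp::{max, min};

const BLOCK_TIME_SEC: u64 = 60;
const DMA_WINDOW: u64 = 60; // one hour of one-minute blocks
const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC; // 3600s goal
const DMA_DAMP_FACTOR: u64 = 3;
const CLAMP_FACTOR: u64 = 2;
const MIN_DMA_DIFFICULTY: u64 = DMA_DAMP_FACTOR;

fn damp(actual: u64, goal: u64, damp_factor: u64) -> u64 {
    (actual + (damp_factor - 1) * goal) / damp_factor
}

fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 {
    max(goal / clamp_factor, min(actual, goal * clamp_factor))
}

fn main() {
    // Suppose the last hour of blocks arrived twice as fast as intended:
    // observed timespan 1800s against the 3600s goal.
    let ts_delta = 1800u64;
    // Damping moves the result only one third of the way from the goal toward
    // the observed delta: (1800 + 2 * 3600) / 3 = 3000; clamping (factor 2)
    // leaves that value untouched here.
    let adj_ts = clamp(
        damp(ts_delta, BLOCK_TIME_WINDOW, DMA_DAMP_FACTOR),
        BLOCK_TIME_WINDOW,
        CLAMP_FACTOR,
    );
    assert_eq!(adj_ts, 3000);

    // With a hypothetical window difficulty sum of 600_000 (average 10_000 per
    // block), the next difficulty becomes 600_000 * 60 / 3000 = 12_000.
    let diff_sum = 600_000u64;
    let difficulty = max(MIN_DMA_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts);
    assert_eq!(difficulty, 12_000);
    println!("adjusted timespan: {}s, next difficulty: {}", adj_ts, difficulty);
}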
consensus.rs
// Copyright 2020 The Grin Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! All the rules required for a cryptocurrency to have reach consensus across //! the whole network are complex and hard to completely isolate. Some can be //! simple parameters (like block reward), others complex algorithms (like //! Merkle sum trees or reorg rules). However, as long as they're simple //! enough, consensus-relevant constants and short functions should be kept //! here. // Proof of existence: // txid: d043f5cc3e9e135e0bafb010521813668d5bc86eef27c0e30232287fd7f5a85f // document hash: 9b6372224719c5531e0ee1fcc36e7c9e29def9edd22e61aa60c014927191e58a use crate::core::block::HeaderVersion; use crate::core::hash::{Hash, ZERO_HASH}; use crate::global; use crate::pow::Difficulty; use std::cmp::{max, min}; /// A grin is divisible to 10^9, following the SI prefixes pub const GRIN_BASE: u64 = 1_000_000_000; /// Milligrin, a thousand of a grin pub const MILLI_GRIN: u64 = GRIN_BASE / 1_000; /// Microgrin, a thousand of a milligrin pub const MICRO_GRIN: u64 = MILLI_GRIN / 1_000; /// Nanogrin, smallest unit, takes a billion to make a grin pub const NANO_GRIN: u64 = 1; /// Block interval, in seconds, the network will tune its next_target for. Note /// that we may reduce this value in the future as we get more data on mining /// with Cuckoo Cycle, networks improve and block propagation is optimized /// (adjusting the reward accordingly). pub const BLOCK_TIME_SEC: u64 = 60; /// Start at BTC block 717,000 (snapshot) which should occur around /// Jan 3, 2022. This block will reward 6.25 BTC. /// We allocate the remaining of the 6.25 blocks to our /// "long tail" which will last 1000 years and start with 3.25. /// So initial reward is 0.3125 BCMWs for 1,224,600 more blocks. /// This is due to the 1 minute blocks instead of the 10 minute of BTC. /// This is approximately Bitcoin's halving schedule, until /// the 8th halving, after which the long tail will distribute the /// remainder of the BCMWs over 1000 years. At block 717,000 there will be /// 19,246,875 BTC. /// Note that pre-launch we may recalibrate these numbers /// a little. The goal will be to get exactly 21m BCMWs, have /// a 1000 year long tail, and do a snapshot on January 3, 2022. /// Snapshot includes 18,918,750,000,000,000 NanoBCMWs /// Gensis reward is 0. 
pub const REWARD0: u64 = 0; /// First reward 1,224,600 blocks pub const REWARD1: u64 = 312_500_000; // 382,687,500,000,000 NanoBCMWs /// Second reward for 2,100,000 blocks pub const REWARD2: u64 = 156_250_000; // 328,125,000,000,000 NanoBCMWs /// Third reward for 2,100,000 blocks pub const REWARD3: u64 = 78_125_000; // 164,062,500,000,000 NanoBCMWs /// Fourth reward for 2,100,000 blocks pub const REWARD4: u64 = 39_062_500; // 82,031,250,000,000 NanoBCMWs /// Fifth reward for 2,100,000 blocks pub const REWARD5: u64 = 19_531_250; // 41,015,625,000,000 NanoBCMWs /// Sixth reward for 2,100,000 blocks pub const REWARD6: u64 = 9_675_625; // 20,507,812,500,000 NanoBCMWs /// Seventh reward for 2,100,000 blocks pub const REWARD7: u64 = 4_882_812; // 10,253,905,200,000 NanoBCMWs /// Eigth reward for 525,600,000 blocks pub const REWARD8: u64 = 2_000_000; // 105,120,000,0000,000 NanoBCMWs /// Actual block reward for a given total fee amount pub fn reward(fee: u64, height: u64) -> u64 { calc_block_reward(height).saturating_add(fee) } fn get_epoch_start(num: u64) -> u64 { if num == 1 { 1 } else if num == 2 { 1_224_600 } else if num == 3 { 3_324_600 } else if num == 4 { 5_424_600 } else if num == 5 { 7_524_600 } else if num == 6 { 9_624_600 } else if num == 7 { 11_724_600 } else if num == 8 { 13_824_600 } else if num == 9 { 539_424_600 } else { // shouldn't get here. 0 } } /// Calculate block reward based on height pub fn calc_block_reward(height: u64) -> u64 { if height == 0 { // reward for genesis block REWARD0 } else if height <= get_epoch_start(2) { REWARD1 } else if height <= get_epoch_start(3) { REWARD2 } else if height <= get_epoch_start(4) { REWARD3 } else if height <= get_epoch_start(5) { REWARD4 } else if height <= get_epoch_start(6) { REWARD5 } else if height <= get_epoch_start(7) { REWARD6 } else if height <= get_epoch_start(8) { REWARD7 } else if height <= get_epoch_start(9) { REWARD8 } else { 0 // no reward after this. 
} } fn get_overage_offset_start_epoch(num: u64) -> u64 { if num == 1 { REWARD0 } else if num == 2 { get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 3 { get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 4 { get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 5 { get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 6 { get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 7 { get_epoch_start(7) * REWARD6 + get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 8 { get_epoch_start(8) * REWARD7 + get_epoch_start(7) * REWARD6 + get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 9 { get_epoch_start(9) * REWARD8 + get_epoch_start(8) * REWARD7 + get_epoch_start(7) * REWARD6 + get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else { // should not get here 1 } } /// Calculate block overage based on height and claimed BTCUtxos pub fn calc_block_overage(height: u64) -> u64 { if height == 0 { 0 } else if height <= get_epoch_start(2) { (REWARD1 * height) + get_overage_offset_start_epoch(1) } else if height <= get_epoch_start(3) { (REWARD2 * (height - get_epoch_start(2))) + get_overage_offset_start_epoch(2) } else if height <= get_epoch_start(4) { (REWARD3 * (height - get_epoch_start(3))) + get_overage_offset_start_epoch(3) } else if height <= get_epoch_start(5) { (REWARD4 * (height - get_epoch_start(4))) + get_overage_offset_start_epoch(4) } else if height <= get_epoch_start(6) { (REWARD5 * (height - get_epoch_start(5))) + get_overage_offset_start_epoch(5) } else if height <= get_epoch_start(7) { (REWARD6 * (height - get_epoch_start(6))) + get_overage_offset_start_epoch(6) } else if height <= get_epoch_start(8) { (REWARD7 * (height - get_epoch_start(7))) + get_overage_offset_start_epoch(7) } else if height <= get_epoch_start(9) { (REWARD8 * (height - get_epoch_start(8))) + get_overage_offset_start_epoch(8) } else { // we exit here. Up to future generations to decide // how to handle. std::process::exit(0); } } /// an hour in seconds pub const HOUR_SEC: u64 = 60 * 60; /// Nominal height for standard time intervals, hour is 60 blocks pub const HOUR_HEIGHT: u64 = HOUR_SEC / BLOCK_TIME_SEC; /// A day is 1440 blocks pub const DAY_HEIGHT: u64 = 24 * HOUR_HEIGHT; /// A week is 10_080 blocks pub const WEEK_HEIGHT: u64 = 7 * DAY_HEIGHT; /// A year is 524_160 blocks pub const YEAR_HEIGHT: u64 = 52 * WEEK_HEIGHT; /// Number of blocks before a coinbase matures and can be spent pub const COINBASE_MATURITY: u64 = DAY_HEIGHT; /// We use all C29d from the start pub fn secondary_pow_ratio(_height: u64) -> u64 { 100 } /// Cuckoo-cycle proof size (cycle length) pub const PROOFSIZE: usize = 42; /// Default Cuckatoo Cycle edge_bits, used for mining and validating. pub const DEFAULT_MIN_EDGE_BITS: u8 = 31; /// Cuckaroo* proof-of-work edge_bits, meant to be ASIC resistant. 
pub const SECOND_POW_EDGE_BITS: u8 = 29; /// Original reference edge_bits to compute difficulty factors for higher /// Cuckoo graph sizes, changing this would hard fork pub const BASE_EDGE_BITS: u8 = 24; /// Default number of blocks in the past when cross-block cut-through will start /// happening. Needs to be long enough to not overlap with a long reorg. /// Rational /// behind the value is the longest bitcoin fork was about 30 blocks, so 5h. We /// add an order of magnitude to be safe and round to 7x24h of blocks to make it /// easier to reason about. pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32; /// Default number of blocks in the past to determine the height where we request /// a txhashset (and full blocks from). Needs to be long enough to not overlap with /// a long reorg. /// Rational behind the value is the longest bitcoin fork was about 30 blocks, so 5h. /// We add an order of magnitude to be safe and round to 2x24h of blocks to make it /// easier to reason about. pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32; /// Weight of an input when counted against the max block weight capacity pub const INPUT_WEIGHT: u64 = 1; /// Weight of an output when counted against the max block weight capacity pub const OUTPUT_WEIGHT: u64 = 21; /// Weight of a kernel when counted against the max block weight capacity pub const KERNEL_WEIGHT: u64 = 3; /// Total maximum block weight. At current sizes, this means a maximum /// theoretical size of: /// * `(674 + 33 + 1) * (40_000 / 21) = 1_348_571` for a block with only outputs /// * `(1 + 8 + 8 + 33 + 64) * (40_000 / 3) = 1_520_000` for a block with only kernels /// * `(1 + 33) * 40_000 = 1_360_000` for a block with only inputs /// /// Regardless of the relative numbers of inputs/outputs/kernels in a block the maximum /// block size is around 1.5MB /// For a block full of "average" txs (2 inputs, 2 outputs, 1 kernel) we have - /// `(1 * 2) + (21 * 2) + (3 * 1) = 47` (weight per tx) /// `40_000 / 47 = 851` (txs per block) /// pub const MAX_BLOCK_WEIGHT: u64 = 40_000; /// Fork every 6 months. pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2; /// Testnet first hard fork height, set to happen around 2019-06-20 pub const TESTNET_FIRST_HARD_FORK: u64 = 185_040; /// Testnet second hard fork height, set to happen around 2019-12-19 pub const TESTNET_SECOND_HARD_FORK: u64 = 298_080; /// Testnet second hard fork height, set to happen around 2020-06-20 pub const TESTNET_THIRD_HARD_FORK: u64 = 552_960; /// Testnet second hard fork height, set to happen around 2020-12-8 pub const TESTNET_FOURTH_HARD_FORK: u64 = 642_240; /// Fork every 3 blocks pub const TESTING_HARD_FORK_INTERVAL: u64 = 3; /// Compute possible block version at a given height, /// currently no hard forks. pub fn header_version(_height: u64) -> HeaderVersion { HeaderVersion(1) } /// Check whether the block version is valid at a given height, implements /// 6 months interval scheduled hard forks for the first 2 years. 
pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool { version == header_version(height) } /// Number of blocks used to calculate difficulty adjustment by Damped Moving Average pub const DMA_WINDOW: u64 = HOUR_HEIGHT; /// Difficulty adjustment half life (actually, 60s * number of 0s-blocks to raise diff by factor e) is 4 hours pub const WTEMA_HALF_LIFE: u64 = 4 * HOUR_SEC; /// Average time span of the DMA difficulty adjustment window pub const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC; /// Clamp factor to use for DMA difficulty adjustment /// Limit value to within this factor of goal pub const CLAMP_FACTOR: u64 = 2; /// Dampening factor to use for DMA difficulty adjustment pub const DMA_DAMP_FACTOR: u64 = 3; /// Dampening factor to use for AR scale calculation. pub const AR_SCALE_DAMP_FACTOR: u64 = 13; /// Compute weight of a graph as number of siphash bits defining the graph /// The height dependence allows a 30-week linear transition from C31+ to C32+ starting after 1 year pub fn graph_weight(height: u64, edge_bits: u8) -> u64 { let mut xpr_edge_bits = edge_bits as u64; let expiry_height = YEAR_HEIGHT; if edge_bits == 31 && height >= expiry_height { xpr_edge_bits = xpr_edge_bits.saturating_sub(1 + (height - expiry_height) / WEEK_HEIGHT); } // For C31 xpr_edge_bits reaches 0 at height YEAR_HEIGHT + 30 * WEEK_HEIGHT // 30 weeks after Jan 15, 2020 would be Aug 12, 2020 (2u64 << (edge_bits - global::base_edge_bits()) as u64) * xpr_edge_bits } /// minimum solution difficulty after HardFork4 when PoW becomes primary only Cuckatoo32+ pub const C32_GRAPH_WEIGHT: u64 = (2u64 << (32 - BASE_EDGE_BITS) as u64) * 32; // 16384 /// Minimum difficulty, enforced in Damped Moving Average diff retargetting /// avoids getting stuck when trying to increase difficulty subject to dampening pub const MIN_DMA_DIFFICULTY: u64 = DMA_DAMP_FACTOR; /// Minimum scaling factor for AR pow, enforced in diff retargetting /// avoids getting stuck when trying to increase ar_scale subject to dampening pub const MIN_AR_SCALE: u64 = AR_SCALE_DAMP_FACTOR; /// unit difficulty, equal to graph_weight(SECOND_POW_EDGE_BITS) pub const UNIT_DIFFICULTY: u64 = ((2 as u64) << (SECOND_POW_EDGE_BITS - BASE_EDGE_BITS)) * (SECOND_POW_EDGE_BITS as u64); /// The initial difficulty at launch. This should be over-estimated /// and difficulty should come down at launch rather than up /// Currently grossly over-estimated at 10% of current /// ethereum GPUs (assuming 1GPU can solve a block at diff 1 in one block interval) pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY; /// Minimal header information required for the Difficulty calculation to /// take place #[derive(Clone, Debug, Eq, PartialEq)] pub struct HeaderInfo { /// Block hash, ZERO_HASH when this is a sythetic entry. 
pub block_hash: Hash, /// Timestamp of the header, 1 when not used (returned info) pub timestamp: u64, /// Network difficulty or next difficulty to use pub difficulty: Difficulty, /// Network secondary PoW factor or factor to use pub secondary_scaling: u32, /// Whether the header is a secondary proof of work pub is_secondary: bool, } impl HeaderInfo { /// Default constructor pub fn new( block_hash: Hash, timestamp: u64, difficulty: Difficulty, secondary_scaling: u32, is_secondary: bool, ) -> HeaderInfo { HeaderInfo { block_hash, timestamp, difficulty, secondary_scaling, is_secondary, } } /// Constructor from a timestamp and difficulty, setting a default secondary /// PoW factor pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo { HeaderInfo { block_hash: ZERO_HASH, timestamp, difficulty, secondary_scaling: global::initial_graph_weight(), is_secondary: true, } } /// Constructor from a difficulty and secondary factor, setting a default /// timestamp pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo { HeaderInfo { block_hash: ZERO_HASH, timestamp: 1, difficulty, secondary_scaling, is_secondary: true, } } } /// Move value linearly toward a goal pub fn
(actual: u64, goal: u64, damp_factor: u64) -> u64 { (actual + (damp_factor - 1) * goal) / damp_factor } /// limit value to be within some factor from a goal pub fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 { max(goal / clamp_factor, min(actual, goal * clamp_factor)) } /// Computes the proof-of-work difficulty that the next block should comply with. /// Takes an iterator over past block headers information, from latest /// (highest height) to oldest (lowest height). /// Uses either the old dma DAA or, starting from HF4, the new wtema DAA pub fn next_difficulty<T>(height: u64, cursor: T) -> HeaderInfo where T: IntoIterator<Item = HeaderInfo>, { if header_version(height) < HeaderVersion(5) { next_dma_difficulty(height, cursor) } else { next_wtema_difficulty(height, cursor) } } /// Difficulty calculation based on a Damped Moving Average /// of difficulty over a window of DMA_WINDOW blocks. /// The corresponding timespan is calculated /// by using the difference between the timestamps at the beginning /// and the end of the window, with a damping toward the target block time. pub fn next_dma_difficulty<T>(height: u64, cursor: T) -> HeaderInfo where T: IntoIterator<Item = HeaderInfo>, { // Create vector of difficulty data running from earliest // to latest, and pad with simulated pre-genesis data to allow earlier // adjustment if there isn't enough window data length will be // DMA_WINDOW + 1 (for initial block time bound) let diff_data = global::difficulty_data_to_vector(cursor); // First, get the ratio of secondary PoW vs primary, skipping initial header let sec_pow_scaling = secondary_pow_scaling(height, &diff_data[1..]); // Get the timestamp delta across the window let ts_delta: u64 = diff_data[DMA_WINDOW as usize].timestamp - diff_data[0].timestamp; // Get the difficulty sum of the last DMA_WINDOW elements let diff_sum: u64 = diff_data .iter() .skip(1) .map(|dd| dd.difficulty.to_num()) .sum(); // adjust time delta toward goal subject to dampening and clamping let adj_ts = clamp( damp(ts_delta, BLOCK_TIME_WINDOW, DMA_DAMP_FACTOR), BLOCK_TIME_WINDOW, CLAMP_FACTOR, ); // minimum difficulty avoids getting stuck due to dampening let difficulty = max(MIN_DMA_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts); HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling) } /// Difficulty calculation based on a Weighted Target Exponential Moving Average /// of difficulty, using the ratio of the last block time over the target block time. pub fn next_wtema_difficulty<T>(_height: u64, cursor: T) -> HeaderInfo where T: IntoIterator<Item = HeaderInfo>, { let mut last_headers = cursor.into_iter(); // last two headers let last_header = last_headers.next().unwrap(); let prev_header = last_headers.next().unwrap(); let last_block_time: u64 = last_header.timestamp - prev_header.timestamp; let last_diff = last_header.difficulty.to_num(); // wtema difficulty update let next_diff = last_diff * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - BLOCK_TIME_SEC + last_block_time); // mainnet minimum difficulty at graph_weight(32) ensures difficulty increase on 59s block // since 16384 * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - 1) > 16384 let difficulty = max(Difficulty::min_wtema(), Difficulty::from_num(next_diff)); HeaderInfo::from_diff_scaling(difficulty, 0) // no more secondary PoW } /// Count, in units of 1/100 (a percent), the number of "secondary" (AR) blocks in the provided window of blocks. 
pub fn ar_count(_height: u64, diff_data: &[HeaderInfo]) -> u64 { 100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64 } /// The secondary proof-of-work factor is calculated along the same lines as in next_dma_difficulty, /// as an adjustment on the deviation against the ideal value. /// Factor by which the secondary proof of work difficulty will be adjusted pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 { // Get the scaling factor sum of the last DMA_WINDOW elements let scale_sum: u64 = diff_data.iter().map(|dd| dd.secondary_scaling as u64).sum(); // compute ideal 2nd_pow_fraction in pct and across window let target_pct = secondary_pow_ratio(height); let target_count = DMA_WINDOW * target_pct; // Get the secondary count across the window, adjusting count toward goal // subject to dampening and clamping. let adj_count = clamp( damp( ar_count(height, diff_data), target_count, AR_SCALE_DAMP_FACTOR, ), target_count, CLAMP_FACTOR, ); let scale = scale_sum * target_pct / max(1, adj_count); // minimum AR scale avoids getting stuck due to dampening max(MIN_AR_SCALE, scale) as u32 } #[cfg(test)] mod test { use super::*; #[test] fn test_graph_weight() { global::set_local_chain_type(global::ChainTypes::Mainnet); // initial weights assert_eq!(graph_weight(1, 31), 256 * 31); assert_eq!(graph_weight(1, 32), 512 * 32); assert_eq!(graph_weight(1, 33), 1024 * 33); // one year in, 31 starts going down, the rest stays the same assert_eq!(graph_weight(YEAR_HEIGHT, 31), 256 * 30); assert_eq!(graph_weight(YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(YEAR_HEIGHT, 33), 1024 * 33); // 31 loses one factor per week assert_eq!(graph_weight(YEAR_HEIGHT + WEEK_HEIGHT, 31), 256 * 29); assert_eq!(graph_weight(YEAR_HEIGHT + 2 * WEEK_HEIGHT, 31), 256 * 28); assert_eq!(graph_weight(YEAR_HEIGHT + 32 * WEEK_HEIGHT, 31), 0); // 2 years in, 31 still at 0, 32 starts decreasing assert_eq!(graph_weight(2 * YEAR_HEIGHT, 31), 0); assert_eq!(graph_weight(2 * YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(2 * YEAR_HEIGHT, 33), 1024 * 33); // 32 phaseout on hold assert_eq!( graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 32), C32_GRAPH_WEIGHT ); assert_eq!(graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 31), 0); assert_eq!( graph_weight(2 * YEAR_HEIGHT + 30 * WEEK_HEIGHT, 32), C32_GRAPH_WEIGHT ); assert_eq!( graph_weight(2 * YEAR_HEIGHT + 31 * WEEK_HEIGHT, 32), C32_GRAPH_WEIGHT ); // 3 years in, nothing changes assert_eq!(graph_weight(3 * YEAR_HEIGHT, 31), 0); assert_eq!(graph_weight(3 * YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(3 * YEAR_HEIGHT, 33), 1024 * 33); // 4 years in, still on hold assert_eq!(graph_weight(4 * YEAR_HEIGHT, 31), 0); assert_eq!(graph_weight(4 * YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33); assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33); } }
damp
identifier_name
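The same file also defines the wtema rule, where each block nudges difficulty by the ratio of the half-life to the half-life shifted by the last block time's deviation from the 60s target. A standalone sketch of that update follows; the constants are copied from the file, the starting difficulty is a made-up example value, and the 59s/61s cases illustrate the commented point that a slightly fast block raises difficulty.

const BLOCK_TIME_SEC: u64 = 60;
const HOUR_SEC: u64 = 60 * 60;
const WTEMA_HALF_LIFE: u64 = 4 * HOUR_SEC; // 14_400s

// Same formula as in next_wtema_difficulty, without the Difficulty wrapper.
fn wtema_next(last_diff: u64, last_block_time: u64) -> u64 {
    last_diff * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - BLOCK_TIME_SEC + last_block_time)
}

fn main() {
    let last_diff = 1_000_000u64; // hypothetical current difficulty
    // A block one second faster than target nudges difficulty up slightly...
    assert_eq!(wtema_next(last_diff, 59), 1_000_069);
    // ...and one second slower nudges it down by about the same amount.
    assert_eq!(wtema_next(last_diff, 61), 999_930);
    // A block a full minute late pulls difficulty down harder.
    println!("120s block -> {}", wtema_next(last_diff, 120)); // 995_850
}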
consensus.rs
// Copyright 2020 The Grin Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! All the rules required for a cryptocurrency to have reach consensus across //! the whole network are complex and hard to completely isolate. Some can be //! simple parameters (like block reward), others complex algorithms (like //! Merkle sum trees or reorg rules). However, as long as they're simple //! enough, consensus-relevant constants and short functions should be kept //! here. // Proof of existence: // txid: d043f5cc3e9e135e0bafb010521813668d5bc86eef27c0e30232287fd7f5a85f // document hash: 9b6372224719c5531e0ee1fcc36e7c9e29def9edd22e61aa60c014927191e58a use crate::core::block::HeaderVersion; use crate::core::hash::{Hash, ZERO_HASH}; use crate::global; use crate::pow::Difficulty; use std::cmp::{max, min}; /// A grin is divisible to 10^9, following the SI prefixes pub const GRIN_BASE: u64 = 1_000_000_000; /// Milligrin, a thousand of a grin pub const MILLI_GRIN: u64 = GRIN_BASE / 1_000; /// Microgrin, a thousand of a milligrin pub const MICRO_GRIN: u64 = MILLI_GRIN / 1_000; /// Nanogrin, smallest unit, takes a billion to make a grin pub const NANO_GRIN: u64 = 1; /// Block interval, in seconds, the network will tune its next_target for. Note /// that we may reduce this value in the future as we get more data on mining /// with Cuckoo Cycle, networks improve and block propagation is optimized /// (adjusting the reward accordingly). pub const BLOCK_TIME_SEC: u64 = 60; /// Start at BTC block 717,000 (snapshot) which should occur around /// Jan 3, 2022. This block will reward 6.25 BTC. /// We allocate the remaining of the 6.25 blocks to our /// "long tail" which will last 1000 years and start with 3.25. /// So initial reward is 0.3125 BCMWs for 1,224,600 more blocks. /// This is due to the 1 minute blocks instead of the 10 minute of BTC. /// This is approximately Bitcoin's halving schedule, until /// the 8th halving, after which the long tail will distribute the /// remainder of the BCMWs over 1000 years. At block 717,000 there will be /// 19,246,875 BTC. /// Note that pre-launch we may recalibrate these numbers /// a little. The goal will be to get exactly 21m BCMWs, have /// a 1000 year long tail, and do a snapshot on January 3, 2022. /// Snapshot includes 18,918,750,000,000,000 NanoBCMWs /// Gensis reward is 0. 
pub const REWARD0: u64 = 0; /// First reward 1,224,600 blocks pub const REWARD1: u64 = 312_500_000; // 382,687,500,000,000 NanoBCMWs /// Second reward for 2,100,000 blocks pub const REWARD2: u64 = 156_250_000; // 328,125,000,000,000 NanoBCMWs /// Third reward for 2,100,000 blocks pub const REWARD3: u64 = 78_125_000; // 164,062,500,000,000 NanoBCMWs /// Fourth reward for 2,100,000 blocks pub const REWARD4: u64 = 39_062_500; // 82,031,250,000,000 NanoBCMWs /// Fifth reward for 2,100,000 blocks pub const REWARD5: u64 = 19_531_250; // 41,015,625,000,000 NanoBCMWs /// Sixth reward for 2,100,000 blocks pub const REWARD6: u64 = 9_675_625; // 20,507,812,500,000 NanoBCMWs /// Seventh reward for 2,100,000 blocks pub const REWARD7: u64 = 4_882_812; // 10,253,905,200,000 NanoBCMWs /// Eigth reward for 525,600,000 blocks pub const REWARD8: u64 = 2_000_000; // 105,120,000,0000,000 NanoBCMWs /// Actual block reward for a given total fee amount pub fn reward(fee: u64, height: u64) -> u64 { calc_block_reward(height).saturating_add(fee) } fn get_epoch_start(num: u64) -> u64 { if num == 1 { 1 } else if num == 2 { 1_224_600 } else if num == 3 { 3_324_600 } else if num == 4 { 5_424_600 } else if num == 5 { 7_524_600 } else if num == 6 { 9_624_600 } else if num == 7 { 11_724_600 } else if num == 8 { 13_824_600 } else if num == 9 { 539_424_600 } else { // shouldn't get here. 0 } } /// Calculate block reward based on height pub fn calc_block_reward(height: u64) -> u64 { if height == 0 { // reward for genesis block REWARD0 } else if height <= get_epoch_start(2) { REWARD1 } else if height <= get_epoch_start(3) { REWARD2 } else if height <= get_epoch_start(4) { REWARD3 } else if height <= get_epoch_start(5) { REWARD4 } else if height <= get_epoch_start(6) { REWARD5 } else if height <= get_epoch_start(7) { REWARD6 } else if height <= get_epoch_start(8) { REWARD7 } else if height <= get_epoch_start(9) { REWARD8 } else { 0 // no reward after this. } } fn get_overage_offset_start_epoch(num: u64) -> u64 { if num == 1 { REWARD0 } else if num == 2 { get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 3 { get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 4 { get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 5 { get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 6 { get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 7 { get_epoch_start(7) * REWARD6 + get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 8 { get_epoch_start(8) * REWARD7 + get_epoch_start(7) * REWARD6 + get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 9
else { // should not get here 1 } } /// Calculate block overage based on height and claimed BTCUtxos pub fn calc_block_overage(height: u64) -> u64 { if height == 0 { 0 } else if height <= get_epoch_start(2) { (REWARD1 * height) + get_overage_offset_start_epoch(1) } else if height <= get_epoch_start(3) { (REWARD2 * (height - get_epoch_start(2))) + get_overage_offset_start_epoch(2) } else if height <= get_epoch_start(4) { (REWARD3 * (height - get_epoch_start(3))) + get_overage_offset_start_epoch(3) } else if height <= get_epoch_start(5) { (REWARD4 * (height - get_epoch_start(4))) + get_overage_offset_start_epoch(4) } else if height <= get_epoch_start(6) { (REWARD5 * (height - get_epoch_start(5))) + get_overage_offset_start_epoch(5) } else if height <= get_epoch_start(7) { (REWARD6 * (height - get_epoch_start(6))) + get_overage_offset_start_epoch(6) } else if height <= get_epoch_start(8) { (REWARD7 * (height - get_epoch_start(7))) + get_overage_offset_start_epoch(7) } else if height <= get_epoch_start(9) { (REWARD8 * (height - get_epoch_start(8))) + get_overage_offset_start_epoch(8) } else { // we exit here. Up to future generations to decide // how to handle. std::process::exit(0); } } /// an hour in seconds pub const HOUR_SEC: u64 = 60 * 60; /// Nominal height for standard time intervals, hour is 60 blocks pub const HOUR_HEIGHT: u64 = HOUR_SEC / BLOCK_TIME_SEC; /// A day is 1440 blocks pub const DAY_HEIGHT: u64 = 24 * HOUR_HEIGHT; /// A week is 10_080 blocks pub const WEEK_HEIGHT: u64 = 7 * DAY_HEIGHT; /// A year is 524_160 blocks pub const YEAR_HEIGHT: u64 = 52 * WEEK_HEIGHT; /// Number of blocks before a coinbase matures and can be spent pub const COINBASE_MATURITY: u64 = DAY_HEIGHT; /// We use all C29d from the start pub fn secondary_pow_ratio(_height: u64) -> u64 { 100 } /// Cuckoo-cycle proof size (cycle length) pub const PROOFSIZE: usize = 42; /// Default Cuckatoo Cycle edge_bits, used for mining and validating. pub const DEFAULT_MIN_EDGE_BITS: u8 = 31; /// Cuckaroo* proof-of-work edge_bits, meant to be ASIC resistant. pub const SECOND_POW_EDGE_BITS: u8 = 29; /// Original reference edge_bits to compute difficulty factors for higher /// Cuckoo graph sizes, changing this would hard fork pub const BASE_EDGE_BITS: u8 = 24; /// Default number of blocks in the past when cross-block cut-through will start /// happening. Needs to be long enough to not overlap with a long reorg. /// Rational /// behind the value is the longest bitcoin fork was about 30 blocks, so 5h. We /// add an order of magnitude to be safe and round to 7x24h of blocks to make it /// easier to reason about. pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32; /// Default number of blocks in the past to determine the height where we request /// a txhashset (and full blocks from). Needs to be long enough to not overlap with /// a long reorg. /// Rational behind the value is the longest bitcoin fork was about 30 blocks, so 5h. /// We add an order of magnitude to be safe and round to 2x24h of blocks to make it /// easier to reason about. pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32; /// Weight of an input when counted against the max block weight capacity pub const INPUT_WEIGHT: u64 = 1; /// Weight of an output when counted against the max block weight capacity pub const OUTPUT_WEIGHT: u64 = 21; /// Weight of a kernel when counted against the max block weight capacity pub const KERNEL_WEIGHT: u64 = 3; /// Total maximum block weight. 
At current sizes, this means a maximum /// theoretical size of: /// * `(674 + 33 + 1) * (40_000 / 21) = 1_348_571` for a block with only outputs /// * `(1 + 8 + 8 + 33 + 64) * (40_000 / 3) = 1_520_000` for a block with only kernels /// * `(1 + 33) * 40_000 = 1_360_000` for a block with only inputs /// /// Regardless of the relative numbers of inputs/outputs/kernels in a block the maximum /// block size is around 1.5MB /// For a block full of "average" txs (2 inputs, 2 outputs, 1 kernel) we have - /// `(1 * 2) + (21 * 2) + (3 * 1) = 47` (weight per tx) /// `40_000 / 47 = 851` (txs per block) /// pub const MAX_BLOCK_WEIGHT: u64 = 40_000; /// Fork every 6 months. pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2; /// Testnet first hard fork height, set to happen around 2019-06-20 pub const TESTNET_FIRST_HARD_FORK: u64 = 185_040; /// Testnet second hard fork height, set to happen around 2019-12-19 pub const TESTNET_SECOND_HARD_FORK: u64 = 298_080; /// Testnet second hard fork height, set to happen around 2020-06-20 pub const TESTNET_THIRD_HARD_FORK: u64 = 552_960; /// Testnet second hard fork height, set to happen around 2020-12-8 pub const TESTNET_FOURTH_HARD_FORK: u64 = 642_240; /// Fork every 3 blocks pub const TESTING_HARD_FORK_INTERVAL: u64 = 3; /// Compute possible block version at a given height, /// currently no hard forks. pub fn header_version(_height: u64) -> HeaderVersion { HeaderVersion(1) } /// Check whether the block version is valid at a given height, implements /// 6 months interval scheduled hard forks for the first 2 years. pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool { version == header_version(height) } /// Number of blocks used to calculate difficulty adjustment by Damped Moving Average pub const DMA_WINDOW: u64 = HOUR_HEIGHT; /// Difficulty adjustment half life (actually, 60s * number of 0s-blocks to raise diff by factor e) is 4 hours pub const WTEMA_HALF_LIFE: u64 = 4 * HOUR_SEC; /// Average time span of the DMA difficulty adjustment window pub const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC; /// Clamp factor to use for DMA difficulty adjustment /// Limit value to within this factor of goal pub const CLAMP_FACTOR: u64 = 2; /// Dampening factor to use for DMA difficulty adjustment pub const DMA_DAMP_FACTOR: u64 = 3; /// Dampening factor to use for AR scale calculation. 
pub const AR_SCALE_DAMP_FACTOR: u64 = 13; /// Compute weight of a graph as number of siphash bits defining the graph /// The height dependence allows a 30-week linear transition from C31+ to C32+ starting after 1 year pub fn graph_weight(height: u64, edge_bits: u8) -> u64 { let mut xpr_edge_bits = edge_bits as u64; let expiry_height = YEAR_HEIGHT; if edge_bits == 31 && height >= expiry_height { xpr_edge_bits = xpr_edge_bits.saturating_sub(1 + (height - expiry_height) / WEEK_HEIGHT); } // For C31 xpr_edge_bits reaches 0 at height YEAR_HEIGHT + 30 * WEEK_HEIGHT // 30 weeks after Jan 15, 2020 would be Aug 12, 2020 (2u64 << (edge_bits - global::base_edge_bits()) as u64) * xpr_edge_bits } /// minimum solution difficulty after HardFork4 when PoW becomes primary only Cuckatoo32+ pub const C32_GRAPH_WEIGHT: u64 = (2u64 << (32 - BASE_EDGE_BITS) as u64) * 32; // 16384 /// Minimum difficulty, enforced in Damped Moving Average diff retargetting /// avoids getting stuck when trying to increase difficulty subject to dampening pub const MIN_DMA_DIFFICULTY: u64 = DMA_DAMP_FACTOR; /// Minimum scaling factor for AR pow, enforced in diff retargetting /// avoids getting stuck when trying to increase ar_scale subject to dampening pub const MIN_AR_SCALE: u64 = AR_SCALE_DAMP_FACTOR; /// unit difficulty, equal to graph_weight(SECOND_POW_EDGE_BITS) pub const UNIT_DIFFICULTY: u64 = ((2 as u64) << (SECOND_POW_EDGE_BITS - BASE_EDGE_BITS)) * (SECOND_POW_EDGE_BITS as u64); /// The initial difficulty at launch. This should be over-estimated /// and difficulty should come down at launch rather than up /// Currently grossly over-estimated at 10% of current /// ethereum GPUs (assuming 1GPU can solve a block at diff 1 in one block interval) pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY; /// Minimal header information required for the Difficulty calculation to /// take place #[derive(Clone, Debug, Eq, PartialEq)] pub struct HeaderInfo { /// Block hash, ZERO_HASH when this is a sythetic entry. 
pub block_hash: Hash, /// Timestamp of the header, 1 when not used (returned info) pub timestamp: u64, /// Network difficulty or next difficulty to use pub difficulty: Difficulty, /// Network secondary PoW factor or factor to use pub secondary_scaling: u32, /// Whether the header is a secondary proof of work pub is_secondary: bool, } impl HeaderInfo { /// Default constructor pub fn new( block_hash: Hash, timestamp: u64, difficulty: Difficulty, secondary_scaling: u32, is_secondary: bool, ) -> HeaderInfo { HeaderInfo { block_hash, timestamp, difficulty, secondary_scaling, is_secondary, } } /// Constructor from a timestamp and difficulty, setting a default secondary /// PoW factor pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo { HeaderInfo { block_hash: ZERO_HASH, timestamp, difficulty, secondary_scaling: global::initial_graph_weight(), is_secondary: true, } } /// Constructor from a difficulty and secondary factor, setting a default /// timestamp pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo { HeaderInfo { block_hash: ZERO_HASH, timestamp: 1, difficulty, secondary_scaling, is_secondary: true, } } } /// Move value linearly toward a goal pub fn damp(actual: u64, goal: u64, damp_factor: u64) -> u64 { (actual + (damp_factor - 1) * goal) / damp_factor } /// limit value to be within some factor from a goal pub fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 { max(goal / clamp_factor, min(actual, goal * clamp_factor)) } /// Computes the proof-of-work difficulty that the next block should comply with. /// Takes an iterator over past block headers information, from latest /// (highest height) to oldest (lowest height). /// Uses either the old dma DAA or, starting from HF4, the new wtema DAA pub fn next_difficulty<T>(height: u64, cursor: T) -> HeaderInfo where T: IntoIterator<Item = HeaderInfo>, { if header_version(height) < HeaderVersion(5) { next_dma_difficulty(height, cursor) } else { next_wtema_difficulty(height, cursor) } } /// Difficulty calculation based on a Damped Moving Average /// of difficulty over a window of DMA_WINDOW blocks. /// The corresponding timespan is calculated /// by using the difference between the timestamps at the beginning /// and the end of the window, with a damping toward the target block time. 
pub fn next_dma_difficulty<T>(height: u64, cursor: T) -> HeaderInfo where T: IntoIterator<Item = HeaderInfo>, { // Create vector of difficulty data running from earliest // to latest, and pad with simulated pre-genesis data to allow earlier // adjustment if there isn't enough window data length will be // DMA_WINDOW + 1 (for initial block time bound) let diff_data = global::difficulty_data_to_vector(cursor); // First, get the ratio of secondary PoW vs primary, skipping initial header let sec_pow_scaling = secondary_pow_scaling(height, &diff_data[1..]); // Get the timestamp delta across the window let ts_delta: u64 = diff_data[DMA_WINDOW as usize].timestamp - diff_data[0].timestamp; // Get the difficulty sum of the last DMA_WINDOW elements let diff_sum: u64 = diff_data .iter() .skip(1) .map(|dd| dd.difficulty.to_num()) .sum(); // adjust time delta toward goal subject to dampening and clamping let adj_ts = clamp( damp(ts_delta, BLOCK_TIME_WINDOW, DMA_DAMP_FACTOR), BLOCK_TIME_WINDOW, CLAMP_FACTOR, ); // minimum difficulty avoids getting stuck due to dampening let difficulty = max(MIN_DMA_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts); HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling) } /// Difficulty calculation based on a Weighted Target Exponential Moving Average /// of difficulty, using the ratio of the last block time over the target block time. pub fn next_wtema_difficulty<T>(_height: u64, cursor: T) -> HeaderInfo where T: IntoIterator<Item = HeaderInfo>, { let mut last_headers = cursor.into_iter(); // last two headers let last_header = last_headers.next().unwrap(); let prev_header = last_headers.next().unwrap(); let last_block_time: u64 = last_header.timestamp - prev_header.timestamp; let last_diff = last_header.difficulty.to_num(); // wtema difficulty update let next_diff = last_diff * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - BLOCK_TIME_SEC + last_block_time); // mainnet minimum difficulty at graph_weight(32) ensures difficulty increase on 59s block // since 16384 * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - 1) > 16384 let difficulty = max(Difficulty::min_wtema(), Difficulty::from_num(next_diff)); HeaderInfo::from_diff_scaling(difficulty, 0) // no more secondary PoW } /// Count, in units of 1/100 (a percent), the number of "secondary" (AR) blocks in the provided window of blocks. pub fn ar_count(_height: u64, diff_data: &[HeaderInfo]) -> u64 { 100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64 } /// The secondary proof-of-work factor is calculated along the same lines as in next_dma_difficulty, /// as an adjustment on the deviation against the ideal value. /// Factor by which the secondary proof of work difficulty will be adjusted pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 { // Get the scaling factor sum of the last DMA_WINDOW elements let scale_sum: u64 = diff_data.iter().map(|dd| dd.secondary_scaling as u64).sum(); // compute ideal 2nd_pow_fraction in pct and across window let target_pct = secondary_pow_ratio(height); let target_count = DMA_WINDOW * target_pct; // Get the secondary count across the window, adjusting count toward goal // subject to dampening and clamping. 
let adj_count = clamp( damp( ar_count(height, diff_data), target_count, AR_SCALE_DAMP_FACTOR, ), target_count, CLAMP_FACTOR, ); let scale = scale_sum * target_pct / max(1, adj_count); // minimum AR scale avoids getting stuck due to dampening max(MIN_AR_SCALE, scale) as u32 } #[cfg(test)] mod test { use super::*; #[test] fn test_graph_weight() { global::set_local_chain_type(global::ChainTypes::Mainnet); // initial weights assert_eq!(graph_weight(1, 31), 256 * 31); assert_eq!(graph_weight(1, 32), 512 * 32); assert_eq!(graph_weight(1, 33), 1024 * 33); // one year in, 31 starts going down, the rest stays the same assert_eq!(graph_weight(YEAR_HEIGHT, 31), 256 * 30); assert_eq!(graph_weight(YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(YEAR_HEIGHT, 33), 1024 * 33); // 31 loses one factor per week assert_eq!(graph_weight(YEAR_HEIGHT + WEEK_HEIGHT, 31), 256 * 29); assert_eq!(graph_weight(YEAR_HEIGHT + 2 * WEEK_HEIGHT, 31), 256 * 28); assert_eq!(graph_weight(YEAR_HEIGHT + 32 * WEEK_HEIGHT, 31), 0); // 2 years in, 31 still at 0, 32 starts decreasing assert_eq!(graph_weight(2 * YEAR_HEIGHT, 31), 0); assert_eq!(graph_weight(2 * YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(2 * YEAR_HEIGHT, 33), 1024 * 33); // 32 phaseout on hold assert_eq!( graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 32), C32_GRAPH_WEIGHT ); assert_eq!(graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 31), 0); assert_eq!( graph_weight(2 * YEAR_HEIGHT + 30 * WEEK_HEIGHT, 32), C32_GRAPH_WEIGHT ); assert_eq!( graph_weight(2 * YEAR_HEIGHT + 31 * WEEK_HEIGHT, 32), C32_GRAPH_WEIGHT ); // 3 years in, nothing changes assert_eq!(graph_weight(3 * YEAR_HEIGHT, 31), 0); assert_eq!(graph_weight(3 * YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(3 * YEAR_HEIGHT, 33), 1024 * 33); // 4 years in, still on hold assert_eq!(graph_weight(4 * YEAR_HEIGHT, 31), 0); assert_eq!(graph_weight(4 * YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33); assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33); } }
{ get_epoch_start(9) * REWARD8 + get_epoch_start(8) * REWARD7 + get_epoch_start(7) * REWARD6 + get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 }
conditional_block
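As a quick cross-check of the per-epoch emission figures quoted alongside the REWARD constants, the sketch below multiplies each epoch's block count by its per-block reward. The constants and epoch boundaries are copied from this file, and the asserted totals are the same NanoBCMW figures given in the comments.

const REWARD1: u64 = 312_500_000; // NanoBCMWs per block in epoch 1
const REWARD2: u64 = 156_250_000; // NanoBCMWs per block in epoch 2

fn main() {
    // Epoch 1 runs for 1_224_600 blocks (up to get_epoch_start(2)).
    let epoch1_blocks = 1_224_600u64;
    assert_eq!(epoch1_blocks * REWARD1, 382_687_500_000_000);

    // Epoch 2 runs for 2_100_000 blocks (get_epoch_start(2) to get_epoch_start(3)).
    let epoch2_blocks = 3_324_600u64 - 1_224_600;
    assert_eq!(epoch2_blocks * REWARD2, 328_125_000_000_000);

    // Cumulative coinbase emission across the first two epochs.
    println!(
        "emission after epoch 2: {} NanoBCMWs",
        epoch1_blocks * REWARD1 + epoch2_blocks * REWARD2
    );
}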
consensus.rs
// Copyright 2020 The Grin Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! All the rules required for a cryptocurrency to have reach consensus across //! the whole network are complex and hard to completely isolate. Some can be //! simple parameters (like block reward), others complex algorithms (like //! Merkle sum trees or reorg rules). However, as long as they're simple //! enough, consensus-relevant constants and short functions should be kept //! here. // Proof of existence: // txid: d043f5cc3e9e135e0bafb010521813668d5bc86eef27c0e30232287fd7f5a85f // document hash: 9b6372224719c5531e0ee1fcc36e7c9e29def9edd22e61aa60c014927191e58a use crate::core::block::HeaderVersion; use crate::core::hash::{Hash, ZERO_HASH}; use crate::global; use crate::pow::Difficulty; use std::cmp::{max, min}; /// A grin is divisible to 10^9, following the SI prefixes pub const GRIN_BASE: u64 = 1_000_000_000; /// Milligrin, a thousand of a grin pub const MILLI_GRIN: u64 = GRIN_BASE / 1_000; /// Microgrin, a thousand of a milligrin pub const MICRO_GRIN: u64 = MILLI_GRIN / 1_000; /// Nanogrin, smallest unit, takes a billion to make a grin pub const NANO_GRIN: u64 = 1; /// Block interval, in seconds, the network will tune its next_target for. Note /// that we may reduce this value in the future as we get more data on mining /// with Cuckoo Cycle, networks improve and block propagation is optimized /// (adjusting the reward accordingly). pub const BLOCK_TIME_SEC: u64 = 60; /// Start at BTC block 717,000 (snapshot) which should occur around /// Jan 3, 2022. This block will reward 6.25 BTC. /// We allocate the remaining of the 6.25 blocks to our /// "long tail" which will last 1000 years and start with 3.25. /// So initial reward is 0.3125 BCMWs for 1,224,600 more blocks. /// This is due to the 1 minute blocks instead of the 10 minute of BTC. /// This is approximately Bitcoin's halving schedule, until /// the 8th halving, after which the long tail will distribute the /// remainder of the BCMWs over 1000 years. At block 717,000 there will be /// 19,246,875 BTC. /// Note that pre-launch we may recalibrate these numbers /// a little. The goal will be to get exactly 21m BCMWs, have /// a 1000 year long tail, and do a snapshot on January 3, 2022. /// Snapshot includes 18,918,750,000,000,000 NanoBCMWs /// Gensis reward is 0. 
pub const REWARD0: u64 = 0; /// First reward 1,224,600 blocks pub const REWARD1: u64 = 312_500_000; // 382,687,500,000,000 NanoBCMWs /// Second reward for 2,100,000 blocks pub const REWARD2: u64 = 156_250_000; // 328,125,000,000,000 NanoBCMWs /// Third reward for 2,100,000 blocks pub const REWARD3: u64 = 78_125_000; // 164,062,500,000,000 NanoBCMWs /// Fourth reward for 2,100,000 blocks pub const REWARD4: u64 = 39_062_500; // 82,031,250,000,000 NanoBCMWs /// Fifth reward for 2,100,000 blocks pub const REWARD5: u64 = 19_531_250; // 41,015,625,000,000 NanoBCMWs /// Sixth reward for 2,100,000 blocks pub const REWARD6: u64 = 9_675_625; // 20,507,812,500,000 NanoBCMWs /// Seventh reward for 2,100,000 blocks pub const REWARD7: u64 = 4_882_812; // 10,253,905,200,000 NanoBCMWs /// Eigth reward for 525,600,000 blocks pub const REWARD8: u64 = 2_000_000; // 105,120,000,0000,000 NanoBCMWs /// Actual block reward for a given total fee amount pub fn reward(fee: u64, height: u64) -> u64 { calc_block_reward(height).saturating_add(fee) } fn get_epoch_start(num: u64) -> u64 { if num == 1 { 1 } else if num == 2 { 1_224_600 } else if num == 3 { 3_324_600 } else if num == 4 { 5_424_600 } else if num == 5 { 7_524_600 } else if num == 6 { 9_624_600 } else if num == 7 { 11_724_600 } else if num == 8 { 13_824_600 } else if num == 9 { 539_424_600 } else { // shouldn't get here. 0 } } /// Calculate block reward based on height pub fn calc_block_reward(height: u64) -> u64 { if height == 0 { // reward for genesis block REWARD0 } else if height <= get_epoch_start(2) { REWARD1 } else if height <= get_epoch_start(3) { REWARD2 } else if height <= get_epoch_start(4) { REWARD3 } else if height <= get_epoch_start(5) { REWARD4 } else if height <= get_epoch_start(6) { REWARD5 } else if height <= get_epoch_start(7) { REWARD6 } else if height <= get_epoch_start(8) { REWARD7 } else if height <= get_epoch_start(9) { REWARD8 } else { 0 // no reward after this. 
} } fn get_overage_offset_start_epoch(num: u64) -> u64 { if num == 1 { REWARD0 } else if num == 2 { get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 3 { get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 4 { get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 5 { get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 6 { get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 7 { get_epoch_start(7) * REWARD6 + get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 8 { get_epoch_start(8) * REWARD7 + get_epoch_start(7) * REWARD6 + get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else if num == 9 { get_epoch_start(9) * REWARD8 + get_epoch_start(8) * REWARD7 + get_epoch_start(7) * REWARD6 + get_epoch_start(6) * REWARD5 + get_epoch_start(5) * REWARD4 + get_epoch_start(4) * REWARD3 + get_epoch_start(3) * REWARD2 + get_epoch_start(2) * REWARD1 + REWARD0 } else { // should not get here 1 } } /// Calculate block overage based on height and claimed BTCUtxos pub fn calc_block_overage(height: u64) -> u64 { if height == 0 { 0 } else if height <= get_epoch_start(2) { (REWARD1 * height) + get_overage_offset_start_epoch(1) } else if height <= get_epoch_start(3) { (REWARD2 * (height - get_epoch_start(2))) + get_overage_offset_start_epoch(2) } else if height <= get_epoch_start(4) { (REWARD3 * (height - get_epoch_start(3))) + get_overage_offset_start_epoch(3) } else if height <= get_epoch_start(5) { (REWARD4 * (height - get_epoch_start(4))) + get_overage_offset_start_epoch(4) } else if height <= get_epoch_start(6) { (REWARD5 * (height - get_epoch_start(5))) + get_overage_offset_start_epoch(5) } else if height <= get_epoch_start(7) { (REWARD6 * (height - get_epoch_start(6))) + get_overage_offset_start_epoch(6) } else if height <= get_epoch_start(8) { (REWARD7 * (height - get_epoch_start(7))) + get_overage_offset_start_epoch(7) } else if height <= get_epoch_start(9) { (REWARD8 * (height - get_epoch_start(8))) + get_overage_offset_start_epoch(8) } else { // we exit here. Up to future generations to decide // how to handle. std::process::exit(0); } } /// an hour in seconds pub const HOUR_SEC: u64 = 60 * 60; /// Nominal height for standard time intervals, hour is 60 blocks pub const HOUR_HEIGHT: u64 = HOUR_SEC / BLOCK_TIME_SEC; /// A day is 1440 blocks pub const DAY_HEIGHT: u64 = 24 * HOUR_HEIGHT; /// A week is 10_080 blocks pub const WEEK_HEIGHT: u64 = 7 * DAY_HEIGHT; /// A year is 524_160 blocks pub const YEAR_HEIGHT: u64 = 52 * WEEK_HEIGHT; /// Number of blocks before a coinbase matures and can be spent pub const COINBASE_MATURITY: u64 = DAY_HEIGHT; /// We use all C29d from the start pub fn secondary_pow_ratio(_height: u64) -> u64 { 100 } /// Cuckoo-cycle proof size (cycle length) pub const PROOFSIZE: usize = 42; /// Default Cuckatoo Cycle edge_bits, used for mining and validating. pub const DEFAULT_MIN_EDGE_BITS: u8 = 31; /// Cuckaroo* proof-of-work edge_bits, meant to be ASIC resistant. 
pub const SECOND_POW_EDGE_BITS: u8 = 29; /// Original reference edge_bits to compute difficulty factors for higher /// Cuckoo graph sizes, changing this would hard fork pub const BASE_EDGE_BITS: u8 = 24; /// Default number of blocks in the past when cross-block cut-through will start /// happening. Needs to be long enough to not overlap with a long reorg. /// Rational /// behind the value is the longest bitcoin fork was about 30 blocks, so 5h. We /// add an order of magnitude to be safe and round to 7x24h of blocks to make it /// easier to reason about. pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32; /// Default number of blocks in the past to determine the height where we request /// a txhashset (and full blocks from). Needs to be long enough to not overlap with /// a long reorg. /// Rational behind the value is the longest bitcoin fork was about 30 blocks, so 5h. /// We add an order of magnitude to be safe and round to 2x24h of blocks to make it /// easier to reason about. pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32; /// Weight of an input when counted against the max block weight capacity pub const INPUT_WEIGHT: u64 = 1; /// Weight of an output when counted against the max block weight capacity pub const OUTPUT_WEIGHT: u64 = 21; /// Weight of a kernel when counted against the max block weight capacity pub const KERNEL_WEIGHT: u64 = 3; /// Total maximum block weight. At current sizes, this means a maximum /// theoretical size of: /// * `(674 + 33 + 1) * (40_000 / 21) = 1_348_571` for a block with only outputs /// * `(1 + 8 + 8 + 33 + 64) * (40_000 / 3) = 1_520_000` for a block with only kernels /// * `(1 + 33) * 40_000 = 1_360_000` for a block with only inputs /// /// Regardless of the relative numbers of inputs/outputs/kernels in a block the maximum /// block size is around 1.5MB /// For a block full of "average" txs (2 inputs, 2 outputs, 1 kernel) we have - /// `(1 * 2) + (21 * 2) + (3 * 1) = 47` (weight per tx) /// `40_000 / 47 = 851` (txs per block) /// pub const MAX_BLOCK_WEIGHT: u64 = 40_000; /// Fork every 6 months. pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2; /// Testnet first hard fork height, set to happen around 2019-06-20 pub const TESTNET_FIRST_HARD_FORK: u64 = 185_040; /// Testnet second hard fork height, set to happen around 2019-12-19 pub const TESTNET_SECOND_HARD_FORK: u64 = 298_080; /// Testnet second hard fork height, set to happen around 2020-06-20 pub const TESTNET_THIRD_HARD_FORK: u64 = 552_960; /// Testnet second hard fork height, set to happen around 2020-12-8 pub const TESTNET_FOURTH_HARD_FORK: u64 = 642_240; /// Fork every 3 blocks pub const TESTING_HARD_FORK_INTERVAL: u64 = 3; /// Compute possible block version at a given height, /// currently no hard forks. pub fn header_version(_height: u64) -> HeaderVersion { HeaderVersion(1) } /// Check whether the block version is valid at a given height, implements /// 6 months interval scheduled hard forks for the first 2 years. pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool
/// Number of blocks used to calculate difficulty adjustment by Damped Moving Average pub const DMA_WINDOW: u64 = HOUR_HEIGHT; /// Difficulty adjustment half life (actually, 60s * number of 0s-blocks to raise diff by factor e) is 4 hours pub const WTEMA_HALF_LIFE: u64 = 4 * HOUR_SEC; /// Average time span of the DMA difficulty adjustment window pub const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC; /// Clamp factor to use for DMA difficulty adjustment /// Limit value to within this factor of goal pub const CLAMP_FACTOR: u64 = 2; /// Dampening factor to use for DMA difficulty adjustment pub const DMA_DAMP_FACTOR: u64 = 3; /// Dampening factor to use for AR scale calculation. pub const AR_SCALE_DAMP_FACTOR: u64 = 13; /// Compute weight of a graph as number of siphash bits defining the graph /// The height dependence allows a 30-week linear transition from C31+ to C32+ starting after 1 year pub fn graph_weight(height: u64, edge_bits: u8) -> u64 { let mut xpr_edge_bits = edge_bits as u64; let expiry_height = YEAR_HEIGHT; if edge_bits == 31 && height >= expiry_height { xpr_edge_bits = xpr_edge_bits.saturating_sub(1 + (height - expiry_height) / WEEK_HEIGHT); } // For C31 xpr_edge_bits reaches 0 at height YEAR_HEIGHT + 30 * WEEK_HEIGHT // 30 weeks after Jan 15, 2020 would be Aug 12, 2020 (2u64 << (edge_bits - global::base_edge_bits()) as u64) * xpr_edge_bits } /// minimum solution difficulty after HardFork4 when PoW becomes primary only Cuckatoo32+ pub const C32_GRAPH_WEIGHT: u64 = (2u64 << (32 - BASE_EDGE_BITS) as u64) * 32; // 16384 /// Minimum difficulty, enforced in Damped Moving Average diff retargetting /// avoids getting stuck when trying to increase difficulty subject to dampening pub const MIN_DMA_DIFFICULTY: u64 = DMA_DAMP_FACTOR; /// Minimum scaling factor for AR pow, enforced in diff retargetting /// avoids getting stuck when trying to increase ar_scale subject to dampening pub const MIN_AR_SCALE: u64 = AR_SCALE_DAMP_FACTOR; /// unit difficulty, equal to graph_weight(SECOND_POW_EDGE_BITS) pub const UNIT_DIFFICULTY: u64 = ((2 as u64) << (SECOND_POW_EDGE_BITS - BASE_EDGE_BITS)) * (SECOND_POW_EDGE_BITS as u64); /// The initial difficulty at launch. This should be over-estimated /// and difficulty should come down at launch rather than up /// Currently grossly over-estimated at 10% of current /// ethereum GPUs (assuming 1GPU can solve a block at diff 1 in one block interval) pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY; /// Minimal header information required for the Difficulty calculation to /// take place #[derive(Clone, Debug, Eq, PartialEq)] pub struct HeaderInfo { /// Block hash, ZERO_HASH when this is a sythetic entry. 
pub block_hash: Hash, /// Timestamp of the header, 1 when not used (returned info) pub timestamp: u64, /// Network difficulty or next difficulty to use pub difficulty: Difficulty, /// Network secondary PoW factor or factor to use pub secondary_scaling: u32, /// Whether the header is a secondary proof of work pub is_secondary: bool, } impl HeaderInfo { /// Default constructor pub fn new( block_hash: Hash, timestamp: u64, difficulty: Difficulty, secondary_scaling: u32, is_secondary: bool, ) -> HeaderInfo { HeaderInfo { block_hash, timestamp, difficulty, secondary_scaling, is_secondary, } } /// Constructor from a timestamp and difficulty, setting a default secondary /// PoW factor pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo { HeaderInfo { block_hash: ZERO_HASH, timestamp, difficulty, secondary_scaling: global::initial_graph_weight(), is_secondary: true, } } /// Constructor from a difficulty and secondary factor, setting a default /// timestamp pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo { HeaderInfo { block_hash: ZERO_HASH, timestamp: 1, difficulty, secondary_scaling, is_secondary: true, } } } /// Move value linearly toward a goal pub fn damp(actual: u64, goal: u64, damp_factor: u64) -> u64 { (actual + (damp_factor - 1) * goal) / damp_factor } /// limit value to be within some factor from a goal pub fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 { max(goal / clamp_factor, min(actual, goal * clamp_factor)) } /// Computes the proof-of-work difficulty that the next block should comply with. /// Takes an iterator over past block headers information, from latest /// (highest height) to oldest (lowest height). /// Uses either the old dma DAA or, starting from HF4, the new wtema DAA pub fn next_difficulty<T>(height: u64, cursor: T) -> HeaderInfo where T: IntoIterator<Item = HeaderInfo>, { if header_version(height) < HeaderVersion(5) { next_dma_difficulty(height, cursor) } else { next_wtema_difficulty(height, cursor) } } /// Difficulty calculation based on a Damped Moving Average /// of difficulty over a window of DMA_WINDOW blocks. /// The corresponding timespan is calculated /// by using the difference between the timestamps at the beginning /// and the end of the window, with a damping toward the target block time. 
pub fn next_dma_difficulty<T>(height: u64, cursor: T) -> HeaderInfo where T: IntoIterator<Item = HeaderInfo>, { // Create vector of difficulty data running from earliest // to latest, and pad with simulated pre-genesis data to allow earlier // adjustment if there isn't enough window data length will be // DMA_WINDOW + 1 (for initial block time bound) let diff_data = global::difficulty_data_to_vector(cursor); // First, get the ratio of secondary PoW vs primary, skipping initial header let sec_pow_scaling = secondary_pow_scaling(height, &diff_data[1..]); // Get the timestamp delta across the window let ts_delta: u64 = diff_data[DMA_WINDOW as usize].timestamp - diff_data[0].timestamp; // Get the difficulty sum of the last DMA_WINDOW elements let diff_sum: u64 = diff_data .iter() .skip(1) .map(|dd| dd.difficulty.to_num()) .sum(); // adjust time delta toward goal subject to dampening and clamping let adj_ts = clamp( damp(ts_delta, BLOCK_TIME_WINDOW, DMA_DAMP_FACTOR), BLOCK_TIME_WINDOW, CLAMP_FACTOR, ); // minimum difficulty avoids getting stuck due to dampening let difficulty = max(MIN_DMA_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts); HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling) } /// Difficulty calculation based on a Weighted Target Exponential Moving Average /// of difficulty, using the ratio of the last block time over the target block time. pub fn next_wtema_difficulty<T>(_height: u64, cursor: T) -> HeaderInfo where T: IntoIterator<Item = HeaderInfo>, { let mut last_headers = cursor.into_iter(); // last two headers let last_header = last_headers.next().unwrap(); let prev_header = last_headers.next().unwrap(); let last_block_time: u64 = last_header.timestamp - prev_header.timestamp; let last_diff = last_header.difficulty.to_num(); // wtema difficulty update let next_diff = last_diff * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - BLOCK_TIME_SEC + last_block_time); // mainnet minimum difficulty at graph_weight(32) ensures difficulty increase on 59s block // since 16384 * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - 1) > 16384 let difficulty = max(Difficulty::min_wtema(), Difficulty::from_num(next_diff)); HeaderInfo::from_diff_scaling(difficulty, 0) // no more secondary PoW } /// Count, in units of 1/100 (a percent), the number of "secondary" (AR) blocks in the provided window of blocks. pub fn ar_count(_height: u64, diff_data: &[HeaderInfo]) -> u64 { 100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64 } /// The secondary proof-of-work factor is calculated along the same lines as in next_dma_difficulty, /// as an adjustment on the deviation against the ideal value. /// Factor by which the secondary proof of work difficulty will be adjusted pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 { // Get the scaling factor sum of the last DMA_WINDOW elements let scale_sum: u64 = diff_data.iter().map(|dd| dd.secondary_scaling as u64).sum(); // compute ideal 2nd_pow_fraction in pct and across window let target_pct = secondary_pow_ratio(height); let target_count = DMA_WINDOW * target_pct; // Get the secondary count across the window, adjusting count toward goal // subject to dampening and clamping. 
let adj_count = clamp( damp( ar_count(height, diff_data), target_count, AR_SCALE_DAMP_FACTOR, ), target_count, CLAMP_FACTOR, ); let scale = scale_sum * target_pct / max(1, adj_count); // minimum AR scale avoids getting stuck due to dampening max(MIN_AR_SCALE, scale) as u32 } #[cfg(test)] mod test { use super::*; #[test] fn test_graph_weight() { global::set_local_chain_type(global::ChainTypes::Mainnet); // initial weights assert_eq!(graph_weight(1, 31), 256 * 31); assert_eq!(graph_weight(1, 32), 512 * 32); assert_eq!(graph_weight(1, 33), 1024 * 33); // one year in, 31 starts going down, the rest stays the same assert_eq!(graph_weight(YEAR_HEIGHT, 31), 256 * 30); assert_eq!(graph_weight(YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(YEAR_HEIGHT, 33), 1024 * 33); // 31 loses one factor per week assert_eq!(graph_weight(YEAR_HEIGHT + WEEK_HEIGHT, 31), 256 * 29); assert_eq!(graph_weight(YEAR_HEIGHT + 2 * WEEK_HEIGHT, 31), 256 * 28); assert_eq!(graph_weight(YEAR_HEIGHT + 32 * WEEK_HEIGHT, 31), 0); // 2 years in, 31 still at 0, 32 starts decreasing assert_eq!(graph_weight(2 * YEAR_HEIGHT, 31), 0); assert_eq!(graph_weight(2 * YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(2 * YEAR_HEIGHT, 33), 1024 * 33); // 32 phaseout on hold assert_eq!( graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 32), C32_GRAPH_WEIGHT ); assert_eq!(graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 31), 0); assert_eq!( graph_weight(2 * YEAR_HEIGHT + 30 * WEEK_HEIGHT, 32), C32_GRAPH_WEIGHT ); assert_eq!( graph_weight(2 * YEAR_HEIGHT + 31 * WEEK_HEIGHT, 32), C32_GRAPH_WEIGHT ); // 3 years in, nothing changes assert_eq!(graph_weight(3 * YEAR_HEIGHT, 31), 0); assert_eq!(graph_weight(3 * YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(3 * YEAR_HEIGHT, 33), 1024 * 33); // 4 years in, still on hold assert_eq!(graph_weight(4 * YEAR_HEIGHT, 31), 0); assert_eq!(graph_weight(4 * YEAR_HEIGHT, 32), 512 * 32); assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33); assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33); } }
{ version == header_version(height) }
identifier_body
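// A minimal, standalone sketch (not part of the consensus.rs row above) of how the
// DMA retarget combines the `damp` and `clamp` helpers defined earlier: the observed
// window timespan is pulled toward the target window, then limited to within a factor
// of it, before it scales the summed difficulty. BLOCK_TIME_SEC is assumed to be 60,
// matching "hour is 60 blocks"; the other constants mirror the ones above.
fn damp(actual: u64, goal: u64, damp_factor: u64) -> u64 {
    (actual + (damp_factor - 1) * goal) / damp_factor
}

fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 {
    std::cmp::max(goal / clamp_factor, std::cmp::min(actual, goal * clamp_factor))
}

fn main() {
    const BLOCK_TIME_SEC: u64 = 60; // assumed target block time in seconds
    const DMA_WINDOW: u64 = 60; // one hour of blocks
    const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC;
    const DMA_DAMP_FACTOR: u64 = 3;
    const CLAMP_FACTOR: u64 = 2;

    // Suppose the last hour of blocks arrived twice as fast as targeted (30 minutes):
    let ts_delta: u64 = 30 * 60;
    let adj_ts = clamp(
        damp(ts_delta, BLOCK_TIME_WINDOW, DMA_DAMP_FACTOR),
        BLOCK_TIME_WINDOW,
        CLAMP_FACTOR,
    );
    // damp: (1800 + 2 * 3600) / 3 = 3000; clamp keeps the value within [1800, 7200], so 3000.
    // next_dma_difficulty then uses diff_sum * BLOCK_TIME_SEC / adj_ts, a gentler increase
    // than the raw 2x that the observed timespan alone would suggest.
    println!("adjusted timespan = {} seconds (observed {})", adj_ts, ts_delta);
}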
lib.rs
// Copyright 2016 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #![crate_name="rustcxx_common"] #![feature(rustc_private, slice_patterns)] extern crate syntax; extern crate rustc; mod types; use std::borrow::Cow; use std::hash::{SipHasher, Hash, Hasher}; use std::iter; use syntax::abi::Abi; use syntax::ast::{self, DUMMY_NODE_ID}; use syntax::codemap::{Span, Spanned, respan, spanned, DUMMY_SP}; use syntax::errors::Handler; use syntax::ext::base::ExtCtxt; use syntax::ext::build::AstBuilder; use syntax::ext::quote::rt::ToTokens; use syntax::parse::{token, PResult}; use syntax::parse::common::SeqSep; use syntax::parse::parser::Parser; use syntax::print::pprust::{token_to_string, tts_to_string}; use syntax::ptr::P; use syntax::tokenstream::TokenTree; /// Language specific parsing. /// /// The two macros, `cxx!` and `rust!`, share a similar syntax. /// This trait differentiates the two, such that the rest of the parsing code can be reused. pub trait Lang { type Body: ToTokens; type ArgValue; fn parse_body<'a>(parser: &mut Parser<'a>) -> PResult<'a, Self::Body>; fn parse_arg_value<'a>(ecx: &ExtCtxt, parser: &mut Parser<'a>, ident: ast::SpannedIdent) -> PResult<'a, Self::ArgValue>; } pub enum Rust {} pub enum Cxx {} impl Lang for Rust { type Body = P<ast::Block>; type ArgValue = Vec<Spanned<String>>; fn parse_body<'a>(parser: &mut Parser<'a>) -> PResult<'a, Self::Body> { parser.parse_block() } fn parse_arg_value<'a>(_ecx: &ExtCtxt, parser: &mut Parser<'a>, ident: ast::SpannedIdent) -> PResult<'a, Self::ArgValue> { if parser.eat(&token::Eq) { let mut tokens = Vec::new(); while!parser.check(&token::Comma) && !parser.check(&token::CloseDelim(token::Paren)) { tokens.push(try!(parser.parse_token_tree())); } Ok(flatten_tts(&tokens)) } else { Ok(vec![respan(ident.span, ident.node.to_string())]) } } } impl Lang for Cxx { type Body = Vec<TokenTree>; type ArgValue = P<ast::Expr>; fn parse_body<'a>(parser: &mut Parser<'a>) -> PResult<'a, Self::Body> { try!(parser.expect(&token::OpenDelim(token::Brace))); parser.parse_seq_to_end( &token::CloseDelim(token::Brace), SeqSep::none(), |parser| parser.parse_token_tree()) } fn parse_arg_value<'a>(ecx: &ExtCtxt, parser: &mut Parser<'a>, ident: ast::SpannedIdent) -> PResult<'a, Self::ArgValue> { if parser.eat(&token::Eq) { parser.parse_expr() } else { Ok(ecx.expr_ident(ident.span, ident.node)) } } } pub struct Function<L: Lang> { pub span: Span, pub name: ast::Ident, pub ret_ty: Option<P<ast::Ty>>, pub args: Vec<ArgSpec<L>>, pub body: L::Body, } impl <L: Lang> Function<L> { pub fn parse<'a>(ecx: &ExtCtxt<'a>, span: Span, tts: &[TokenTree]) -> PResult<'a, Function<L>> { let mut parser = ecx.new_parser_from_tts(tts); let args = if parser.check(&token::OpenDelim(token::Paren)) { Some(try!(Self::parse_args(ecx, &mut parser))) } else { None }; let ret_ty = if args.is_some() && parser.check(&token::RArrow) { Some(try!(Self::parse_ret_ty(&mut parser))) } else { None }; let body = try!(L::parse_body(&mut parser)); let hash = { let mut hasher = SipHasher::new(); 
tts_to_string(tts).hash(&mut hasher); hasher.finish() }; let name = ecx.ident_of(&format!("rustcxx_{:016x}", hash)); Ok(Function { span: span, name: name, ret_ty: ret_ty, args: args.unwrap_or_else(|| Vec::new()), body: body, }) } fn parse_args<'a>(ecx: &ExtCtxt, parser: &mut Parser<'a>) -> PResult<'a, Vec<ArgSpec<L>>> { parser.parse_unspanned_seq( &token::OpenDelim(token::Paren), &token::CloseDelim(token::Paren), SeqSep::trailing_allowed(token::Comma), |parser| ArgSpec::parse(ecx, parser)) } fn parse_ret_ty<'a>(parser: &mut Parser<'a>) -> PResult<'a, P<ast::Ty>> { try!(parser.expect(&token::RArrow)); parser.parse_ty() } pub fn fn_decl(&self, ecx: &ExtCtxt) -> P<ast::FnDecl> { let args = self.args.iter().map(|arg| { ecx.arg(arg.ident.span, arg.ident.node, arg.ty.clone()) }).collect(); let ret_ty = self.ret_ty.clone() .map(ast::FunctionRetTy::Ty) .unwrap_or(ast::FunctionRetTy::Default(DUMMY_SP)); P(ast::FnDecl { inputs: args, output: ret_ty, variadic: false }) } pub fn foreign_item(&self, ecx: &ExtCtxt) -> ast::ForeignItem { let fn_decl = self.fn_decl(ecx); ast::ForeignItem { id: DUMMY_NODE_ID, ident: self.name, attrs: Vec::new(), node: ast::ForeignItemKind::Fn(fn_decl, ast::Generics::default()), vis: ast::Visibility::Inherited, span: self.span, } } pub fn cxx_args<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> { let args = try!(self.args.iter().map(|arg| { let ty = try!(arg.cxx_type(&ecx.parse_sess.span_diagnostic)); Ok(format!("{} const {}", ty, arg.ident.node)) }).collect::<PResult<Vec<String>>>()); Ok(args.join(", ")) } pub fn cxx_ret_ty<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, Cow<'static, str>> { self.ret_ty.as_ref().map(|ty| { types::convert_ty_to_cxx(&ecx.parse_sess.span_diagnostic, &ty) }).unwrap_or(Ok(Cow::from("void"))) } } #[derive(Debug)] pub struct ArgSpec<L: Lang> { pub ident: ast::SpannedIdent, pub ty: P<ast::Ty>, pub value: L::ArgValue, } impl <L: Lang> ArgSpec<L> { pub fn parse<'a>(ecx: &ExtCtxt, parser: &mut Parser<'a>) -> PResult<'a, ArgSpec<L>> { let ident = { let lo = parser.span.lo; let ident = try!(parser.parse_ident()); let hi = parser.span.lo; spanned(lo, hi, ident) }; try!(parser.expect(&token::Colon)); let ty = try!(parser.parse_ty()); let value = try!(L::parse_arg_value(ecx, parser, ident)); Ok(ArgSpec { ident: ident, ty: ty, value: value, }) } pub fn cxx_type<'a>(&self, handler: &'a Handler) -> PResult<'a, Cow<'static, str>> { types::convert_ty_to_cxx(handler, &self.ty) } } impl Function<Cxx> { pub fn call_expr<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, P<ast::Expr>> { let name = self.name.clone(); let args = self.args.iter().map(|arg| arg.value.clone()).collect(); Ok(ecx.expr_call_ident(self.span, name, args)) } pub fn cxx_code<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> { let ret_ty = try!(self.cxx_ret_ty(ecx)); let args = try!(self.cxx_args(ecx)); let signature = format!( "{span}\nextern \"C\" {ret_ty} {name}({args})", span = span_to_cpp_directive(ecx, self.span), ret_ty = ret_ty, name = self.name, args = args); let mut body = tokens_to_cpp(ecx, &flatten_tts(&self.body)); if self.ret_ty.is_some() { body = format!("return ({{\n{};\n}});", body); } Ok(format!("{} {{\n{}\n}}\n", signature, body)) } } // Calling rust from C++ is a bit trickier. // We must declare the function before it can be used. // However C++ requires the function to be declared outside the current function, but then we may // miss type definitions which are in scope due to being in a namespace, or some includes. 
// // For example : // ```c++ // #include <stdint.h> // #include <stdio.h> // // void foo() { // uint32_t a = 3; // uint32_t double = rust![(a: uint32_t) -> uint32_t { // a * 2 // }]; // printf("double: ", a); // } // ``` // // Declaring the extern function before the includes would not work, as uint32_t is not defined at // this point. Finding the right place to declare it would be complicated and would almost require // a full C++ parser. // // Instead we use an alternative approach. The function's symbol is declared with an opaque type at // the top of the file. This does not require argument types to be in scope. // When invoking the function, the symbol is first casted into a function pointer of the correct type. // This way, the same typing context as in the original source is used. // // The example above would be translated into the following : // // ```c++ // struct rustcxx_XXXXXXXX; // extern "C" rustcxx_XXXXXXXX rustcxx_XXXXXXXX; // // #include <stdint.h> // #include <stdio.h> // // void foo() { // uint32_t a = 3; // uint32_t double = ((uint32_t (*)(uint32_t a)) &rustcxx_XXXXXXXX)(a); // printf("double: ", a); // } // ``` impl Function<Rust> { pub fn cxx_decl<'a>(&self, _ecx: &'a ExtCtxt) -> PResult<'a, String> { Ok(format!("struct {}; extern \"C\" {} {};", self.name, self.name, self.name)) } pub fn cxx_call<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> { let ret_ty = try!(self.cxx_ret_ty(ecx)); let args_sig = try!(self.cxx_args(ecx)); let arg_separator = respan(DUMMY_SP, String::from(",")); let args_value = self.args.iter().map(|arg| { arg.value.clone() }).collect::<Vec<_>>().join(&arg_separator); let cast_ty = format!("{} (*) ({})", ret_ty, args_sig); let fn_ptr = format!("( ({}) &{} )", cast_ty, self.name); let call = format!("{} ({})", fn_ptr, tokens_to_cpp(ecx, &args_value)); Ok(call) } pub fn item<'a>(&self, ecx: &'a ExtCtxt) -> P<ast::Item> { let decl = self.fn_decl(ecx); // Function has to be no_mangle, otherwise it can't be called from C++ let no_mangle = ecx.meta_word(self.span, token::intern("no_mangle").as_str()); // The function has to be exported or it would be optimized out by the compiler. // The compiler already prints an error, but it is easy to miss, so make it a hard error. let deny = ecx.meta_list( self.span, token::intern("deny").as_str(), vec![ecx.meta_word(self.span, token::intern("private_no_mangle_fns").as_str())]); let attrs = vec![ ecx.attribute(self.span, no_mangle), ecx.attribute(self.span, deny), ]; let fn_item = ast::ItemKind::Fn( decl, ast::Unsafety::Unsafe, ast::Constness::NotConst, Abi::C, ast::Generics::default(), self.body.clone()); P(ast::Item { ident: self.name, attrs: attrs, id: ast::DUMMY_NODE_ID, node: fn_item, vis: ast::Visibility::Public, span: self.span, }) } } /// Find and replace uses of rust![.. ] in a token tree stream. /// /// The callback is invoked for every use of the rust! macro and it's result is used to replace it. pub fn parse_rust_macro<F>(tts: &[TokenTree], f: &mut F) -> Vec<Spanned<String>> where F: FnMut(Span, &[TokenTree]) -> Vec<Spanned<String>> { let mut result = Vec::new(); // Iterate over the tokens with 3 tokens of lookahead. 
let mut i = 0; loop { match (tts.get(i), tts.get(i+1), tts.get(i+2)) { (Some(&TokenTree::Token(_, token::Ident(ident))), Some(&TokenTree::Token(_, token::Not)), Some(&TokenTree::Delimited(span, ref contents))) if ident.name.to_string() == "rust" => { i += 2; result.extend(f(span, &contents.tts)); } (Some(&TokenTree::Delimited(_, ref contents)), _, _) => { // Recursively look into the token tree result.push(respan(contents.open_span, token_to_string(&contents.open_token()))); result.extend(parse_rust_macro(&contents.tts, f)); result.push(respan(contents.close_span, token_to_string(&contents.close_token()))); } (Some(&TokenTree::Token(span, ref tok)), _, _) => { result.push(respan(span, token_to_string(tok))); } (Some(&TokenTree::Sequence(..)), _, _) => unimplemented!(), (None, _, _) => break, } i += 1; } result } /// Flatten a token tree stream. /// /// Each token is stringified and paired with it's span. pub fn flatten_tts(tts: &[TokenTree]) -> Vec<Spanned<String>> { tts.iter().flat_map(|tt| { match tt { &TokenTree::Token(span, ref tok) => { vec![respan(span, token_to_string(tok))] } &TokenTree::Delimited(_, ref delimited) => { let open = respan(delimited.open_span, token_to_string(&delimited.open_token())); let close = respan(delimited.close_span, token_to_string(&delimited.close_token())); iter::once(open) .chain(flatten_tts(&delimited.tts)) .chain(iter::once(close)) .collect() } &TokenTree::Sequence(..) => unimplemented!() } }).collect() } /// Join tokens, using `#line` C preprocessor directives to maintain span /// information. pub fn tokens_to_cpp(ecx: &ExtCtxt, tokens: &[Spanned<String>]) -> String { let codemap = ecx.parse_sess.codemap(); let mut last_pos = codemap.lookup_char_pos(DUMMY_SP.lo); let mut column = 0; let mut contents = String::new(); for token in tokens { if token.span!= DUMMY_SP { let pos = codemap.lookup_char_pos(token.span.lo); if pos.file.name == pos.file.name && pos.line == last_pos.line + 1 { contents.push('\n'); column = 0; } else if pos.file.name!= pos.file.name || pos.line!= last_pos.line { contents.push('\n'); contents.push_str(&span_to_cpp_directive(ecx, token.span)); contents.push('\n'); column = 0; }
contents.push(' '); column += 1; } last_pos = pos; } column += token.node.len(); contents.push_str(&token.node); } return contents; } pub fn span_to_cpp_directive(ecx: &ExtCtxt, span: Span) -> String { let codemap = ecx.parse_sess.codemap(); let pos = codemap.lookup_char_pos(span.lo); format!("#line {} {:?}", pos.line, pos.file.name) }
// Pad the code such that the token remains on the same column while column < pos.col.0 {
random_line_split
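// Standalone sketch (an illustration by assumption, not rustcxx code) of the symbol naming
// used in Function::parse above: the macro invocation's token string is hashed and the
// digest becomes a deterministic `rustcxx_XXXXXXXXXXXXXXXX` name, so the Rust item and the
// generated C++ side can agree on a linker symbol without any extra coordination. The
// original uses the libsyntax-era SipHasher; std's DefaultHasher (also SipHash-based) is
// used here so the sketch compiles on current stable Rust.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn rustcxx_symbol(tokens: &str) -> String {
    let mut hasher = DefaultHasher::new();
    tokens.hash(&mut hasher);
    format!("rustcxx_{:016x}", hasher.finish())
}

fn main() {
    // Within one build, identical macro bodies map to the same symbol, so the Rust crate
    // and the generated C++ object file stay consistent.
    let a = rustcxx_symbol("(a: uint32_t) -> uint32_t { a * 2 }");
    let b = rustcxx_symbol("(a: uint32_t) -> uint32_t { a * 2 }");
    assert_eq!(a, b);
    println!("{}", a);
}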
lib.rs
// Copyright 2016 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #![crate_name="rustcxx_common"] #![feature(rustc_private, slice_patterns)] extern crate syntax; extern crate rustc; mod types; use std::borrow::Cow; use std::hash::{SipHasher, Hash, Hasher}; use std::iter; use syntax::abi::Abi; use syntax::ast::{self, DUMMY_NODE_ID}; use syntax::codemap::{Span, Spanned, respan, spanned, DUMMY_SP}; use syntax::errors::Handler; use syntax::ext::base::ExtCtxt; use syntax::ext::build::AstBuilder; use syntax::ext::quote::rt::ToTokens; use syntax::parse::{token, PResult}; use syntax::parse::common::SeqSep; use syntax::parse::parser::Parser; use syntax::print::pprust::{token_to_string, tts_to_string}; use syntax::ptr::P; use syntax::tokenstream::TokenTree; /// Language specific parsing. /// /// The two macros, `cxx!` and `rust!`, share a similar syntax. /// This trait differentiates the two, such that the rest of the parsing code can be reused. pub trait Lang { type Body: ToTokens; type ArgValue; fn parse_body<'a>(parser: &mut Parser<'a>) -> PResult<'a, Self::Body>; fn parse_arg_value<'a>(ecx: &ExtCtxt, parser: &mut Parser<'a>, ident: ast::SpannedIdent) -> PResult<'a, Self::ArgValue>; } pub enum Rust {} pub enum Cxx {} impl Lang for Rust { type Body = P<ast::Block>; type ArgValue = Vec<Spanned<String>>; fn parse_body<'a>(parser: &mut Parser<'a>) -> PResult<'a, Self::Body> { parser.parse_block() } fn parse_arg_value<'a>(_ecx: &ExtCtxt, parser: &mut Parser<'a>, ident: ast::SpannedIdent) -> PResult<'a, Self::ArgValue> { if parser.eat(&token::Eq) { let mut tokens = Vec::new(); while!parser.check(&token::Comma) && !parser.check(&token::CloseDelim(token::Paren)) { tokens.push(try!(parser.parse_token_tree())); } Ok(flatten_tts(&tokens)) } else { Ok(vec![respan(ident.span, ident.node.to_string())]) } } } impl Lang for Cxx { type Body = Vec<TokenTree>; type ArgValue = P<ast::Expr>; fn parse_body<'a>(parser: &mut Parser<'a>) -> PResult<'a, Self::Body> { try!(parser.expect(&token::OpenDelim(token::Brace))); parser.parse_seq_to_end( &token::CloseDelim(token::Brace), SeqSep::none(), |parser| parser.parse_token_tree()) } fn parse_arg_value<'a>(ecx: &ExtCtxt, parser: &mut Parser<'a>, ident: ast::SpannedIdent) -> PResult<'a, Self::ArgValue> { if parser.eat(&token::Eq) { parser.parse_expr() } else { Ok(ecx.expr_ident(ident.span, ident.node)) } } } pub struct Function<L: Lang> { pub span: Span, pub name: ast::Ident, pub ret_ty: Option<P<ast::Ty>>, pub args: Vec<ArgSpec<L>>, pub body: L::Body, } impl <L: Lang> Function<L> { pub fn
<'a>(ecx: &ExtCtxt<'a>, span: Span, tts: &[TokenTree]) -> PResult<'a, Function<L>> { let mut parser = ecx.new_parser_from_tts(tts); let args = if parser.check(&token::OpenDelim(token::Paren)) { Some(try!(Self::parse_args(ecx, &mut parser))) } else { None }; let ret_ty = if args.is_some() && parser.check(&token::RArrow) { Some(try!(Self::parse_ret_ty(&mut parser))) } else { None }; let body = try!(L::parse_body(&mut parser)); let hash = { let mut hasher = SipHasher::new(); tts_to_string(tts).hash(&mut hasher); hasher.finish() }; let name = ecx.ident_of(&format!("rustcxx_{:016x}", hash)); Ok(Function { span: span, name: name, ret_ty: ret_ty, args: args.unwrap_or_else(|| Vec::new()), body: body, }) } fn parse_args<'a>(ecx: &ExtCtxt, parser: &mut Parser<'a>) -> PResult<'a, Vec<ArgSpec<L>>> { parser.parse_unspanned_seq( &token::OpenDelim(token::Paren), &token::CloseDelim(token::Paren), SeqSep::trailing_allowed(token::Comma), |parser| ArgSpec::parse(ecx, parser)) } fn parse_ret_ty<'a>(parser: &mut Parser<'a>) -> PResult<'a, P<ast::Ty>> { try!(parser.expect(&token::RArrow)); parser.parse_ty() } pub fn fn_decl(&self, ecx: &ExtCtxt) -> P<ast::FnDecl> { let args = self.args.iter().map(|arg| { ecx.arg(arg.ident.span, arg.ident.node, arg.ty.clone()) }).collect(); let ret_ty = self.ret_ty.clone() .map(ast::FunctionRetTy::Ty) .unwrap_or(ast::FunctionRetTy::Default(DUMMY_SP)); P(ast::FnDecl { inputs: args, output: ret_ty, variadic: false }) } pub fn foreign_item(&self, ecx: &ExtCtxt) -> ast::ForeignItem { let fn_decl = self.fn_decl(ecx); ast::ForeignItem { id: DUMMY_NODE_ID, ident: self.name, attrs: Vec::new(), node: ast::ForeignItemKind::Fn(fn_decl, ast::Generics::default()), vis: ast::Visibility::Inherited, span: self.span, } } pub fn cxx_args<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> { let args = try!(self.args.iter().map(|arg| { let ty = try!(arg.cxx_type(&ecx.parse_sess.span_diagnostic)); Ok(format!("{} const {}", ty, arg.ident.node)) }).collect::<PResult<Vec<String>>>()); Ok(args.join(", ")) } pub fn cxx_ret_ty<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, Cow<'static, str>> { self.ret_ty.as_ref().map(|ty| { types::convert_ty_to_cxx(&ecx.parse_sess.span_diagnostic, &ty) }).unwrap_or(Ok(Cow::from("void"))) } } #[derive(Debug)] pub struct ArgSpec<L: Lang> { pub ident: ast::SpannedIdent, pub ty: P<ast::Ty>, pub value: L::ArgValue, } impl <L: Lang> ArgSpec<L> { pub fn parse<'a>(ecx: &ExtCtxt, parser: &mut Parser<'a>) -> PResult<'a, ArgSpec<L>> { let ident = { let lo = parser.span.lo; let ident = try!(parser.parse_ident()); let hi = parser.span.lo; spanned(lo, hi, ident) }; try!(parser.expect(&token::Colon)); let ty = try!(parser.parse_ty()); let value = try!(L::parse_arg_value(ecx, parser, ident)); Ok(ArgSpec { ident: ident, ty: ty, value: value, }) } pub fn cxx_type<'a>(&self, handler: &'a Handler) -> PResult<'a, Cow<'static, str>> { types::convert_ty_to_cxx(handler, &self.ty) } } impl Function<Cxx> { pub fn call_expr<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, P<ast::Expr>> { let name = self.name.clone(); let args = self.args.iter().map(|arg| arg.value.clone()).collect(); Ok(ecx.expr_call_ident(self.span, name, args)) } pub fn cxx_code<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> { let ret_ty = try!(self.cxx_ret_ty(ecx)); let args = try!(self.cxx_args(ecx)); let signature = format!( "{span}\nextern \"C\" {ret_ty} {name}({args})", span = span_to_cpp_directive(ecx, self.span), ret_ty = ret_ty, name = self.name, args = args); let mut body = tokens_to_cpp(ecx, &flatten_tts(&self.body)); if 
self.ret_ty.is_some() { body = format!("return ({{\n{};\n}});", body); } Ok(format!("{} {{\n{}\n}}\n", signature, body)) } } // Calling rust from C++ is a bit trickier. // We must declare the function before it can be used. // However C++ requires the function to be declared outside the current function, but then we may // miss type definitions which are in scope due to being in a namespace, or some includes. // // For example : // ```c++ // #include <stdint.h> // #include <stdio.h> // // void foo() { // uint32_t a = 3; // uint32_t double = rust![(a: uint32_t) -> uint32_t { // a * 2 // }]; // printf("double: ", a); // } // ``` // // Declaring the extern function before the includes would not work, as uint32_t is not defined at // this point. Finding the right place to declare it would be complicated and would almost require // a full C++ parser. // // Instead we use an alternative approach. The function's symbol is declared with an opaque type at // the top of the file. This does not require argument types to be in scope. // When invoking the function, the symbol is first casted into a function pointer of the correct type. // This way, the same typing context as in the original source is used. // // The example above would be translated into the following : // // ```c++ // struct rustcxx_XXXXXXXX; // extern "C" rustcxx_XXXXXXXX rustcxx_XXXXXXXX; // // #include <stdint.h> // #include <stdio.h> // // void foo() { // uint32_t a = 3; // uint32_t double = ((uint32_t (*)(uint32_t a)) &rustcxx_XXXXXXXX)(a); // printf("double: ", a); // } // ``` impl Function<Rust> { pub fn cxx_decl<'a>(&self, _ecx: &'a ExtCtxt) -> PResult<'a, String> { Ok(format!("struct {}; extern \"C\" {} {};", self.name, self.name, self.name)) } pub fn cxx_call<'a>(&self, ecx: &'a ExtCtxt) -> PResult<'a, String> { let ret_ty = try!(self.cxx_ret_ty(ecx)); let args_sig = try!(self.cxx_args(ecx)); let arg_separator = respan(DUMMY_SP, String::from(",")); let args_value = self.args.iter().map(|arg| { arg.value.clone() }).collect::<Vec<_>>().join(&arg_separator); let cast_ty = format!("{} (*) ({})", ret_ty, args_sig); let fn_ptr = format!("( ({}) &{} )", cast_ty, self.name); let call = format!("{} ({})", fn_ptr, tokens_to_cpp(ecx, &args_value)); Ok(call) } pub fn item<'a>(&self, ecx: &'a ExtCtxt) -> P<ast::Item> { let decl = self.fn_decl(ecx); // Function has to be no_mangle, otherwise it can't be called from C++ let no_mangle = ecx.meta_word(self.span, token::intern("no_mangle").as_str()); // The function has to be exported or it would be optimized out by the compiler. // The compiler already prints an error, but it is easy to miss, so make it a hard error. let deny = ecx.meta_list( self.span, token::intern("deny").as_str(), vec![ecx.meta_word(self.span, token::intern("private_no_mangle_fns").as_str())]); let attrs = vec![ ecx.attribute(self.span, no_mangle), ecx.attribute(self.span, deny), ]; let fn_item = ast::ItemKind::Fn( decl, ast::Unsafety::Unsafe, ast::Constness::NotConst, Abi::C, ast::Generics::default(), self.body.clone()); P(ast::Item { ident: self.name, attrs: attrs, id: ast::DUMMY_NODE_ID, node: fn_item, vis: ast::Visibility::Public, span: self.span, }) } } /// Find and replace uses of rust![.. ] in a token tree stream. /// /// The callback is invoked for every use of the rust! macro and it's result is used to replace it. 
pub fn parse_rust_macro<F>(tts: &[TokenTree], f: &mut F) -> Vec<Spanned<String>> where F: FnMut(Span, &[TokenTree]) -> Vec<Spanned<String>> { let mut result = Vec::new(); // Iterate over the tokens with 3 tokens of lookahead. let mut i = 0; loop { match (tts.get(i), tts.get(i+1), tts.get(i+2)) { (Some(&TokenTree::Token(_, token::Ident(ident))), Some(&TokenTree::Token(_, token::Not)), Some(&TokenTree::Delimited(span, ref contents))) if ident.name.to_string() == "rust" => { i += 2; result.extend(f(span, &contents.tts)); } (Some(&TokenTree::Delimited(_, ref contents)), _, _) => { // Recursively look into the token tree result.push(respan(contents.open_span, token_to_string(&contents.open_token()))); result.extend(parse_rust_macro(&contents.tts, f)); result.push(respan(contents.close_span, token_to_string(&contents.close_token()))); } (Some(&TokenTree::Token(span, ref tok)), _, _) => { result.push(respan(span, token_to_string(tok))); } (Some(&TokenTree::Sequence(..)), _, _) => unimplemented!(), (None, _, _) => break, } i += 1; } result } /// Flatten a token tree stream. /// /// Each token is stringified and paired with it's span. pub fn flatten_tts(tts: &[TokenTree]) -> Vec<Spanned<String>> { tts.iter().flat_map(|tt| { match tt { &TokenTree::Token(span, ref tok) => { vec![respan(span, token_to_string(tok))] } &TokenTree::Delimited(_, ref delimited) => { let open = respan(delimited.open_span, token_to_string(&delimited.open_token())); let close = respan(delimited.close_span, token_to_string(&delimited.close_token())); iter::once(open) .chain(flatten_tts(&delimited.tts)) .chain(iter::once(close)) .collect() } &TokenTree::Sequence(..) => unimplemented!() } }).collect() } /// Join tokens, using `#line` C preprocessor directives to maintain span /// information. pub fn tokens_to_cpp(ecx: &ExtCtxt, tokens: &[Spanned<String>]) -> String { let codemap = ecx.parse_sess.codemap(); let mut last_pos = codemap.lookup_char_pos(DUMMY_SP.lo); let mut column = 0; let mut contents = String::new(); for token in tokens { if token.span!= DUMMY_SP { let pos = codemap.lookup_char_pos(token.span.lo); if pos.file.name == pos.file.name && pos.line == last_pos.line + 1 { contents.push('\n'); column = 0; } else if pos.file.name!= pos.file.name || pos.line!= last_pos.line { contents.push('\n'); contents.push_str(&span_to_cpp_directive(ecx, token.span)); contents.push('\n'); column = 0; } // Pad the code such that the token remains on the same column while column < pos.col.0 { contents.push(' '); column += 1; } last_pos = pos; } column += token.node.len(); contents.push_str(&token.node); } return contents; } pub fn span_to_cpp_directive(ecx: &ExtCtxt, span: Span) -> String { let codemap = ecx.parse_sess.codemap(); let pos = codemap.lookup_char_pos(span.lo); format!("#line {} {:?}", pos.line, pos.file.name) }
parse
identifier_name
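// Simplified sketch (an assumption, not the rustcxx implementation) of the idea behind
// tokens_to_cpp/span_to_cpp_directive above: whenever the emitted token stream moves to a
// different source line, a C preprocessor `#line` directive is written, so C++ compiler
// diagnostics for the generated code point back at the original Rust source location.
struct Tok<'a> {
    text: &'a str,
    file: &'a str,
    line: u32,
}

fn tokens_to_cpp(tokens: &[Tok]) -> String {
    let mut out = String::new();
    let mut last_line = 0u32;
    for t in tokens {
        if t.line != last_line {
            out.push_str(&format!("\n#line {} {:?}\n", t.line, t.file));
            last_line = t.line;
        }
        out.push_str(t.text);
        out.push(' ');
    }
    out
}

fn main() {
    let toks = [
        Tok { text: "uint32_t", file: "src/lib.rs", line: 7 },
        Tok { text: "a", file: "src/lib.rs", line: 7 },
        Tok { text: "=", file: "src/lib.rs", line: 7 },
        Tok { text: "3", file: "src/lib.rs", line: 8 },
        Tok { text: ";", file: "src/lib.rs", line: 8 },
    ];
    // Prints the tokens with #line 7 "src/lib.rs" and #line 8 "src/lib.rs" interleaved.
    println!("{}", tokens_to_cpp(&toks));
}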
server.rs
use std::thread; use std::sync::mpsc; use websocket; use websocket::{Message, Sender, Receiver}; use websocket::server::sender; use websocket::stream::WebSocketStream; use websocket::message::CloseData; use std::io::prelude::*; use std::fs::{OpenOptions, File}; use std::net::Shutdown; use rustc_serialize::json::{Json, ToJson}; use cbor; use hyper::header::Cookie; use value::Value; use relation::Change; use flow::{Changes, Flow}; use client; pub trait FromJson { fn from_json(json: &Json) -> Self; } impl ToJson for Value { fn to_json(&self) -> Json { match *self { Value::Null => panic!("Cannot allow the client to see nulls"), Value::Bool(bool) => Json::Boolean(bool), Value::String(ref string) => Json::String(string.clone()), Value::Float(float) => Json::F64(float), Value::Column(ref column) => Json::Array(column.iter().map(|v| v.to_json()).collect()), } } } impl FromJson for Value { fn from_json(json: &Json) -> Self { match *json { Json::Boolean(bool) => Value::Bool(bool), Json::String(ref string) => Value::String(string.clone()), Json::F64(float) => Value::Float(float), Json::I64(int) => Value::Float(int as f64), Json::U64(uint) => Value::Float(uint as f64), Json::Array(ref array) => Value::Column(array.iter().map(FromJson::from_json).collect()), _ => panic!("Cannot decode {:?} as Value", json), } } } impl FromJson for String { fn
(json: &Json) -> Self { json.as_string().unwrap().to_owned() } } impl<T: FromJson> FromJson for Vec<T> { fn from_json(json: &Json) -> Self { json.as_array().unwrap().iter().map(|t| FromJson::from_json(t)).collect() } } #[derive(Debug, Clone)] pub struct Event { pub changes: Changes, pub session: String, } impl ToJson for Event { fn to_json(&self) -> Json { Json::Object(vec![ ("changes".to_string(), Json::Array( self.changes.iter().map(|&(ref view_id, ref view_changes)| { Json::Array(vec![ view_id.to_json(), view_changes.fields.to_json(), view_changes.insert.to_json(), view_changes.remove.to_json(), ]) }).collect() ) ), ("session".to_string(), self.session.to_json()), ].into_iter().collect()) } } impl FromJson for Event { fn from_json(json: &Json) -> Self { Event{ changes: json.as_object().unwrap()["changes"] .as_array().unwrap().iter().map(|change| { let change = change.as_array().unwrap(); assert_eq!(change.len(), 4); let view_id = FromJson::from_json(&change[0]); let fields = FromJson::from_json(&change[1]); let insert = FromJson::from_json(&change[2]); let remove = FromJson::from_json(&change[3]); (view_id, Change{fields:fields, insert: insert, remove: remove}) }).collect(), session: "".to_string(), } } } pub enum ServerEvent { Change(Vec<u8>), Sync((sender::Sender<WebSocketStream>,Option<String>)), Terminate(Option<CloseData>), } // TODO holy crap why is everything blocking? this is a mess pub fn server_events() -> mpsc::Receiver<ServerEvent> { let (event_sender, event_receiver) = mpsc::channel(); thread::spawn(move || { let server = websocket::Server::bind("0.0.0.0:2794").unwrap(); for connection in server { let event_sender = event_sender.clone(); thread::spawn(move || { // accept request let request = connection.unwrap().read_request().unwrap(); request.validate().unwrap(); // Get the User ID from a cookie in the headers let user_id = get_user_id(request.headers.get::<Cookie>()); let response = request.accept(); let (mut sender, mut receiver) = response.send().unwrap().split(); let ip = sender.get_mut().peer_addr().unwrap(); println!("Connection from {}", ip); ::std::io::stdout().flush().unwrap(); // TODO is this actually necessary? 
// hand over sender event_sender.send(ServerEvent::Sync((sender,user_id))).unwrap(); // handle messages for message in receiver.incoming_messages() { let message = match message { Ok(m) => m, Err(_) => return, }; match message { Message::Binary(bytes) => { event_sender.send(ServerEvent::Change(bytes)).unwrap(); } Message::Close(_) => { let ip_addr = format!("{}", ip); println!("Received close message from {}.",ip_addr); let close_message = CloseData{status_code: 0, reason: ip_addr}; event_sender.send(ServerEvent::Terminate(Some(close_message))).unwrap(); } _ => println!("Unknown message: {:?}", message) } } }); } }); event_receiver } pub fn load(flow: &mut Flow, filename: &str) { let mut events = OpenOptions::new().create(true).open(filename).unwrap(); let mut old_events = String::new(); events.read_to_string(&mut old_events).unwrap(); for line in old_events.lines() { let json = Json::from_str(&line).unwrap(); let event: Event = FromJson::from_json(&json); flow.quiesce(event.changes); } } pub struct Server { pub flow: Flow, pub events: File, pub senders: Vec<sender::Sender<WebSocketStream>>, } pub fn handle_event(server: &mut Server, event: Event, event_json: Json) { server.events.write_all(format!("{}", event_json).as_bytes()).unwrap(); server.events.write_all("\n".as_bytes()).unwrap(); server.events.flush().unwrap(); let old_flow = time!("cloning", { server.flow.clone() }); server.flow.quiesce(event.changes); let changes = time!("diffing", { server.flow.changes_from(old_flow) }); for sender in server.senders.iter_mut() { let session_id = format!("{}", sender.get_mut().peer_addr().unwrap()); let text = format!("{}", Event{changes: changes.clone(), session: session_id}.to_json()); match sender.send_message(Message::Text(text)) { Ok(_) => (), Err(error) => println!("Send error: {}", error), }; } } pub fn run() { let mut flow = Flow::new(); time!("reading saved state", { load(&mut flow, "./bootstrap"); load(&mut flow, "./events"); }); let events = OpenOptions::new().write(true).append(true).open("./events").unwrap(); let senders: Vec<sender::Sender<WebSocketStream>> = Vec::new(); let mut server = Server{flow: flow, events: events, senders: senders}; for server_event in server_events() { match server_event { ServerEvent::Sync((mut sender,user_id)) => { // Add a session to the session table let session_id = format!("{}", sender.get_mut().peer_addr().unwrap()); let mut add_session = client::insert_fact(&"sessions",&vec!["id","status"],&vec![Value::String(session_id.clone()), Value::Float(1f64) ],None); // If we have a user ID, add a mapping from the session ID to the user ID add_session = match user_id { Some(user_id) => { client::insert_fact(&"session id to user id",&vec!["session id","user id"],&vec![Value::String(session_id.clone()), Value::String(user_id), ],Some(add_session)) }, None => add_session, }; let json = add_session.to_json(); handle_event(&mut server, add_session, json); let changes = server.flow.as_changes(); let text = format!("{}", Event{changes: changes, session: session_id}.to_json()); match sender.send_message(Message::Text(text)) { Ok(_) => (), Err(error) => println!("Send error: {}", error), }; server.senders.push(sender) } ServerEvent::Change(input_bytes) => { // TODO we throw cbor in here to avoid https://github.com/rust-lang/rustc-serialize/issues/113 let mut decoder = cbor::Decoder::from_bytes(&input_bytes[..]); let cbor = decoder.items().next().unwrap().unwrap(); let json = cbor.to_json(); let event = FromJson::from_json(&json); handle_event(&mut server, event, 
json); } ServerEvent::Terminate(m) => { let terminate_ip = m.unwrap().reason; println!("Closing connection from {}...",terminate_ip); // Find the index of the connection's sender let ip_ix = server.senders.iter_mut().position(|mut sender| { let ip = format!("{}",sender.get_mut().peer_addr().unwrap()); ip == terminate_ip }); // Properly clean up connections and the session table match ip_ix { Some(ix) => { // Close the connection let _ = server.senders[ix].send_message(Message::Close(None)); match server.senders[ix].get_mut().shutdown(Shutdown::Both) { Ok(_) => println!("Connection from {} has closed successfully.",terminate_ip), Err(e) => println!("Connection from {} failed to shut down properly: {}",terminate_ip,e), } server.senders.remove(ix); // Update the session table let sessions = server.flow.get_output("sessions").clone(); let ip_string = Value::String(terminate_ip.clone()); match sessions.find_maybe("id",&ip_string) { Some(session) => { let closed_session = session.clone(); let mut close_session_values = &mut closed_session.values.to_vec(); let status_ix = match closed_session.names.iter().position(|name| name == "status") { Some(ix) => ix, None => panic!("No field named \"status\""), }; close_session_values[status_ix] = Value::Float(0f64); let change = Change { fields: sessions.fields.clone(), insert: vec![close_session_values.clone()], remove: vec![session.values.to_vec().clone()], }; let event = Event{changes: vec![("sessions".to_string(),change)], session: "".to_string()}; let json = event.to_json(); handle_event(&mut server, event, json); }, None => println!("No session found"), } }, None => panic!("IP address {} is not connected",terminate_ip), } } } } } pub fn get_user_id(cookies: Option<&Cookie>) -> Option<String> { match cookies { Some(cookies) => { match cookies.iter().find(|cookie| cookie.name == "userid") { Some(user_id) => Some(user_id.value.clone()), None => None, } }, None => None, } }
from_json
identifier_name
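// Standalone sketch (using a toy Json enum by assumption, instead of rustc_serialize's) of
// the FromJson pattern in server.rs above: each concrete type knows how to pull itself out
// of a generic JSON value, and container impls delegate elementwise, which is what lets
// Event::from_json decompose the nested changes array.
#[derive(Debug)]
enum Json {
    String(String),
    F64(f64),
    Array(Vec<Json>),
}

trait FromJson {
    fn from_json(json: &Json) -> Self;
}

impl FromJson for String {
    fn from_json(json: &Json) -> Self {
        match json {
            Json::String(s) => s.clone(),
            other => panic!("expected string, got {:?}", other),
        }
    }
}

impl FromJson for f64 {
    fn from_json(json: &Json) -> Self {
        match json {
            Json::F64(f) => *f,
            other => panic!("expected number, got {:?}", other),
        }
    }
}

impl<T: FromJson> FromJson for Vec<T> {
    fn from_json(json: &Json) -> Self {
        match json {
            Json::Array(items) => items.iter().map(T::from_json).collect(),
            other => panic!("expected array, got {:?}", other),
        }
    }
}

fn main() {
    let numbers: Vec<f64> = FromJson::from_json(&Json::Array(vec![Json::F64(1.0), Json::F64(2.0)]));
    assert_eq!(numbers, vec![1.0, 2.0]);

    let names: Vec<String> = FromJson::from_json(&Json::Array(vec![
        Json::String("sessions".to_string()),
    ]));
    assert_eq!(names, vec!["sessions".to_string()]);
}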
server.rs
use std::thread; use std::sync::mpsc; use websocket; use websocket::{Message, Sender, Receiver}; use websocket::server::sender; use websocket::stream::WebSocketStream; use websocket::message::CloseData; use std::io::prelude::*; use std::fs::{OpenOptions, File}; use std::net::Shutdown; use rustc_serialize::json::{Json, ToJson}; use cbor; use hyper::header::Cookie; use value::Value; use relation::Change; use flow::{Changes, Flow}; use client; pub trait FromJson { fn from_json(json: &Json) -> Self; } impl ToJson for Value { fn to_json(&self) -> Json { match *self { Value::Null => panic!("Cannot allow the client to see nulls"), Value::Bool(bool) => Json::Boolean(bool), Value::String(ref string) => Json::String(string.clone()), Value::Float(float) => Json::F64(float), Value::Column(ref column) => Json::Array(column.iter().map(|v| v.to_json()).collect()), } } } impl FromJson for Value { fn from_json(json: &Json) -> Self { match *json { Json::Boolean(bool) => Value::Bool(bool), Json::String(ref string) => Value::String(string.clone()), Json::F64(float) => Value::Float(float), Json::I64(int) => Value::Float(int as f64), Json::U64(uint) => Value::Float(uint as f64), Json::Array(ref array) => Value::Column(array.iter().map(FromJson::from_json).collect()), _ => panic!("Cannot decode {:?} as Value", json), } } } impl FromJson for String { fn from_json(json: &Json) -> Self { json.as_string().unwrap().to_owned() } } impl<T: FromJson> FromJson for Vec<T> { fn from_json(json: &Json) -> Self
} #[derive(Debug, Clone)] pub struct Event { pub changes: Changes, pub session: String, } impl ToJson for Event { fn to_json(&self) -> Json { Json::Object(vec![ ("changes".to_string(), Json::Array( self.changes.iter().map(|&(ref view_id, ref view_changes)| { Json::Array(vec![ view_id.to_json(), view_changes.fields.to_json(), view_changes.insert.to_json(), view_changes.remove.to_json(), ]) }).collect() ) ), ("session".to_string(), self.session.to_json()), ].into_iter().collect()) } } impl FromJson for Event { fn from_json(json: &Json) -> Self { Event{ changes: json.as_object().unwrap()["changes"] .as_array().unwrap().iter().map(|change| { let change = change.as_array().unwrap(); assert_eq!(change.len(), 4); let view_id = FromJson::from_json(&change[0]); let fields = FromJson::from_json(&change[1]); let insert = FromJson::from_json(&change[2]); let remove = FromJson::from_json(&change[3]); (view_id, Change{fields:fields, insert: insert, remove: remove}) }).collect(), session: "".to_string(), } } } pub enum ServerEvent { Change(Vec<u8>), Sync((sender::Sender<WebSocketStream>,Option<String>)), Terminate(Option<CloseData>), } // TODO holy crap why is everything blocking? this is a mess pub fn server_events() -> mpsc::Receiver<ServerEvent> { let (event_sender, event_receiver) = mpsc::channel(); thread::spawn(move || { let server = websocket::Server::bind("0.0.0.0:2794").unwrap(); for connection in server { let event_sender = event_sender.clone(); thread::spawn(move || { // accept request let request = connection.unwrap().read_request().unwrap(); request.validate().unwrap(); // Get the User ID from a cookie in the headers let user_id = get_user_id(request.headers.get::<Cookie>()); let response = request.accept(); let (mut sender, mut receiver) = response.send().unwrap().split(); let ip = sender.get_mut().peer_addr().unwrap(); println!("Connection from {}", ip); ::std::io::stdout().flush().unwrap(); // TODO is this actually necessary? 
// hand over sender event_sender.send(ServerEvent::Sync((sender,user_id))).unwrap(); // handle messages for message in receiver.incoming_messages() { let message = match message { Ok(m) => m, Err(_) => return, }; match message { Message::Binary(bytes) => { event_sender.send(ServerEvent::Change(bytes)).unwrap(); } Message::Close(_) => { let ip_addr = format!("{}", ip); println!("Received close message from {}.",ip_addr); let close_message = CloseData{status_code: 0, reason: ip_addr}; event_sender.send(ServerEvent::Terminate(Some(close_message))).unwrap(); } _ => println!("Unknown message: {:?}", message) } } }); } }); event_receiver } pub fn load(flow: &mut Flow, filename: &str) { let mut events = OpenOptions::new().create(true).open(filename).unwrap(); let mut old_events = String::new(); events.read_to_string(&mut old_events).unwrap(); for line in old_events.lines() { let json = Json::from_str(&line).unwrap(); let event: Event = FromJson::from_json(&json); flow.quiesce(event.changes); } } pub struct Server { pub flow: Flow, pub events: File, pub senders: Vec<sender::Sender<WebSocketStream>>, } pub fn handle_event(server: &mut Server, event: Event, event_json: Json) { server.events.write_all(format!("{}", event_json).as_bytes()).unwrap(); server.events.write_all("\n".as_bytes()).unwrap(); server.events.flush().unwrap(); let old_flow = time!("cloning", { server.flow.clone() }); server.flow.quiesce(event.changes); let changes = time!("diffing", { server.flow.changes_from(old_flow) }); for sender in server.senders.iter_mut() { let session_id = format!("{}", sender.get_mut().peer_addr().unwrap()); let text = format!("{}", Event{changes: changes.clone(), session: session_id}.to_json()); match sender.send_message(Message::Text(text)) { Ok(_) => (), Err(error) => println!("Send error: {}", error), }; } } pub fn run() { let mut flow = Flow::new(); time!("reading saved state", { load(&mut flow, "./bootstrap"); load(&mut flow, "./events"); }); let events = OpenOptions::new().write(true).append(true).open("./events").unwrap(); let senders: Vec<sender::Sender<WebSocketStream>> = Vec::new(); let mut server = Server{flow: flow, events: events, senders: senders}; for server_event in server_events() { match server_event { ServerEvent::Sync((mut sender,user_id)) => { // Add a session to the session table let session_id = format!("{}", sender.get_mut().peer_addr().unwrap()); let mut add_session = client::insert_fact(&"sessions",&vec!["id","status"],&vec![Value::String(session_id.clone()), Value::Float(1f64) ],None); // If we have a user ID, add a mapping from the session ID to the user ID add_session = match user_id { Some(user_id) => { client::insert_fact(&"session id to user id",&vec!["session id","user id"],&vec![Value::String(session_id.clone()), Value::String(user_id), ],Some(add_session)) }, None => add_session, }; let json = add_session.to_json(); handle_event(&mut server, add_session, json); let changes = server.flow.as_changes(); let text = format!("{}", Event{changes: changes, session: session_id}.to_json()); match sender.send_message(Message::Text(text)) { Ok(_) => (), Err(error) => println!("Send error: {}", error), }; server.senders.push(sender) } ServerEvent::Change(input_bytes) => { // TODO we throw cbor in here to avoid https://github.com/rust-lang/rustc-serialize/issues/113 let mut decoder = cbor::Decoder::from_bytes(&input_bytes[..]); let cbor = decoder.items().next().unwrap().unwrap(); let json = cbor.to_json(); let event = FromJson::from_json(&json); handle_event(&mut server, event, 
json); } ServerEvent::Terminate(m) => { let terminate_ip = m.unwrap().reason; println!("Closing connection from {}...",terminate_ip); // Find the index of the connection's sender let ip_ix = server.senders.iter_mut().position(|mut sender| { let ip = format!("{}",sender.get_mut().peer_addr().unwrap()); ip == terminate_ip }); // Properly clean up connections and the session table match ip_ix { Some(ix) => { // Close the connection let _ = server.senders[ix].send_message(Message::Close(None)); match server.senders[ix].get_mut().shutdown(Shutdown::Both) { Ok(_) => println!("Connection from {} has closed successfully.",terminate_ip), Err(e) => println!("Connection from {} failed to shut down properly: {}",terminate_ip,e), } server.senders.remove(ix); // Update the session table let sessions = server.flow.get_output("sessions").clone(); let ip_string = Value::String(terminate_ip.clone()); match sessions.find_maybe("id",&ip_string) { Some(session) => { let closed_session = session.clone(); let mut close_session_values = &mut closed_session.values.to_vec(); let status_ix = match closed_session.names.iter().position(|name| name == "status") { Some(ix) => ix, None => panic!("No field named \"status\""), }; close_session_values[status_ix] = Value::Float(0f64); let change = Change { fields: sessions.fields.clone(), insert: vec![close_session_values.clone()], remove: vec![session.values.to_vec().clone()], }; let event = Event{changes: vec![("sessions".to_string(),change)], session: "".to_string()}; let json = event.to_json(); handle_event(&mut server, event, json); }, None => println!("No session found"), } }, None => panic!("IP address {} is not connected",terminate_ip), } } } } } pub fn get_user_id(cookies: Option<&Cookie>) -> Option<String> { match cookies { Some(cookies) => { match cookies.iter().find(|cookie| cookie.name == "userid") { Some(user_id) => Some(user_id.value.clone()), None => None, } }, None => None, } }
{ json.as_array().unwrap().iter().map(|t| FromJson::from_json(t)).collect() }
identifier_body
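// Simplified standalone sketch (types are assumptions, not the real Flow/Relation types) of
// how the Terminate branch above ends a session: instead of mutating the row in place, it
// emits a Change that removes the old row and inserts the same row with `status` set to 0,
// so the transition goes through the same quiesce/diff/broadcast path as every other edit.
#[derive(Debug)]
struct Change {
    fields: Vec<String>,
    insert: Vec<Vec<String>>,
    remove: Vec<Vec<String>>,
}

fn close_session(fields: &[&str], row: &[&str]) -> Change {
    let status_ix = fields
        .iter()
        .position(|f| *f == "status")
        .expect("no field named \"status\"");
    let mut closed: Vec<String> = row.iter().map(|v| v.to_string()).collect();
    closed[status_ix] = "0".to_string();
    Change {
        fields: fields.iter().map(|f| f.to_string()).collect(),
        insert: vec![closed],
        remove: vec![row.iter().map(|v| v.to_string()).collect()],
    }
}

fn main() {
    let change = close_session(&["id", "status"], &["127.0.0.1:51234", "1"]);
    // remove carries the original row with status 1; insert carries the row with status 0.
    println!("fields: {:?}", change.fields);
    println!("remove: {:?}", change.remove);
    println!("insert: {:?}", change.insert);
}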
server.rs
use std::thread; use std::sync::mpsc; use websocket; use websocket::{Message, Sender, Receiver}; use websocket::server::sender; use websocket::stream::WebSocketStream; use websocket::message::CloseData; use std::io::prelude::*; use std::fs::{OpenOptions, File}; use std::net::Shutdown; use rustc_serialize::json::{Json, ToJson}; use cbor; use hyper::header::Cookie; use value::Value; use relation::Change; use flow::{Changes, Flow}; use client; pub trait FromJson { fn from_json(json: &Json) -> Self; } impl ToJson for Value { fn to_json(&self) -> Json { match *self { Value::Null => panic!("Cannot allow the client to see nulls"), Value::Bool(bool) => Json::Boolean(bool), Value::String(ref string) => Json::String(string.clone()), Value::Float(float) => Json::F64(float), Value::Column(ref column) => Json::Array(column.iter().map(|v| v.to_json()).collect()), } } } impl FromJson for Value { fn from_json(json: &Json) -> Self { match *json { Json::Boolean(bool) => Value::Bool(bool), Json::String(ref string) => Value::String(string.clone()), Json::F64(float) => Value::Float(float), Json::I64(int) => Value::Float(int as f64), Json::U64(uint) => Value::Float(uint as f64), Json::Array(ref array) => Value::Column(array.iter().map(FromJson::from_json).collect()), _ => panic!("Cannot decode {:?} as Value", json), } } } impl FromJson for String { fn from_json(json: &Json) -> Self { json.as_string().unwrap().to_owned() } } impl<T: FromJson> FromJson for Vec<T> { fn from_json(json: &Json) -> Self { json.as_array().unwrap().iter().map(|t| FromJson::from_json(t)).collect() } } #[derive(Debug, Clone)] pub struct Event { pub changes: Changes, pub session: String, } impl ToJson for Event { fn to_json(&self) -> Json {
("changes".to_string(), Json::Array( self.changes.iter().map(|&(ref view_id, ref view_changes)| { Json::Array(vec![ view_id.to_json(), view_changes.fields.to_json(), view_changes.insert.to_json(), view_changes.remove.to_json(), ]) }).collect() ) ), ("session".to_string(), self.session.to_json()), ].into_iter().collect()) } } impl FromJson for Event { fn from_json(json: &Json) -> Self { Event{ changes: json.as_object().unwrap()["changes"] .as_array().unwrap().iter().map(|change| { let change = change.as_array().unwrap(); assert_eq!(change.len(), 4); let view_id = FromJson::from_json(&change[0]); let fields = FromJson::from_json(&change[1]); let insert = FromJson::from_json(&change[2]); let remove = FromJson::from_json(&change[3]); (view_id, Change{fields:fields, insert: insert, remove: remove}) }).collect(), session: "".to_string(), } } } pub enum ServerEvent { Change(Vec<u8>), Sync((sender::Sender<WebSocketStream>,Option<String>)), Terminate(Option<CloseData>), } // TODO holy crap why is everything blocking? this is a mess pub fn server_events() -> mpsc::Receiver<ServerEvent> { let (event_sender, event_receiver) = mpsc::channel(); thread::spawn(move || { let server = websocket::Server::bind("0.0.0.0:2794").unwrap(); for connection in server { let event_sender = event_sender.clone(); thread::spawn(move || { // accept request let request = connection.unwrap().read_request().unwrap(); request.validate().unwrap(); // Get the User ID from a cookie in the headers let user_id = get_user_id(request.headers.get::<Cookie>()); let response = request.accept(); let (mut sender, mut receiver) = response.send().unwrap().split(); let ip = sender.get_mut().peer_addr().unwrap(); println!("Connection from {}", ip); ::std::io::stdout().flush().unwrap(); // TODO is this actually necessary? 
// hand over sender event_sender.send(ServerEvent::Sync((sender,user_id))).unwrap(); // handle messages for message in receiver.incoming_messages() { let message = match message { Ok(m) => m, Err(_) => return, }; match message { Message::Binary(bytes) => { event_sender.send(ServerEvent::Change(bytes)).unwrap(); } Message::Close(_) => { let ip_addr = format!("{}", ip); println!("Received close message from {}.",ip_addr); let close_message = CloseData{status_code: 0, reason: ip_addr}; event_sender.send(ServerEvent::Terminate(Some(close_message))).unwrap(); } _ => println!("Unknown message: {:?}", message) } } }); } }); event_receiver } pub fn load(flow: &mut Flow, filename: &str) { let mut events = OpenOptions::new().create(true).open(filename).unwrap(); let mut old_events = String::new(); events.read_to_string(&mut old_events).unwrap(); for line in old_events.lines() { let json = Json::from_str(&line).unwrap(); let event: Event = FromJson::from_json(&json); flow.quiesce(event.changes); } } pub struct Server { pub flow: Flow, pub events: File, pub senders: Vec<sender::Sender<WebSocketStream>>, } pub fn handle_event(server: &mut Server, event: Event, event_json: Json) { server.events.write_all(format!("{}", event_json).as_bytes()).unwrap(); server.events.write_all("\n".as_bytes()).unwrap(); server.events.flush().unwrap(); let old_flow = time!("cloning", { server.flow.clone() }); server.flow.quiesce(event.changes); let changes = time!("diffing", { server.flow.changes_from(old_flow) }); for sender in server.senders.iter_mut() { let session_id = format!("{}", sender.get_mut().peer_addr().unwrap()); let text = format!("{}", Event{changes: changes.clone(), session: session_id}.to_json()); match sender.send_message(Message::Text(text)) { Ok(_) => (), Err(error) => println!("Send error: {}", error), }; } } pub fn run() { let mut flow = Flow::new(); time!("reading saved state", { load(&mut flow, "./bootstrap"); load(&mut flow, "./events"); }); let events = OpenOptions::new().write(true).append(true).open("./events").unwrap(); let senders: Vec<sender::Sender<WebSocketStream>> = Vec::new(); let mut server = Server{flow: flow, events: events, senders: senders}; for server_event in server_events() { match server_event { ServerEvent::Sync((mut sender,user_id)) => { // Add a session to the session table let session_id = format!("{}", sender.get_mut().peer_addr().unwrap()); let mut add_session = client::insert_fact(&"sessions",&vec!["id","status"],&vec![Value::String(session_id.clone()), Value::Float(1f64) ],None); // If we have a user ID, add a mapping from the session ID to the user ID add_session = match user_id { Some(user_id) => { client::insert_fact(&"session id to user id",&vec!["session id","user id"],&vec![Value::String(session_id.clone()), Value::String(user_id), ],Some(add_session)) }, None => add_session, }; let json = add_session.to_json(); handle_event(&mut server, add_session, json); let changes = server.flow.as_changes(); let text = format!("{}", Event{changes: changes, session: session_id}.to_json()); match sender.send_message(Message::Text(text)) { Ok(_) => (), Err(error) => println!("Send error: {}", error), }; server.senders.push(sender) } ServerEvent::Change(input_bytes) => { // TODO we throw cbor in here to avoid https://github.com/rust-lang/rustc-serialize/issues/113 let mut decoder = cbor::Decoder::from_bytes(&input_bytes[..]); let cbor = decoder.items().next().unwrap().unwrap(); let json = cbor.to_json(); let event = FromJson::from_json(&json); handle_event(&mut server, event, 
json); } ServerEvent::Terminate(m) => { let terminate_ip = m.unwrap().reason; println!("Closing connection from {}...",terminate_ip); // Find the index of the connection's sender let ip_ix = server.senders.iter_mut().position(|mut sender| { let ip = format!("{}",sender.get_mut().peer_addr().unwrap()); ip == terminate_ip }); // Properly clean up connections and the session table match ip_ix { Some(ix) => { // Close the connection let _ = server.senders[ix].send_message(Message::Close(None)); match server.senders[ix].get_mut().shutdown(Shutdown::Both) { Ok(_) => println!("Connection from {} has closed successfully.",terminate_ip), Err(e) => println!("Connection from {} failed to shut down properly: {}",terminate_ip,e), } server.senders.remove(ix); // Update the session table let sessions = server.flow.get_output("sessions").clone(); let ip_string = Value::String(terminate_ip.clone()); match sessions.find_maybe("id",&ip_string) { Some(session) => { let closed_session = session.clone(); let mut close_session_values = &mut closed_session.values.to_vec(); let status_ix = match closed_session.names.iter().position(|name| name == "status") { Some(ix) => ix, None => panic!("No field named \"status\""), }; close_session_values[status_ix] = Value::Float(0f64); let change = Change { fields: sessions.fields.clone(), insert: vec![close_session_values.clone()], remove: vec![session.values.to_vec().clone()], }; let event = Event{changes: vec![("sessions".to_string(),change)], session: "".to_string()}; let json = event.to_json(); handle_event(&mut server, event, json); }, None => println!("No session found"), } }, None => panic!("IP address {} is not connected",terminate_ip), } } } } } pub fn get_user_id(cookies: Option<&Cookie>) -> Option<String> { match cookies { Some(cookies) => { match cookies.iter().find(|cookie| cookie.name == "userid") { Some(user_id) => Some(user_id.value.clone()), None => None, } }, None => None, } }
Json::Object(vec![
random_line_split
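The ToJson/FromJson pair in the server.rs record above is symmetric for every `Value` variant except `Value::Null`, which panics on the way out. A minimal round-trip sketch, assuming it sits next to the crate-local `Value` and `FromJson` items shown in the prefix (the `roundtrip` helper name is ours, not the crate's):

use rustc_serialize::json::ToJson;

// Hedged sketch: encode a crate-local Value to rustc_serialize Json and decode it back.
fn roundtrip(value: Value) -> Value {
    let json = value.to_json();     // Value -> Json; panics on Value::Null
    FromJson::from_json(&json)      // Json -> Value via the impl above
}

// roundtrip(Value::Column(vec![Value::Float(1.0), Value::Bool(true)])) returns an
// equal Value; integers in the JSON come back as Value::Float, as the impl coerces them.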
mpsc.rs
//! A multi-producer, single-consumer, futures-aware, FIFO queue with back //! pressure, for use communicating between tasks on the same thread. //! //! These queues are the same as those in `futures::sync`, except they're not //! intended to be sent across threads. use std::any::Any; use std::cell::RefCell; use std::collections::VecDeque; use std::error::Error; use std::fmt; use std::mem; use std::rc::{Rc, Weak}; use task::{self, Task}; use future::Executor; use sink::SendAll; use resultstream::{self, Results}; use unsync::oneshot; use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream}; /// Creates a bounded in-memory channel with buffered storage. /// /// This method creates concrete implementations of the `Stream` and `Sink` /// traits which can be used to communicate a stream of values between tasks /// with backpressure. The channel capacity is exactly `buffer`. On average, /// sending a message through this channel performs no dynamic allocation. pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) { channel_(Some(buffer)) } fn channel_<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) { let shared = Rc::new(RefCell::new(Shared { buffer: VecDeque::new(), capacity: buffer, blocked_senders: VecDeque::new(), blocked_recv: None, })); let sender = Sender { shared: Rc::downgrade(&shared) }; let receiver = Receiver { state: State::Open(shared) }; (sender, receiver) } #[derive(Debug)] struct Shared<T> { buffer: VecDeque<T>, capacity: Option<usize>, blocked_senders: VecDeque<Task>, blocked_recv: Option<Task>, } /// The transmission end of a channel. /// /// This is created by the `channel` function. #[derive(Debug)] pub struct Sender<T> { shared: Weak<RefCell<Shared<T>>>, } impl<T> Sender<T> { fn do_send(&self, msg: T) -> StartSend<T, SendError<T>> { let shared = match self.shared.upgrade() { Some(shared) => shared, None => return Err(SendError(msg)), // receiver was dropped }; let mut shared = shared.borrow_mut(); match shared.capacity { Some(capacity) if shared.buffer.len() == capacity => { shared.blocked_senders.push_back(task::current()); Ok(AsyncSink::NotReady(msg)) } _ => { shared.buffer.push_back(msg); if let Some(task) = shared.blocked_recv.take() { task.notify(); } Ok(AsyncSink::Ready) } } } } impl<T> Clone for Sender<T> { fn clone(&self) -> Self { Sender { shared: self.shared.clone() } } } impl<T> Sink for Sender<T> { type SinkItem = T; type SinkError = SendError<T>; fn
(&mut self, msg: T) -> StartSend<T, SendError<T>> { self.do_send(msg) } fn poll_complete(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } fn close(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } } impl<T> Drop for Sender<T> { fn drop(&mut self) { let shared = match self.shared.upgrade() { Some(shared) => shared, None => return, }; // The number of existing `Weak` indicates if we are possibly the last // `Sender`. If we are the last, we possibly must notify a blocked // `Receiver`. `self.shared` is always one of the `Weak` to this shared // data. Therefore the smallest possible Rc::weak_count(&shared) is 1. if Rc::weak_count(&shared) == 1 { if let Some(task) = shared.borrow_mut().blocked_recv.take() { // Wake up receiver as its stream has ended task.notify(); } } } } /// The receiving end of a channel which implements the `Stream` trait. /// /// This is created by the `channel` function. #[derive(Debug)] pub struct Receiver<T> { state: State<T>, } /// Possible states of a receiver. We're either Open (can receive more messages) /// or we're closed with a list of messages we have left to receive. #[derive(Debug)] enum State<T> { Open(Rc<RefCell<Shared<T>>>), Closed(VecDeque<T>), } impl<T> Receiver<T> { /// Closes the receiving half /// /// This prevents any further messages from being sent on the channel while /// still enabling the receiver to drain messages that are buffered. pub fn close(&mut self) { let (blockers, items) = match self.state { State::Open(ref state) => { let mut state = state.borrow_mut(); let items = mem::replace(&mut state.buffer, VecDeque::new()); let blockers = mem::replace(&mut state.blocked_senders, VecDeque::new()); (blockers, items) } State::Closed(_) => return, }; self.state = State::Closed(items); for task in blockers { task.notify(); } } } impl<T> Stream for Receiver<T> { type Item = T; type Error = (); fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { let me = match self.state { State::Open(ref mut me) => me, State::Closed(ref mut items) => { return Ok(Async::Ready(items.pop_front())) } }; if let Some(shared) = Rc::get_mut(me) { // All senders have been dropped, so drain the buffer and end the // stream. return Ok(Async::Ready(shared.borrow_mut().buffer.pop_front())); } let mut shared = me.borrow_mut(); if let Some(msg) = shared.buffer.pop_front() { if let Some(task) = shared.blocked_senders.pop_front() { drop(shared); task.notify(); } Ok(Async::Ready(Some(msg))) } else { shared.blocked_recv = Some(task::current()); Ok(Async::NotReady) } } } impl<T> Drop for Receiver<T> { fn drop(&mut self) { self.close(); } } /// The transmission end of an unbounded channel. /// /// This is created by the `unbounded` function. 
#[derive(Debug)] pub struct UnboundedSender<T>(Sender<T>); impl<T> Clone for UnboundedSender<T> { fn clone(&self) -> Self { UnboundedSender(self.0.clone()) } } impl<T> Sink for UnboundedSender<T> { type SinkItem = T; type SinkError = SendError<T>; fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { self.0.start_send(msg) } fn poll_complete(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } fn close(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } } impl<'a, T> Sink for &'a UnboundedSender<T> { type SinkItem = T; type SinkError = SendError<T>; fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { self.0.do_send(msg) } fn poll_complete(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } fn close(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } } impl<T> UnboundedSender<T> { /// Sends the provided message along this channel. /// /// This is an unbounded sender, so this function differs from `Sink::send` /// by ensuring the return type reflects that the channel is always ready to /// receive messages. #[deprecated(note = "renamed to `unbounded_send`")] #[doc(hidden)] pub fn send(&self, msg: T) -> Result<(), SendError<T>> { self.unbounded_send(msg) } /// Sends the provided message along this channel. /// /// This is an unbounded sender, so this function differs from `Sink::send` /// by ensuring the return type reflects that the channel is always ready to /// receive messages. pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> { let shared = match self.0.shared.upgrade() { Some(shared) => shared, None => return Err(SendError(msg)), }; let mut shared = shared.borrow_mut(); shared.buffer.push_back(msg); if let Some(task) = shared.blocked_recv.take() { drop(shared); task.notify(); } Ok(()) } } /// The receiving end of an unbounded channel. /// /// This is created by the `unbounded` function. #[derive(Debug)] pub struct UnboundedReceiver<T>(Receiver<T>); impl<T> UnboundedReceiver<T> { /// Closes the receiving half /// /// This prevents any further messages from being sent on the channel while /// still enabling the receiver to drain messages that are buffered. pub fn close(&mut self) { self.0.close(); } } impl<T> Stream for UnboundedReceiver<T> { type Item = T; type Error = (); fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { self.0.poll() } } /// Creates an unbounded in-memory channel with buffered storage. /// /// Identical semantics to `channel`, except with no limit to buffer size. pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) { let (send, recv) = channel_(None); (UnboundedSender(send), UnboundedReceiver(recv)) } /// Error type for sending, used when the receiving end of a channel is /// dropped pub struct SendError<T>(T); impl<T> fmt::Debug for SendError<T> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_tuple("SendError") .field(&"...") .finish() } } impl<T> fmt::Display for SendError<T> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "send failed because receiver is gone") } } impl<T: Any> Error for SendError<T> { fn description(&self) -> &str { "send failed because receiver is gone" } } impl<T> SendError<T> { /// Returns the message that was attempted to be sent but failed. pub fn into_inner(self) -> T { self.0 } } /// Handle returned from the `spawn` function. /// /// This handle is a stream that proxies a stream on a separate `Executor`. 
/// Created through the `mpsc::spawn` function, this handle will produce /// the same values as the proxied stream, as they are produced in the executor, /// and uses a limited buffer to exert back-pressure on the remote stream. /// /// If this handle is dropped, then the stream will no longer be polled and is /// scheduled to be dropped. pub struct SpawnHandle<Item, Error> { inner: Receiver<Result<Item, Error>>, _cancel_tx: oneshot::Sender<()>, } /// Type of future which `Executor` instances must be able to execute for `spawn`. pub struct Execute<S: Stream> { inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>, cancel_rx: oneshot::Receiver<()>, } /// Spawns a `stream` onto the instance of `Executor` provided, `executor`, /// returning a handle representing the remote stream. /// /// The `stream` will be canceled if the `SpawnHandle` is dropped. /// /// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself. /// When `stream` has additional items available, then the `SpawnHandle` /// will have those same items available. /// /// At most `buffer + 1` elements will be buffered at a time. If the buffer /// is full, then `stream` will stop progressing until more space is available. /// This allows the `SpawnHandle` to exert backpressure on the `stream`. /// /// # Panics /// /// This function will panic if `executor` is unable spawn a `Future` containing /// the entirety of the `stream`. pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error> where S: Stream, E: Executor<Execute<S>> { let (cancel_tx, cancel_rx) = oneshot::channel(); let (tx, rx) = channel(buffer); executor.execute(Execute { inner: tx.send_all(resultstream::new(stream)), cancel_rx: cancel_rx, }).expect("failed to spawn stream"); SpawnHandle { inner: rx, _cancel_tx: cancel_tx, } } /// Spawns a `stream` onto the instance of `Executor` provided, `executor`, /// returning a handle representing the remote stream, with unbounded buffering. /// /// The `stream` will be canceled if the `SpawnHandle` is dropped. /// /// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself. /// When `stream` has additional items available, then the `SpawnHandle` /// will have those same items available. /// /// An unbounded buffer is used, which means that values will be buffered as /// fast as `stream` can produce them, without any backpressure. Therefore, if /// `stream` is an infinite stream, it can use an unbounded amount of memory, and /// potentially hog CPU resources. In particular, if `stream` is infinite /// and doesn't ever yield (by returning `Async::NotReady` from `poll`), it /// will result in an infinite loop. /// /// # Panics /// /// This function will panic if `executor` is unable spawn a `Future` containing /// the entirety of the `stream`. 
pub fn spawn_unbounded<S,E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error> where S: Stream, E: Executor<Execute<S>> { let (cancel_tx, cancel_rx) = oneshot::channel(); let (tx, rx) = channel_(None); executor.execute(Execute { inner: tx.send_all(resultstream::new(stream)), cancel_rx: cancel_rx, }).expect("failed to spawn stream"); SpawnHandle { inner: rx, _cancel_tx: cancel_tx, } } impl<I, E> Stream for SpawnHandle<I, E> { type Item = I; type Error = E; fn poll(&mut self) -> Poll<Option<I>, E> { match self.inner.poll() { Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))), Ok(Async::Ready(Some(Err(e)))) => Err(e), Ok(Async::Ready(None)) => Ok(Async::Ready(None)), Ok(Async::NotReady) => Ok(Async::NotReady), Err(_) => unreachable!("mpsc::Receiver should never return Err"), } } } impl<I, E> fmt::Debug for SpawnHandle<I, E> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SpawnHandle") .finish() } } impl<S: Stream> Future for Execute<S> { type Item = (); type Error = (); fn poll(&mut self) -> Poll<(), ()> { match self.cancel_rx.poll() { Ok(Async::NotReady) => (), _ => return Ok(Async::Ready(())), } match self.inner.poll() { Ok(Async::NotReady) => Ok(Async::NotReady), _ => Ok(Async::Ready(())) } } } impl<S: Stream> fmt::Debug for Execute<S> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Execute") .finish() } }
start_send
identifier_name
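The bounded `Sender` above parks the current task when the buffer is full, so it has to be driven from inside a task. A minimal usage sketch, assuming this file is exposed as the `futures::unsync::mpsc` module of futures 0.1:

extern crate futures;

use futures::future::lazy;
use futures::unsync::mpsc;
use futures::{Future, Sink, Stream};

fn main() {
    // Run everything on the current thread; lazy() gives the sends a task context.
    let items = lazy(|| {
        let (tx, rx) = mpsc::channel::<u32>(1);
        tx.send(42)                 // Sink::send buffers the value and yields the sender back
            .map_err(|_| ())        // align error types with the receiver's ()
            .and_then(move |_tx| rx.take(1).collect())
    })
    .wait()
    .unwrap();
    assert_eq!(items, vec![42]);
}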
mpsc.rs
//! A multi-producer, single-consumer, futures-aware, FIFO queue with back //! pressure, for use communicating between tasks on the same thread. //! //! These queues are the same as those in `futures::sync`, except they're not //! intended to be sent across threads. use std::any::Any; use std::cell::RefCell; use std::collections::VecDeque; use std::error::Error; use std::fmt; use std::mem; use std::rc::{Rc, Weak}; use task::{self, Task}; use future::Executor; use sink::SendAll; use resultstream::{self, Results}; use unsync::oneshot; use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream}; /// Creates a bounded in-memory channel with buffered storage. /// /// This method creates concrete implementations of the `Stream` and `Sink` /// traits which can be used to communicate a stream of values between tasks /// with backpressure. The channel capacity is exactly `buffer`. On average, /// sending a message through this channel performs no dynamic allocation. pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) { channel_(Some(buffer)) } fn channel_<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) { let shared = Rc::new(RefCell::new(Shared { buffer: VecDeque::new(), capacity: buffer, blocked_senders: VecDeque::new(), blocked_recv: None, })); let sender = Sender { shared: Rc::downgrade(&shared) }; let receiver = Receiver { state: State::Open(shared) }; (sender, receiver) } #[derive(Debug)] struct Shared<T> { buffer: VecDeque<T>, capacity: Option<usize>, blocked_senders: VecDeque<Task>, blocked_recv: Option<Task>, } /// The transmission end of a channel. /// /// This is created by the `channel` function. #[derive(Debug)] pub struct Sender<T> { shared: Weak<RefCell<Shared<T>>>, } impl<T> Sender<T> { fn do_send(&self, msg: T) -> StartSend<T, SendError<T>> { let shared = match self.shared.upgrade() { Some(shared) => shared, None => return Err(SendError(msg)), // receiver was dropped }; let mut shared = shared.borrow_mut(); match shared.capacity { Some(capacity) if shared.buffer.len() == capacity => { shared.blocked_senders.push_back(task::current()); Ok(AsyncSink::NotReady(msg)) } _ => { shared.buffer.push_back(msg); if let Some(task) = shared.blocked_recv.take() { task.notify(); } Ok(AsyncSink::Ready) } } } } impl<T> Clone for Sender<T> { fn clone(&self) -> Self { Sender { shared: self.shared.clone() } } } impl<T> Sink for Sender<T> { type SinkItem = T; type SinkError = SendError<T>; fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { self.do_send(msg) } fn poll_complete(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } fn close(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } } impl<T> Drop for Sender<T> { fn drop(&mut self) { let shared = match self.shared.upgrade() { Some(shared) => shared, None => return, }; // The number of existing `Weak` indicates if we are possibly the last // `Sender`. If we are the last, we possibly must notify a blocked // `Receiver`. `self.shared` is always one of the `Weak` to this shared // data. Therefore the smallest possible Rc::weak_count(&shared) is 1. if Rc::weak_count(&shared) == 1 { if let Some(task) = shared.borrow_mut().blocked_recv.take() { // Wake up receiver as its stream has ended task.notify(); } } } } /// The receiving end of a channel which implements the `Stream` trait. /// /// This is created by the `channel` function. #[derive(Debug)] pub struct Receiver<T> { state: State<T>, } /// Possible states of a receiver. 
We're either Open (can receive more messages) /// or we're closed with a list of messages we have left to receive. #[derive(Debug)] enum State<T> { Open(Rc<RefCell<Shared<T>>>), Closed(VecDeque<T>), } impl<T> Receiver<T> { /// Closes the receiving half /// /// This prevents any further messages from being sent on the channel while /// still enabling the receiver to drain messages that are buffered. pub fn close(&mut self) { let (blockers, items) = match self.state { State::Open(ref state) => { let mut state = state.borrow_mut(); let items = mem::replace(&mut state.buffer, VecDeque::new()); let blockers = mem::replace(&mut state.blocked_senders, VecDeque::new()); (blockers, items) } State::Closed(_) => return, }; self.state = State::Closed(items); for task in blockers { task.notify(); } } } impl<T> Stream for Receiver<T> { type Item = T; type Error = (); fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { let me = match self.state { State::Open(ref mut me) => me, State::Closed(ref mut items) => { return Ok(Async::Ready(items.pop_front())) } }; if let Some(shared) = Rc::get_mut(me) { // All senders have been dropped, so drain the buffer and end the // stream. return Ok(Async::Ready(shared.borrow_mut().buffer.pop_front())); } let mut shared = me.borrow_mut(); if let Some(msg) = shared.buffer.pop_front() { if let Some(task) = shared.blocked_senders.pop_front() { drop(shared); task.notify(); } Ok(Async::Ready(Some(msg))) } else { shared.blocked_recv = Some(task::current()); Ok(Async::NotReady) } } } impl<T> Drop for Receiver<T> { fn drop(&mut self) { self.close(); } } /// The transmission end of an unbounded channel. /// /// This is created by the `unbounded` function. #[derive(Debug)] pub struct UnboundedSender<T>(Sender<T>); impl<T> Clone for UnboundedSender<T> { fn clone(&self) -> Self { UnboundedSender(self.0.clone()) } } impl<T> Sink for UnboundedSender<T> { type SinkItem = T; type SinkError = SendError<T>; fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { self.0.start_send(msg) } fn poll_complete(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } fn close(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } } impl<'a, T> Sink for &'a UnboundedSender<T> { type SinkItem = T; type SinkError = SendError<T>; fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { self.0.do_send(msg) } fn poll_complete(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } fn close(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } } impl<T> UnboundedSender<T> { /// Sends the provided message along this channel. /// /// This is an unbounded sender, so this function differs from `Sink::send` /// by ensuring the return type reflects that the channel is always ready to /// receive messages. #[deprecated(note = "renamed to `unbounded_send`")] #[doc(hidden)] pub fn send(&self, msg: T) -> Result<(), SendError<T>> { self.unbounded_send(msg) } /// Sends the provided message along this channel. /// /// This is an unbounded sender, so this function differs from `Sink::send` /// by ensuring the return type reflects that the channel is always ready to /// receive messages. pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> { let shared = match self.0.shared.upgrade() { Some(shared) => shared, None => return Err(SendError(msg)), }; let mut shared = shared.borrow_mut(); shared.buffer.push_back(msg); if let Some(task) = shared.blocked_recv.take() { drop(shared); task.notify(); } Ok(()) } } /// The receiving end of an unbounded channel. 
/// /// This is created by the `unbounded` function. #[derive(Debug)] pub struct UnboundedReceiver<T>(Receiver<T>); impl<T> UnboundedReceiver<T> { /// Closes the receiving half /// /// This prevents any further messages from being sent on the channel while /// still enabling the receiver to drain messages that are buffered. pub fn close(&mut self) { self.0.close(); } } impl<T> Stream for UnboundedReceiver<T> { type Item = T; type Error = (); fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { self.0.poll() } } /// Creates an unbounded in-memory channel with buffered storage. /// /// Identical semantics to `channel`, except with no limit to buffer size. pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) { let (send, recv) = channel_(None); (UnboundedSender(send), UnboundedReceiver(recv)) } /// Error type for sending, used when the receiving end of a channel is /// dropped pub struct SendError<T>(T); impl<T> fmt::Debug for SendError<T> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_tuple("SendError") .field(&"...") .finish() } } impl<T> fmt::Display for SendError<T> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "send failed because receiver is gone") } } impl<T: Any> Error for SendError<T> { fn description(&self) -> &str { "send failed because receiver is gone" } } impl<T> SendError<T> { /// Returns the message that was attempted to be sent but failed. pub fn into_inner(self) -> T { self.0 } } /// Handle returned from the `spawn` function. /// /// This handle is a stream that proxies a stream on a separate `Executor`. /// Created through the `mpsc::spawn` function, this handle will produce /// the same values as the proxied stream, as they are produced in the executor, /// and uses a limited buffer to exert back-pressure on the remote stream. /// /// If this handle is dropped, then the stream will no longer be polled and is /// scheduled to be dropped. pub struct SpawnHandle<Item, Error> { inner: Receiver<Result<Item, Error>>, _cancel_tx: oneshot::Sender<()>, } /// Type of future which `Executor` instances must be able to execute for `spawn`. pub struct Execute<S: Stream> { inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>, cancel_rx: oneshot::Receiver<()>, } /// Spawns a `stream` onto the instance of `Executor` provided, `executor`, /// returning a handle representing the remote stream. /// /// The `stream` will be canceled if the `SpawnHandle` is dropped. /// /// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself. /// When `stream` has additional items available, then the `SpawnHandle` /// will have those same items available. /// /// At most `buffer + 1` elements will be buffered at a time. If the buffer /// is full, then `stream` will stop progressing until more space is available. /// This allows the `SpawnHandle` to exert backpressure on the `stream`. /// /// # Panics /// /// This function will panic if `executor` is unable spawn a `Future` containing /// the entirety of the `stream`. 
pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error> where S: Stream, E: Executor<Execute<S>> { let (cancel_tx, cancel_rx) = oneshot::channel(); let (tx, rx) = channel(buffer); executor.execute(Execute { inner: tx.send_all(resultstream::new(stream)), cancel_rx: cancel_rx, }).expect("failed to spawn stream"); SpawnHandle { inner: rx, _cancel_tx: cancel_tx, } } /// Spawns a `stream` onto the instance of `Executor` provided, `executor`, /// returning a handle representing the remote stream, with unbounded buffering. /// /// The `stream` will be canceled if the `SpawnHandle` is dropped. /// /// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
/// /// An unbounded buffer is used, which means that values will be buffered as /// fast as `stream` can produce them, without any backpressure. Therefore, if /// `stream` is an infinite stream, it can use an unbounded amount of memory, and /// potentially hog CPU resources. In particular, if `stream` is infinite /// and doesn't ever yield (by returning `Async::NotReady` from `poll`), it /// will result in an infinite loop. /// /// # Panics /// /// This function will panic if `executor` is unable spawn a `Future` containing /// the entirety of the `stream`. pub fn spawn_unbounded<S,E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error> where S: Stream, E: Executor<Execute<S>> { let (cancel_tx, cancel_rx) = oneshot::channel(); let (tx, rx) = channel_(None); executor.execute(Execute { inner: tx.send_all(resultstream::new(stream)), cancel_rx: cancel_rx, }).expect("failed to spawn stream"); SpawnHandle { inner: rx, _cancel_tx: cancel_tx, } } impl<I, E> Stream for SpawnHandle<I, E> { type Item = I; type Error = E; fn poll(&mut self) -> Poll<Option<I>, E> { match self.inner.poll() { Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))), Ok(Async::Ready(Some(Err(e)))) => Err(e), Ok(Async::Ready(None)) => Ok(Async::Ready(None)), Ok(Async::NotReady) => Ok(Async::NotReady), Err(_) => unreachable!("mpsc::Receiver should never return Err"), } } } impl<I, E> fmt::Debug for SpawnHandle<I, E> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SpawnHandle") .finish() } } impl<S: Stream> Future for Execute<S> { type Item = (); type Error = (); fn poll(&mut self) -> Poll<(), ()> { match self.cancel_rx.poll() { Ok(Async::NotReady) => (), _ => return Ok(Async::Ready(())), } match self.inner.poll() { Ok(Async::NotReady) => Ok(Async::NotReady), _ => Ok(Async::Ready(())) } } } impl<S: Stream> fmt::Debug for Execute<S> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Execute") .finish() } }
/// When `stream` has additional items available, then the `SpawnHandle` /// will have those same items available.
random_line_split
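`unbounded_send` only pushes into the shared buffer and wakes a blocked receiver, so unlike the bounded sender it needs no task context; only the receiving side has to run inside a future. A hedged sketch under the same `futures::unsync::mpsc` path assumption:

extern crate futures;

use futures::future::lazy;
use futures::unsync::mpsc;
use futures::{Future, Stream};

fn main() {
    let (tx, rx) = mpsc::unbounded::<&'static str>();
    tx.unbounded_send("hello").expect("receiver is still alive");
    drop(tx); // dropping the last sender ends the stream, letting collect() finish

    let items = lazy(|| rx.collect()).wait().unwrap();
    assert_eq!(items, vec!["hello"]);
}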
mpsc.rs
//! A multi-producer, single-consumer, futures-aware, FIFO queue with back //! pressure, for use communicating between tasks on the same thread. //! //! These queues are the same as those in `futures::sync`, except they're not //! intended to be sent across threads. use std::any::Any; use std::cell::RefCell; use std::collections::VecDeque; use std::error::Error; use std::fmt; use std::mem; use std::rc::{Rc, Weak}; use task::{self, Task}; use future::Executor; use sink::SendAll; use resultstream::{self, Results}; use unsync::oneshot; use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream}; /// Creates a bounded in-memory channel with buffered storage. /// /// This method creates concrete implementations of the `Stream` and `Sink` /// traits which can be used to communicate a stream of values between tasks /// with backpressure. The channel capacity is exactly `buffer`. On average, /// sending a message through this channel performs no dynamic allocation. pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) { channel_(Some(buffer)) } fn channel_<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) { let shared = Rc::new(RefCell::new(Shared { buffer: VecDeque::new(), capacity: buffer, blocked_senders: VecDeque::new(), blocked_recv: None, })); let sender = Sender { shared: Rc::downgrade(&shared) }; let receiver = Receiver { state: State::Open(shared) }; (sender, receiver) } #[derive(Debug)] struct Shared<T> { buffer: VecDeque<T>, capacity: Option<usize>, blocked_senders: VecDeque<Task>, blocked_recv: Option<Task>, } /// The transmission end of a channel. /// /// This is created by the `channel` function. #[derive(Debug)] pub struct Sender<T> { shared: Weak<RefCell<Shared<T>>>, } impl<T> Sender<T> { fn do_send(&self, msg: T) -> StartSend<T, SendError<T>> { let shared = match self.shared.upgrade() { Some(shared) => shared, None => return Err(SendError(msg)), // receiver was dropped }; let mut shared = shared.borrow_mut(); match shared.capacity { Some(capacity) if shared.buffer.len() == capacity => { shared.blocked_senders.push_back(task::current()); Ok(AsyncSink::NotReady(msg)) } _ => { shared.buffer.push_back(msg); if let Some(task) = shared.blocked_recv.take() { task.notify(); } Ok(AsyncSink::Ready) } } } } impl<T> Clone for Sender<T> { fn clone(&self) -> Self { Sender { shared: self.shared.clone() } } } impl<T> Sink for Sender<T> { type SinkItem = T; type SinkError = SendError<T>; fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { self.do_send(msg) } fn poll_complete(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } fn close(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } } impl<T> Drop for Sender<T> { fn drop(&mut self) { let shared = match self.shared.upgrade() { Some(shared) => shared, None => return, }; // The number of existing `Weak` indicates if we are possibly the last // `Sender`. If we are the last, we possibly must notify a blocked // `Receiver`. `self.shared` is always one of the `Weak` to this shared // data. Therefore the smallest possible Rc::weak_count(&shared) is 1. if Rc::weak_count(&shared) == 1 { if let Some(task) = shared.borrow_mut().blocked_recv.take() { // Wake up receiver as its stream has ended task.notify(); } } } } /// The receiving end of a channel which implements the `Stream` trait. /// /// This is created by the `channel` function. #[derive(Debug)] pub struct Receiver<T> { state: State<T>, } /// Possible states of a receiver. 
We're either Open (can receive more messages) /// or we're closed with a list of messages we have left to receive. #[derive(Debug)] enum State<T> { Open(Rc<RefCell<Shared<T>>>), Closed(VecDeque<T>), } impl<T> Receiver<T> { /// Closes the receiving half /// /// This prevents any further messages from being sent on the channel while /// still enabling the receiver to drain messages that are buffered. pub fn close(&mut self) { let (blockers, items) = match self.state { State::Open(ref state) => { let mut state = state.borrow_mut(); let items = mem::replace(&mut state.buffer, VecDeque::new()); let blockers = mem::replace(&mut state.blocked_senders, VecDeque::new()); (blockers, items) } State::Closed(_) => return, }; self.state = State::Closed(items); for task in blockers { task.notify(); } } } impl<T> Stream for Receiver<T> { type Item = T; type Error = (); fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { let me = match self.state { State::Open(ref mut me) => me, State::Closed(ref mut items) => { return Ok(Async::Ready(items.pop_front())) } }; if let Some(shared) = Rc::get_mut(me) { // All senders have been dropped, so drain the buffer and end the // stream. return Ok(Async::Ready(shared.borrow_mut().buffer.pop_front())); } let mut shared = me.borrow_mut(); if let Some(msg) = shared.buffer.pop_front() { if let Some(task) = shared.blocked_senders.pop_front() { drop(shared); task.notify(); } Ok(Async::Ready(Some(msg))) } else { shared.blocked_recv = Some(task::current()); Ok(Async::NotReady) } } } impl<T> Drop for Receiver<T> { fn drop(&mut self) { self.close(); } } /// The transmission end of an unbounded channel. /// /// This is created by the `unbounded` function. #[derive(Debug)] pub struct UnboundedSender<T>(Sender<T>); impl<T> Clone for UnboundedSender<T> { fn clone(&self) -> Self { UnboundedSender(self.0.clone()) } } impl<T> Sink for UnboundedSender<T> { type SinkItem = T; type SinkError = SendError<T>; fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { self.0.start_send(msg) } fn poll_complete(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } fn close(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } } impl<'a, T> Sink for &'a UnboundedSender<T> { type SinkItem = T; type SinkError = SendError<T>; fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> { self.0.do_send(msg) } fn poll_complete(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } fn close(&mut self) -> Poll<(), SendError<T>> { Ok(Async::Ready(())) } } impl<T> UnboundedSender<T> { /// Sends the provided message along this channel. /// /// This is an unbounded sender, so this function differs from `Sink::send` /// by ensuring the return type reflects that the channel is always ready to /// receive messages. #[deprecated(note = "renamed to `unbounded_send`")] #[doc(hidden)] pub fn send(&self, msg: T) -> Result<(), SendError<T>> { self.unbounded_send(msg) } /// Sends the provided message along this channel. /// /// This is an unbounded sender, so this function differs from `Sink::send` /// by ensuring the return type reflects that the channel is always ready to /// receive messages. pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> { let shared = match self.0.shared.upgrade() { Some(shared) => shared, None => return Err(SendError(msg)), }; let mut shared = shared.borrow_mut(); shared.buffer.push_back(msg); if let Some(task) = shared.blocked_recv.take() { drop(shared); task.notify(); } Ok(()) } } /// The receiving end of an unbounded channel. 
/// /// This is created by the `unbounded` function. #[derive(Debug)] pub struct UnboundedReceiver<T>(Receiver<T>); impl<T> UnboundedReceiver<T> { /// Closes the receiving half /// /// This prevents any further messages from being sent on the channel while /// still enabling the receiver to drain messages that are buffered. pub fn close(&mut self)
} impl<T> Stream for UnboundedReceiver<T> { type Item = T; type Error = (); fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> { self.0.poll() } } /// Creates an unbounded in-memory channel with buffered storage. /// /// Identical semantics to `channel`, except with no limit to buffer size. pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) { let (send, recv) = channel_(None); (UnboundedSender(send), UnboundedReceiver(recv)) } /// Error type for sending, used when the receiving end of a channel is /// dropped pub struct SendError<T>(T); impl<T> fmt::Debug for SendError<T> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_tuple("SendError") .field(&"...") .finish() } } impl<T> fmt::Display for SendError<T> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "send failed because receiver is gone") } } impl<T: Any> Error for SendError<T> { fn description(&self) -> &str { "send failed because receiver is gone" } } impl<T> SendError<T> { /// Returns the message that was attempted to be sent but failed. pub fn into_inner(self) -> T { self.0 } } /// Handle returned from the `spawn` function. /// /// This handle is a stream that proxies a stream on a separate `Executor`. /// Created through the `mpsc::spawn` function, this handle will produce /// the same values as the proxied stream, as they are produced in the executor, /// and uses a limited buffer to exert back-pressure on the remote stream. /// /// If this handle is dropped, then the stream will no longer be polled and is /// scheduled to be dropped. pub struct SpawnHandle<Item, Error> { inner: Receiver<Result<Item, Error>>, _cancel_tx: oneshot::Sender<()>, } /// Type of future which `Executor` instances must be able to execute for `spawn`. pub struct Execute<S: Stream> { inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>, cancel_rx: oneshot::Receiver<()>, } /// Spawns a `stream` onto the instance of `Executor` provided, `executor`, /// returning a handle representing the remote stream. /// /// The `stream` will be canceled if the `SpawnHandle` is dropped. /// /// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself. /// When `stream` has additional items available, then the `SpawnHandle` /// will have those same items available. /// /// At most `buffer + 1` elements will be buffered at a time. If the buffer /// is full, then `stream` will stop progressing until more space is available. /// This allows the `SpawnHandle` to exert backpressure on the `stream`. /// /// # Panics /// /// This function will panic if `executor` is unable spawn a `Future` containing /// the entirety of the `stream`. pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error> where S: Stream, E: Executor<Execute<S>> { let (cancel_tx, cancel_rx) = oneshot::channel(); let (tx, rx) = channel(buffer); executor.execute(Execute { inner: tx.send_all(resultstream::new(stream)), cancel_rx: cancel_rx, }).expect("failed to spawn stream"); SpawnHandle { inner: rx, _cancel_tx: cancel_tx, } } /// Spawns a `stream` onto the instance of `Executor` provided, `executor`, /// returning a handle representing the remote stream, with unbounded buffering. /// /// The `stream` will be canceled if the `SpawnHandle` is dropped. /// /// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself. /// When `stream` has additional items available, then the `SpawnHandle` /// will have those same items available. 
/// /// An unbounded buffer is used, which means that values will be buffered as /// fast as `stream` can produce them, without any backpressure. Therefore, if /// `stream` is an infinite stream, it can use an unbounded amount of memory, and /// potentially hog CPU resources. In particular, if `stream` is infinite /// and doesn't ever yield (by returning `Async::NotReady` from `poll`), it /// will result in an infinite loop. /// /// # Panics /// /// This function will panic if `executor` is unable spawn a `Future` containing /// the entirety of the `stream`. pub fn spawn_unbounded<S,E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error> where S: Stream, E: Executor<Execute<S>> { let (cancel_tx, cancel_rx) = oneshot::channel(); let (tx, rx) = channel_(None); executor.execute(Execute { inner: tx.send_all(resultstream::new(stream)), cancel_rx: cancel_rx, }).expect("failed to spawn stream"); SpawnHandle { inner: rx, _cancel_tx: cancel_tx, } } impl<I, E> Stream for SpawnHandle<I, E> { type Item = I; type Error = E; fn poll(&mut self) -> Poll<Option<I>, E> { match self.inner.poll() { Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))), Ok(Async::Ready(Some(Err(e)))) => Err(e), Ok(Async::Ready(None)) => Ok(Async::Ready(None)), Ok(Async::NotReady) => Ok(Async::NotReady), Err(_) => unreachable!("mpsc::Receiver should never return Err"), } } } impl<I, E> fmt::Debug for SpawnHandle<I, E> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SpawnHandle") .finish() } } impl<S: Stream> Future for Execute<S> { type Item = (); type Error = (); fn poll(&mut self) -> Poll<(), ()> { match self.cancel_rx.poll() { Ok(Async::NotReady) => (), _ => return Ok(Async::Ready(())), } match self.inner.poll() { Ok(Async::NotReady) => Ok(Async::NotReady), _ => Ok(Async::Ready(())) } } } impl<S: Stream> fmt::Debug for Execute<S> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Execute") .finish() } }
{ self.0.close(); }
identifier_body
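`SendError` hides the payload from its Debug output but hands it back through `into_inner`, so a send that fails because the receiver is gone does not lose the message. Sketch, same path assumption as above:

extern crate futures;

use futures::unsync::mpsc;

fn main() {
    let (tx, rx) = mpsc::unbounded::<String>();
    drop(rx); // receiver dropped: the sender's Weak<RefCell<Shared<_>>> can no longer upgrade
    let err = tx.unbounded_send("lost?".to_string()).unwrap_err();
    assert_eq!(err.into_inner(), "lost?"); // the original message is recovered
}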
stopwords.rs
use lazy_static::lazy_static; use std::collections::{HashMap, HashSet}; lazy_static! { /// ignore these as keywords pub(crate) static ref STOPWORDS: HashSet<&'static str> = [ "a", "sys", "ffi", "placeholder", "app", "loops", "master", "library", "rs", "accidentally", "additional", "adds", "against", "all", "allow", "allows", "already", "also", "alternative", "always", "an", "and", "any", "appropriate", "arbitrary", "are", "as", "at", "available", "based", "be", "because", "been", "both", "but", "by", "can", "certain", "changes", "comes", "contains", "code", "core", "cost", "crate", "crates.io", "current", "currently", "custom", "dependencies", "dependency", "developers", "do", "don't", "e.g", "easily", "easy", "either", "enables", "etc", "even", "every", "example", "examples", "features", "feel", "files", "fast", "for", "from", "fully", "function", "get", "given", "had", "has", "hacktoberfest", "have", "here", "if", "implementing", "implements", "implementation", "in", "includes", "including", "incurring", "installation", "interested", "into", "is", "it", "it's", "its", "itself", "just", "known", "large", "later", "library", "license", "lightweight", "like", "made", "main", "make", "makes", "many", "may", "me", "means", "method", "minimal", "mit", "more", "mostly", "much", "need", "needed", "never", "new", "no", "noop", "not", "of", "on", "one", "only", "or", "other", "over", "plausible", "please", "possible", "program", "project", "provides", "put", "readme", "release", "runs", "rust", "rust's", "same", "see", "selected", "should", "similar", "simple", "simply", "since", "small", "so", "some", "specific", "still", "stuff", "such", "take", "than", "that", "the", "their", "them", "then", "there", "therefore", "these", "they", "things", "this", "those", "to", "todo", "too", "took", "travis", "two", "under", "us", "usable", "use", "used", "useful", "using", "usage", "v1", "v2", "v3", "v4", "various", "very", "via", "want", "way", "well", "we'll", "what", "when", "where", "which", "while", "will", "wip", "with", "without", "working", "works", "writing", "written", "yet", "you", "your", "build status", "meritbadge", "common", "file was generated", "easy to use", "general-purpose", "fundamental", ].iter().copied().collect(); /// If one is present, ignore the others pub(crate) static ref COND_STOPWORDS: HashMap<&'static str, Option<&'static [&'static str]>> = [ ("game-engine", Some(&["game", "ffi"][..])), ("game-engines", Some(&["game", "ffi"])), ("game-development", Some(&["game", "ffi"])), ("game-dev", Some(&["game", "games"])), ("gamedev", Some(&["game", "games"])), ("game", Some(&["wasm", "webassembly"])), // wasm games are nice, but should be in games category ("contract", Some(&["wasm", "webassembly"])), // that's crypto-babble-nothingburger ("opengl", Some(&["terminal", "console"])), ("protocol", Some(&["game", "games", "container"])), ("framework", Some(&["game", "games"])), ("engine", Some(&["ffi"])), ("mock", Some(&["macro", "derive", "plugin", "cargo"])), ("ffi", Some(&["api-bindings"])), ("caching", Some(&["allocator"])), ("distributed", Some(&["filesystem", "file"])), ("container", Some(&["filesystem", "file"])), ("aws", Some(&["ecs"])), // not game engine ("raspberry", Some(&["osx", "windows"])), ("linux", Some(&["windows", "winsdk", "macos", "mac", "osx"])), ("cross-platform", Some(&["windows", "winsdk", "macos", "mac", "osx", "linux", "unix", "gnu"])), ("portable", Some(&["windows", "winsdk", "macos", "mac", "osx", "linux", "unix", "gnu"])), ("winapi", Some(&["target", "windows", 
"gnu", "x86", "i686", "64", "pc"])), ("windows", Some(&["gnu"])), ("compile-time", Some(&["time"])), ("constant-time", Some(&["time"])), ("real-time", Some(&["time"])), ("time-series", Some(&["time"])), ("execution", Some(&["time"])), ("iterator", Some(&["window", "windows"])), ("buffer", Some(&["window", "windows"])), ("sliding", Some(&["window", "windows"])), ("web", Some(&["windows", "macos", "mac", "osx", "linux"])), ("error", Some(&["color"])), ("pretty-print", Some(&["color"])), ("pretty-printer", Some(&["color"])), ("ios", Some(&["core"])), ("macos", Some(&["core"])), ("osx", Some(&["core"])), ("mac", Some(&["core"])), ("module", Some(&["core"])), ("wasm", Some(&["embedded", "javascript", "no-std", "no_std", "feature:no_std", "deploy"])), ("javascript", Some(&["embedded", "no-std", "no_std", "feature:no_std"])), ("webassembly", Some(&["embedded", "javascript", "no-std", "no_std", "feature:no_std"])), ("deep-learning", Some(&["math", "statistics"])), ("machine-learning", Some(&["math", "statistics"])), ("neural-networks", Some(&["math", "statistics", "network"])), ("neural", Some(&["network"])), ("fantasy", Some(&["console"])), ("learning", Some(&["network"])), ("safe", Some(&["network"])), ("database", Some(&["embedded"])), ("robotics", Some(&["localization"])), ("thread", Some(&["storage"])), ("exchange", Some(&["twitch", "animation"])), ("animation", Some(&["kraken"])), ("bitcoin", Some(&["http", "day", "database", "key-value", "network", "wasm", "secp256k1", "client", "rpc", "websocket"])), ("solana", Some(&["http", "day", "database", "key-value", "network", "wasm", "secp256k1", "client", "cryptographic", "gfx", "sdk"])), ("exonum", Some(&["http", "day", "database", "key-value", "network", "wasm", "client"])), ("blockchain", Some(&["database", "key-value", "network", "wasm", "nosql", "orm", "driver", "fun", "rpc", "client", "server", "p2p", "networking", "websocket"])), ("cryptocurrencies", Some(&["database", "key-value", "network", "wasm", "nosql", "orm", "driver", "fun", "rpc", "client", "server", "p2p", "networking", "websocket"])), ("cryptocurrency", Some(&["database", "key-value", "network", "wasm", "nosql", "orm", "driver", "fun", "rpc", "client", "server", "p2p", "networking", "websocket", "twitch"])), ("ethereum", Some(&["http", "day", "nosql", "eth", "log", "generic", "network", "wasm", "key-value", "orm", "client", "database", "secp256k1", "websocket", "parity"])), ("iter", Some(&["math"])), ("ethernet", Some(&["eth"])), ("macro", Some(&["no-std", "no_std", "feature:no_std"])), ("macros", Some(&["no-std", "no_std", "feature:no_std"])), ("embedded", Some(&["no-std", "no_std", "feature:no_std"])), ("arm", Some(&["no-std", "no_std", "feature:no_std"])), ("float", Some(&["math"])), ("c64", Some(&["terminal", "core"])), ("emulator", Some(&["6502", "core", "gpu", "color", "timer"])), ("garbage", Some(&["tracing"])), ("terminal", Some(&["math", "emulator"])), ("terminal-emulator", Some(&["math", "emulator"])), ("editor", Some(&["terminal"])), ("build", Some(&["logic"])), // confuses categorization ("messaging", Some(&["matrix"])), // confuses categorization ("led", Some(&["matrix"])), // confuses categorization ("rgb", Some(&["matrix"])), // confuses categorization ("chat", Some(&["matrix"])), // confuses categorization ("math", Some(&["num", "symbolic", "algorithms", "algorithm", "utils"])), // confuses categorization ("mathematics", Some(&["num", "numeric", "symbolic", "algorithms", "algorithm", "utils"])), // confuses categorization ("cuda", Some(&["nvidia"])), // confuses 
categorization ("subcommand", Some(&["plugin"])), ("lint", Some(&["plugin"])), ("email", Some(&["validator", "validation"])), ("e-mail", Some(&["validator", "validation"])), ("template", Some(&["derive"])), ("dsl", Some(&["template"])), ("syn", Some(&["nom"])), ("cargo", Some(&["plugin"])), ("git", Some(&["terminal"])), ("nzxt", Some(&["kraken"])), ("wide", Some(&["windows", "win32"])), ("i18n", Some(&["text", "format", "message", "json", "ffi"])), ("l10n", Some(&["text", "format", "message", "json", "ffi"])), ("unicode", Some(&["text"])), ("parity", Some(&["fun", "backend"])), ("secp256k1", Some(&["fun", "backend", "alloc", "ecc"])), ("font", Some(&["text", "bitmap"])), ("freetype", Some(&["text", "bitmap"])), ("tex", Some(&["font"])), ("regex", Some(&["text", "linear", "time", "search"])), ("language", Some(&["server"])), ("server", Some(&["files"])), ("medical", Some(&["image"])), ("social", Some(&["media"])), ("codegen", Some(&["backend"])), ("game", Some(&["simulator", "simulation"])), ("vkontakte", Some(&["vk"])), ("vulkan", Some(&["vk"])), ("2d", Some(&["path", "paths"])), ("video", Some(&["audio"])), // have to pick one… ("sound", Some(&["3d", "windows"])), ("memory", Some(&["os", "system", "storage"])), // too generic ("data-structure", Some(&["no-std", "no_std"])), // it's a nice feature, but not defining one ("crypto", Some(&["no-std", "no_std"])), // it's a nice feature, but not defining one ("macro", Some(&["no-std", "no_std"])), // it's a nice feature, but not defining one ("parser", Some(&["no-std", "no_std", "game"])), // it's a nice feature, but not defining one ("cryptography", Some(&["no-std", "no_std"])), // it's a nice feature, but not defining one ("websocket", Some(&["http", "cli", "tokio", "client", "io", "network", "servo", "web"])), // there's a separate category for it ("rest", Some(&["api"])), ("cargo-subcommand", None), ("substrate", None), ("twitch", Some(&["kraken"])), ("chess", Some(&["bot"])), ("lichess", Some(&["bot"])), ("nftables", Some(&["nft"])), ("placeholder", None), // spam ("reserved", None), // spam ("name-squatting", None), // spam ("parked", None), // spam ("squatting", None), // spam ("malware", None), // spam ("unfinished", None), // spam
}
].iter().copied().collect();
random_line_split
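The COND_STOPWORDS table in the stopwords.rs record reads as: if the key keyword is present, ignore the listed co-occurring keywords, and a `None` entry flags the crate as ignorable outright (spam, placeholders, name-squatting). A hypothetical filtering helper, not taken from the crate itself, illustrating that rule against a stand-in map:

use std::collections::{HashMap, HashSet};

fn main() {
    // Stand-in for a slice of COND_STOPWORDS.
    let cond: HashMap<&str, Option<Vec<&str>>> = vec![
        ("game-engine", Some(vec!["game", "ffi"])),
        ("placeholder", None),
    ]
    .into_iter()
    .collect();

    let keywords = ["game-engine", "game", "graphics"];

    let mut banned: HashSet<&str> = HashSet::new();
    let mut ignore_all = false;
    for k in &keywords {
        match cond.get(k) {
            Some(Some(others)) => banned.extend(others.iter().copied()), // drop co-occurring terms
            Some(None) => ignore_all = true,                             // spam / squatting marker
            None => {}
        }
    }

    let kept: Vec<&str> = if ignore_all {
        Vec::new()
    } else {
        keywords.iter().copied().filter(|k| !banned.contains(*k)).collect()
    };
    assert_eq!(kept, ["game-engine", "graphics"]);
}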
rasterbackend.rs
use crate::aabb::*; use crate::mesh::*; use crate::picture::*; use crate::zbuffer::*; use std::f32::consts::PI; use std::time::{Duration, Instant}; #[derive(Debug)] pub struct RenderOptions { pub view_pos: Vec3, pub light_pos: Vec3, pub light_color: Vec3, pub ambient_color: Vec3, pub model_color: Vec3, pub grid_color: Vec3, pub background_color: Vec4, pub zoom: f32, pub grid_visible: bool, pub draw_size_hint: bool, } impl Default for RenderOptions { fn default() -> Self { Self { view_pos: Vec3::new(-1.0, 1.0, -1.0).normalize(), light_pos: Vec3::new(-1.0, 0.5, -0.5), light_color: Vec3::new(0.6, 0.6, 0.6), ambient_color: Vec3::new(0.4, 0.4, 0.4), model_color: Vec3::new(0.0, 0.45, 1.0), grid_color: Vec3::new(0.1, 0.1, 0.1), background_color: Vec4::new(1.0, 1.0, 1.0, 1.0), grid_visible: true, zoom: 1.0, draw_size_hint: true, } } } #[derive(Debug)] pub struct RasterBackend { pub render_options: RenderOptions, width: u32, height: u32, aspect_ratio: f32, } impl RasterBackend { pub fn new(width: u32, height: u32) -> Self { Self { render_options: RenderOptions::default(), width, height, aspect_ratio: width as f32 / height as f32, } } fn view_projection(&self, zoom: f32) -> Mat4 { // calculate view projection matrix let proj = glm::ortho( zoom * 0.5 * self.aspect_ratio, -zoom * 0.5 * self.aspect_ratio, -zoom * 0.5, zoom * 0.5, 0.0, 1.0, ); let view = glm::look_at( &self.render_options.view_pos, &Vec3::new(0.0, 0.0, 0.0), &Vec3::new(0.0, 0.0, -1.0), ); proj * view } pub fn fit_mesh_scale(&self, mesh: impl IntoIterator<Item = Triangle> + Copy) -> (AABB, f32) { let aabb = AABB::from_iterable(mesh); let vp = self.view_projection(1.0); // scale the model such that is fills the entire canvas (aabb, scale_for_unitsize(&vp, &aabb)) } pub fn render( &self, mesh: impl IntoIterator<Item = Triangle> + Copy, model_scale: f32, aabb: &AABB, timeout: Option<Duration>, ) -> Picture { let start_time = Instant::now(); let mut pic = Picture::new(self.width, self.height); let mut zbuf = ZBuffer::new(self.width, self.height); let mut scaled_aabb = *aabb; pic.fill(&(&self.render_options.background_color).into()); let vp = self.view_projection(self.render_options.zoom); // calculate transforms taking the new model scale into account let model = Mat4::identity() .append_translation(&-aabb.center()) .append_scaling(model_scale); let mvp = vp * model; // let the AABB match the transformed model scaled_aabb.apply_transform(&model); // eye normal pointing towards the camera in world space let eye_normal = self.render_options.view_pos.normalize(); // grid in x and y direction if self.render_options.grid_visible { draw_grid( &mut pic, &vp, scaled_aabb.lower.z, &self.render_options.grid_color, aabb.size(), model_scale, ); draw_grid( &mut pic, &(vp * glm::rotation(PI / 2.0, &Vec3::new(0.0, 0.0, 1.0))), scaled_aabb.lower.z, &self.render_options.grid_color, aabb.size(), model_scale, ); } for t in mesh { // timed out? if let Some(timeout) = timeout { let dt = Instant::now() - start_time; if dt > timeout { // abort println!("... 
timeout!"); return pic; } } let normal = -t.normal; // backface culling if glm::dot(&eye_normal, &normal) < 0.0 { continue; } let v = &t.vertices; let v0 = matmul(&mvp, &v[0]); let v1 = matmul(&mvp, &v[1]); let v2 = matmul(&mvp, &v[2]); let v0m = matmul(&model, &v[0]); let v1m = matmul(&model, &v[1]); let v2m = matmul(&model, &v[2]); // triangle bounding box let min_x = v0.x.min(v1.x).min(v2.x); let min_y = v0.y.min(v1.y).min(v2.y); let max_x = v0.x.max(v1.x).max(v2.x); let max_y = v0.y.max(v1.y).max(v2.y); // triangle bounding box in screen space let smin_x = 0.max(((min_x + 1.0) / 2.0 * pic.width() as f32) as u32); let smin_y = 0.max(((min_y + 1.0) / 2.0 * pic.height() as f32) as u32); let smax_x = 0.max(pic.width().min(((max_x + 1.0) / 2.0 * pic.width() as f32) as u32)); let smax_y = 0.max(pic.height().min(((max_y + 1.0) / 2.0 * pic.height() as f32) as u32)); for y in smin_y..=smax_y { for x in smin_x..=smax_x { // normalized screen coordinates [-1,1] let nx = 2.0 * ((x as f32 / pic.width() as f32) - 0.5); let ny = 2.0 * ((y as f32 / pic.height() as f32) - 0.5); let p = Vec2::new(nx, ny); let p0 = v0.xy(); let p1 = v1.xy(); let p2 = v2.xy(); let inside = edge_fn(&p, &p0, &p1) <= 0.0 && edge_fn(&p, &p1, &p2) <= 0.0 && edge_fn(&p, &p2, &p0) <= 0.0; if inside { // calculate barycentric coordinates let area = edge_fn(&p0, &p1, &p2); let w0 = edge_fn(&p1, &p2, &p) / area; let w1 = edge_fn(&p2, &p0, &p) / area; let w2 = edge_fn(&p0, &p1, &p) / area; // fragment position in screen space let frag_pos = Vec3::new( w0 * v0.x + w1 * v1.x + w2 * v2.x,
w0 * v0.y + w1 * v1.y + w2 * v2.y, w0 * v0.z + w1 * v1.z + w2 * v2.z, ); // fragment position in world space let fp = Vec3::new( w0 * v0m.x + w1 * v1m.x + w2 * v2m.x, w0 * v0m.y + w1 * v1m.y + w2 * v2m.y, w0 * v0m.z + w1 * v1m.z + w2 * v2m.z, ); //let fp = matmul(&mvp_inv, &frag_pos); if zbuf.test_and_set(x, y, frag_pos.z) { // calculate lightning let light_normal = (self.render_options.light_pos - fp).normalize(); // normal frag pos to light (world space) let view_normal = (self.render_options.view_pos - fp).normalize(); // normal frag pos to view (world space) let reflect_dir = glm::reflect_vec(&-light_normal, &normal); // diffuse let diff_color = glm::dot(&normal, &light_normal).max(0.0) * self.render_options.light_color * 1.0; // specular let spec_color = (glm::dot(&view_normal, &reflect_dir).powf(16.0) * 0.7) * self.render_options.light_color; // merge let mut color = self.render_options.ambient_color + diff_color + spec_color; color.x *= self.render_options.model_color.x; color.y *= self.render_options.model_color.y; color.z *= self.render_options.model_color.z; pic.set(x, y, &(color.x, color.y, color.z, 1.0).into()); } } } } } if self.render_options.draw_size_hint { let margin = 3; let text_to_height_ratio = 16; let text = format!( "{}x{}x{}", aabb.size().x as i32, aabb.size().y as i32, aabb.size().z as i32 ); let text_size = pic.height() / text_to_height_ratio; pic.fill_rect( 0, pic.height() as i32 - (text_size + margin * 2) as i32, pic.width() as i32, pic.height() as i32, &"333333FF".into(), ); pic.stroke_string( margin, pic.height() - text_size - margin, &text, text_size as f32, &"FFFFFFFF".into(), ); } pic } } fn edge_fn(a: &Vec2, b: &Vec2, c: &Vec2) -> f32 { (c.x - a.x) * (b.y - a.y) - (c.y - a.y) * (b.x - a.x) } fn scale_for_unitsize(mvp: &Mat4, aabb: &AABB) -> f32 { let edges = [ matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.upper.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.upper.z)), matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.upper.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.upper.z)), ]; let mut min = Vec3::new(f32::MAX, f32::MAX, f32::MAX); let mut max = Vec3::new(f32::MIN, f32::MIN, f32::MIN); for e in &edges { min.x = min.x.min(e.x); min.y = min.y.min(e.y); max.x = max.x.max(e.x); max.y = max.y.max(e.y); } 1.0 / ((f32::abs(max.x - min.x)).max(f32::abs(max.y - min.y)) / 2.0) } fn draw_grid(pic: &mut Picture, vp: &Mat4, z: f32, color: &Vec3, model_size: Vec3, scale: f32) { // draw grid let max_xy = model_size.x.max(model_size.y); let grid_color = (color.x, color.y, color.z, 1.0).into(); let grid_size = 10.0; // mm let grid_count = ((max_xy * scale) / scale / grid_size + 1.0) as i32; let grid_spacing = grid_size * scale as f32; let ox = grid_count as f32 * grid_spacing / 2.0; for x in 0..=grid_count { let p0 = Vec3::new(grid_spacing * x as f32 - ox, grid_count as f32 * grid_spacing * 0.5, z); let p1 = Vec3::new(p0.x, -grid_count as f32 * grid_spacing * 0.5, z); // to screen space let sp0 = matmul(&vp, &p0).xy(); let sp1 = matmul(&vp, &p1).xy(); pic.thick_line( ((sp0.x + 1.0) / 2.0 * pic.width() as f32) as i32, ((sp0.y + 1.0) / 2.0 * pic.height() as f32) as i32, ((sp1.x + 1.0) / 2.0 * pic.width() as f32) as i32, ((sp1.y + 1.0) / 2.0 * pic.height() 
as f32) as i32, &grid_color, 1.0, ); } }
random_line_split
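The rows above and below dump a small software rasterizer (rasterbackend.rs). Its inner pixel loop decides coverage with an edge function and reuses the same function to obtain barycentric weights for interpolating depth and world position. Below is a minimal, self-contained sketch of that test, using plain [f32; 2] points instead of the crate's glm Vec2; the Point alias, the helper name, and the example triangle are illustrative, not from the source.

// Edge function as used in the render loop: <= 0 when c lies on the "inside"
// side of the directed edge a -> b, for counter-clockwise triangles.
type Point = [f32; 2];

fn edge_fn(a: Point, b: Point, c: Point) -> f32 {
    (c[0] - a[0]) * (b[1] - a[1]) - (c[1] - a[1]) * (b[0] - a[0])
}

// Returns barycentric weights (w0, w1, w2) when p is inside triangle (p0, p1, p2),
// using the same "all three edge functions <= 0" convention as the renderer.
fn barycentric_if_inside(p: Point, p0: Point, p1: Point, p2: Point) -> Option<(f32, f32, f32)> {
    let inside =
        edge_fn(p, p0, p1) <= 0.0 && edge_fn(p, p1, p2) <= 0.0 && edge_fn(p, p2, p0) <= 0.0;
    if !inside {
        return None;
    }
    let area = edge_fn(p0, p1, p2);
    // Each weight scales the attribute of the vertex opposite the edge it was computed from.
    Some((
        edge_fn(p1, p2, p) / area,
        edge_fn(p2, p0, p) / area,
        edge_fn(p0, p1, p) / area,
    ))
}

fn main() {
    // Counter-clockwise triangle in normalized device coordinates (illustrative values).
    let (p0, p1, p2) = ([-0.5, -0.5], [0.5, -0.5], [0.0, 0.5]);
    let (w0, w1, w2) = barycentric_if_inside([0.0, 0.0], p0, p1, p2).unwrap();
    assert!((w0 + w1 + w2 - 1.0).abs() < 1e-6); // weights sum to one inside the triangle
    assert!(barycentric_if_inside([0.9, 0.9], p0, p1, p2).is_none());
}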
rasterbackend.rs
use crate::aabb::*; use crate::mesh::*; use crate::picture::*; use crate::zbuffer::*; use std::f32::consts::PI; use std::time::{Duration, Instant}; #[derive(Debug)] pub struct RenderOptions { pub view_pos: Vec3, pub light_pos: Vec3, pub light_color: Vec3, pub ambient_color: Vec3, pub model_color: Vec3, pub grid_color: Vec3, pub background_color: Vec4, pub zoom: f32, pub grid_visible: bool, pub draw_size_hint: bool, } impl Default for RenderOptions { fn default() -> Self { Self { view_pos: Vec3::new(-1.0, 1.0, -1.0).normalize(), light_pos: Vec3::new(-1.0, 0.5, -0.5), light_color: Vec3::new(0.6, 0.6, 0.6), ambient_color: Vec3::new(0.4, 0.4, 0.4), model_color: Vec3::new(0.0, 0.45, 1.0), grid_color: Vec3::new(0.1, 0.1, 0.1), background_color: Vec4::new(1.0, 1.0, 1.0, 1.0), grid_visible: true, zoom: 1.0, draw_size_hint: true, } } } #[derive(Debug)] pub struct RasterBackend { pub render_options: RenderOptions, width: u32, height: u32, aspect_ratio: f32, } impl RasterBackend { pub fn new(width: u32, height: u32) -> Self
fn view_projection(&self, zoom: f32) -> Mat4 { // calculate view projection matrix let proj = glm::ortho( zoom * 0.5 * self.aspect_ratio, -zoom * 0.5 * self.aspect_ratio, -zoom * 0.5, zoom * 0.5, 0.0, 1.0, ); let view = glm::look_at( &self.render_options.view_pos, &Vec3::new(0.0, 0.0, 0.0), &Vec3::new(0.0, 0.0, -1.0), ); proj * view } pub fn fit_mesh_scale(&self, mesh: impl IntoIterator<Item = Triangle> + Copy) -> (AABB, f32) { let aabb = AABB::from_iterable(mesh); let vp = self.view_projection(1.0); // scale the model such that is fills the entire canvas (aabb, scale_for_unitsize(&vp, &aabb)) } pub fn render( &self, mesh: impl IntoIterator<Item = Triangle> + Copy, model_scale: f32, aabb: &AABB, timeout: Option<Duration>, ) -> Picture { let start_time = Instant::now(); let mut pic = Picture::new(self.width, self.height); let mut zbuf = ZBuffer::new(self.width, self.height); let mut scaled_aabb = *aabb; pic.fill(&(&self.render_options.background_color).into()); let vp = self.view_projection(self.render_options.zoom); // calculate transforms taking the new model scale into account let model = Mat4::identity() .append_translation(&-aabb.center()) .append_scaling(model_scale); let mvp = vp * model; // let the AABB match the transformed model scaled_aabb.apply_transform(&model); // eye normal pointing towards the camera in world space let eye_normal = self.render_options.view_pos.normalize(); // grid in x and y direction if self.render_options.grid_visible { draw_grid( &mut pic, &vp, scaled_aabb.lower.z, &self.render_options.grid_color, aabb.size(), model_scale, ); draw_grid( &mut pic, &(vp * glm::rotation(PI / 2.0, &Vec3::new(0.0, 0.0, 1.0))), scaled_aabb.lower.z, &self.render_options.grid_color, aabb.size(), model_scale, ); } for t in mesh { // timed out? if let Some(timeout) = timeout { let dt = Instant::now() - start_time; if dt > timeout { // abort println!("... 
timeout!"); return pic; } } let normal = -t.normal; // backface culling if glm::dot(&eye_normal, &normal) < 0.0 { continue; } let v = &t.vertices; let v0 = matmul(&mvp, &v[0]); let v1 = matmul(&mvp, &v[1]); let v2 = matmul(&mvp, &v[2]); let v0m = matmul(&model, &v[0]); let v1m = matmul(&model, &v[1]); let v2m = matmul(&model, &v[2]); // triangle bounding box let min_x = v0.x.min(v1.x).min(v2.x); let min_y = v0.y.min(v1.y).min(v2.y); let max_x = v0.x.max(v1.x).max(v2.x); let max_y = v0.y.max(v1.y).max(v2.y); // triangle bounding box in screen space let smin_x = 0.max(((min_x + 1.0) / 2.0 * pic.width() as f32) as u32); let smin_y = 0.max(((min_y + 1.0) / 2.0 * pic.height() as f32) as u32); let smax_x = 0.max(pic.width().min(((max_x + 1.0) / 2.0 * pic.width() as f32) as u32)); let smax_y = 0.max(pic.height().min(((max_y + 1.0) / 2.0 * pic.height() as f32) as u32)); for y in smin_y..=smax_y { for x in smin_x..=smax_x { // normalized screen coordinates [-1,1] let nx = 2.0 * ((x as f32 / pic.width() as f32) - 0.5); let ny = 2.0 * ((y as f32 / pic.height() as f32) - 0.5); let p = Vec2::new(nx, ny); let p0 = v0.xy(); let p1 = v1.xy(); let p2 = v2.xy(); let inside = edge_fn(&p, &p0, &p1) <= 0.0 && edge_fn(&p, &p1, &p2) <= 0.0 && edge_fn(&p, &p2, &p0) <= 0.0; if inside { // calculate barycentric coordinates let area = edge_fn(&p0, &p1, &p2); let w0 = edge_fn(&p1, &p2, &p) / area; let w1 = edge_fn(&p2, &p0, &p) / area; let w2 = edge_fn(&p0, &p1, &p) / area; // fragment position in screen space let frag_pos = Vec3::new( w0 * v0.x + w1 * v1.x + w2 * v2.x, w0 * v0.y + w1 * v1.y + w2 * v2.y, w0 * v0.z + w1 * v1.z + w2 * v2.z, ); // fragment position in world space let fp = Vec3::new( w0 * v0m.x + w1 * v1m.x + w2 * v2m.x, w0 * v0m.y + w1 * v1m.y + w2 * v2m.y, w0 * v0m.z + w1 * v1m.z + w2 * v2m.z, ); //let fp = matmul(&mvp_inv, &frag_pos); if zbuf.test_and_set(x, y, frag_pos.z) { // calculate lightning let light_normal = (self.render_options.light_pos - fp).normalize(); // normal frag pos to light (world space) let view_normal = (self.render_options.view_pos - fp).normalize(); // normal frag pos to view (world space) let reflect_dir = glm::reflect_vec(&-light_normal, &normal); // diffuse let diff_color = glm::dot(&normal, &light_normal).max(0.0) * self.render_options.light_color * 1.0; // specular let spec_color = (glm::dot(&view_normal, &reflect_dir).powf(16.0) * 0.7) * self.render_options.light_color; // merge let mut color = self.render_options.ambient_color + diff_color + spec_color; color.x *= self.render_options.model_color.x; color.y *= self.render_options.model_color.y; color.z *= self.render_options.model_color.z; pic.set(x, y, &(color.x, color.y, color.z, 1.0).into()); } } } } } if self.render_options.draw_size_hint { let margin = 3; let text_to_height_ratio = 16; let text = format!( "{}x{}x{}", aabb.size().x as i32, aabb.size().y as i32, aabb.size().z as i32 ); let text_size = pic.height() / text_to_height_ratio; pic.fill_rect( 0, pic.height() as i32 - (text_size + margin * 2) as i32, pic.width() as i32, pic.height() as i32, &"333333FF".into(), ); pic.stroke_string( margin, pic.height() - text_size - margin, &text, text_size as f32, &"FFFFFFFF".into(), ); } pic } } fn edge_fn(a: &Vec2, b: &Vec2, c: &Vec2) -> f32 { (c.x - a.x) * (b.y - a.y) - (c.y - a.y) * (b.x - a.x) } fn scale_for_unitsize(mvp: &Mat4, aabb: &AABB) -> f32 { let edges = [ matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.lower.z)), matmul(&mvp, 
&Vec3::new(aabb.lower.x, aabb.upper.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.upper.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.upper.z)), matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.upper.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.upper.z)), ]; let mut min = Vec3::new(f32::MAX, f32::MAX, f32::MAX); let mut max = Vec3::new(f32::MIN, f32::MIN, f32::MIN); for e in &edges { min.x = min.x.min(e.x); min.y = min.y.min(e.y); max.x = max.x.max(e.x); max.y = max.y.max(e.y); } 1.0 / ((f32::abs(max.x - min.x)).max(f32::abs(max.y - min.y)) / 2.0) } fn draw_grid(pic: &mut Picture, vp: &Mat4, z: f32, color: &Vec3, model_size: Vec3, scale: f32) { // draw grid let max_xy = model_size.x.max(model_size.y); let grid_color = (color.x, color.y, color.z, 1.0).into(); let grid_size = 10.0; // mm let grid_count = ((max_xy * scale) / scale / grid_size + 1.0) as i32; let grid_spacing = grid_size * scale as f32; let ox = grid_count as f32 * grid_spacing / 2.0; for x in 0..=grid_count { let p0 = Vec3::new(grid_spacing * x as f32 - ox, grid_count as f32 * grid_spacing * 0.5, z); let p1 = Vec3::new(p0.x, -grid_count as f32 * grid_spacing * 0.5, z); // to screen space let sp0 = matmul(&vp, &p0).xy(); let sp1 = matmul(&vp, &p1).xy(); pic.thick_line( ((sp0.x + 1.0) / 2.0 * pic.width() as f32) as i32, ((sp0.y + 1.0) / 2.0 * pic.height() as f32) as i32, ((sp1.x + 1.0) / 2.0 * pic.width() as f32) as i32, ((sp1.y + 1.0) / 2.0 * pic.height() as f32) as i32, &grid_color, 1.0, ); } }
{ Self { render_options: RenderOptions::default(), width, height, aspect_ratio: width as f32 / height as f32, } }
identifier_body
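Each row here appears to store one source file split into a prefix, a suffix, and a middle span, plus a fim_type label (random_line_split, identifier_body, identifier_name, and conditional_block are the values visible in these rows) and the file name. Assuming that layout, the original file is recovered by splicing the middle back between prefix and suffix; the FimRecord type below is a hypothetical illustration of that, not an existing loader API.

// Hypothetical helper for working with rows of this dump; the type and field
// names are assumptions based on the visible columns, not a published API.
struct FimRecord {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String, // e.g. "identifier_body", "random_line_split", ...
}

impl FimRecord {
    /// The original file is the three spans concatenated in source order.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}

fn main() {
    let rec = FimRecord {
        file_name: "rasterbackend.rs".into(),
        prefix: "pub fn new(width: u32, height: u32) -> Self ".into(),
        middle: "{ Self { width, height } }".into(),
        suffix: "\nfn view_projection(&self, zoom: f32) -> Mat4 { /* ... */ }".into(),
        fim_type: "identifier_body".into(),
    };
    assert!(rec.reconstruct().contains("-> Self { Self { width, height } }"));
    println!("{} ({})", rec.file_name, rec.fim_type);
}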
rasterbackend.rs
use crate::aabb::*; use crate::mesh::*; use crate::picture::*; use crate::zbuffer::*; use std::f32::consts::PI; use std::time::{Duration, Instant}; #[derive(Debug)] pub struct RenderOptions { pub view_pos: Vec3, pub light_pos: Vec3, pub light_color: Vec3, pub ambient_color: Vec3, pub model_color: Vec3, pub grid_color: Vec3, pub background_color: Vec4, pub zoom: f32, pub grid_visible: bool, pub draw_size_hint: bool, } impl Default for RenderOptions { fn default() -> Self { Self { view_pos: Vec3::new(-1.0, 1.0, -1.0).normalize(), light_pos: Vec3::new(-1.0, 0.5, -0.5), light_color: Vec3::new(0.6, 0.6, 0.6), ambient_color: Vec3::new(0.4, 0.4, 0.4), model_color: Vec3::new(0.0, 0.45, 1.0), grid_color: Vec3::new(0.1, 0.1, 0.1), background_color: Vec4::new(1.0, 1.0, 1.0, 1.0), grid_visible: true, zoom: 1.0, draw_size_hint: true, } } } #[derive(Debug)] pub struct RasterBackend { pub render_options: RenderOptions, width: u32, height: u32, aspect_ratio: f32, } impl RasterBackend { pub fn new(width: u32, height: u32) -> Self { Self { render_options: RenderOptions::default(), width, height, aspect_ratio: width as f32 / height as f32, } } fn view_projection(&self, zoom: f32) -> Mat4 { // calculate view projection matrix let proj = glm::ortho( zoom * 0.5 * self.aspect_ratio, -zoom * 0.5 * self.aspect_ratio, -zoom * 0.5, zoom * 0.5, 0.0, 1.0, ); let view = glm::look_at( &self.render_options.view_pos, &Vec3::new(0.0, 0.0, 0.0), &Vec3::new(0.0, 0.0, -1.0), ); proj * view } pub fn fit_mesh_scale(&self, mesh: impl IntoIterator<Item = Triangle> + Copy) -> (AABB, f32) { let aabb = AABB::from_iterable(mesh); let vp = self.view_projection(1.0); // scale the model such that is fills the entire canvas (aabb, scale_for_unitsize(&vp, &aabb)) } pub fn
( &self, mesh: impl IntoIterator<Item = Triangle> + Copy, model_scale: f32, aabb: &AABB, timeout: Option<Duration>, ) -> Picture { let start_time = Instant::now(); let mut pic = Picture::new(self.width, self.height); let mut zbuf = ZBuffer::new(self.width, self.height); let mut scaled_aabb = *aabb; pic.fill(&(&self.render_options.background_color).into()); let vp = self.view_projection(self.render_options.zoom); // calculate transforms taking the new model scale into account let model = Mat4::identity() .append_translation(&-aabb.center()) .append_scaling(model_scale); let mvp = vp * model; // let the AABB match the transformed model scaled_aabb.apply_transform(&model); // eye normal pointing towards the camera in world space let eye_normal = self.render_options.view_pos.normalize(); // grid in x and y direction if self.render_options.grid_visible { draw_grid( &mut pic, &vp, scaled_aabb.lower.z, &self.render_options.grid_color, aabb.size(), model_scale, ); draw_grid( &mut pic, &(vp * glm::rotation(PI / 2.0, &Vec3::new(0.0, 0.0, 1.0))), scaled_aabb.lower.z, &self.render_options.grid_color, aabb.size(), model_scale, ); } for t in mesh { // timed out? if let Some(timeout) = timeout { let dt = Instant::now() - start_time; if dt > timeout { // abort println!("... timeout!"); return pic; } } let normal = -t.normal; // backface culling if glm::dot(&eye_normal, &normal) < 0.0 { continue; } let v = &t.vertices; let v0 = matmul(&mvp, &v[0]); let v1 = matmul(&mvp, &v[1]); let v2 = matmul(&mvp, &v[2]); let v0m = matmul(&model, &v[0]); let v1m = matmul(&model, &v[1]); let v2m = matmul(&model, &v[2]); // triangle bounding box let min_x = v0.x.min(v1.x).min(v2.x); let min_y = v0.y.min(v1.y).min(v2.y); let max_x = v0.x.max(v1.x).max(v2.x); let max_y = v0.y.max(v1.y).max(v2.y); // triangle bounding box in screen space let smin_x = 0.max(((min_x + 1.0) / 2.0 * pic.width() as f32) as u32); let smin_y = 0.max(((min_y + 1.0) / 2.0 * pic.height() as f32) as u32); let smax_x = 0.max(pic.width().min(((max_x + 1.0) / 2.0 * pic.width() as f32) as u32)); let smax_y = 0.max(pic.height().min(((max_y + 1.0) / 2.0 * pic.height() as f32) as u32)); for y in smin_y..=smax_y { for x in smin_x..=smax_x { // normalized screen coordinates [-1,1] let nx = 2.0 * ((x as f32 / pic.width() as f32) - 0.5); let ny = 2.0 * ((y as f32 / pic.height() as f32) - 0.5); let p = Vec2::new(nx, ny); let p0 = v0.xy(); let p1 = v1.xy(); let p2 = v2.xy(); let inside = edge_fn(&p, &p0, &p1) <= 0.0 && edge_fn(&p, &p1, &p2) <= 0.0 && edge_fn(&p, &p2, &p0) <= 0.0; if inside { // calculate barycentric coordinates let area = edge_fn(&p0, &p1, &p2); let w0 = edge_fn(&p1, &p2, &p) / area; let w1 = edge_fn(&p2, &p0, &p) / area; let w2 = edge_fn(&p0, &p1, &p) / area; // fragment position in screen space let frag_pos = Vec3::new( w0 * v0.x + w1 * v1.x + w2 * v2.x, w0 * v0.y + w1 * v1.y + w2 * v2.y, w0 * v0.z + w1 * v1.z + w2 * v2.z, ); // fragment position in world space let fp = Vec3::new( w0 * v0m.x + w1 * v1m.x + w2 * v2m.x, w0 * v0m.y + w1 * v1m.y + w2 * v2m.y, w0 * v0m.z + w1 * v1m.z + w2 * v2m.z, ); //let fp = matmul(&mvp_inv, &frag_pos); if zbuf.test_and_set(x, y, frag_pos.z) { // calculate lightning let light_normal = (self.render_options.light_pos - fp).normalize(); // normal frag pos to light (world space) let view_normal = (self.render_options.view_pos - fp).normalize(); // normal frag pos to view (world space) let reflect_dir = glm::reflect_vec(&-light_normal, &normal); // diffuse let diff_color = glm::dot(&normal, &light_normal).max(0.0) * 
self.render_options.light_color * 1.0; // specular let spec_color = (glm::dot(&view_normal, &reflect_dir).powf(16.0) * 0.7) * self.render_options.light_color; // merge let mut color = self.render_options.ambient_color + diff_color + spec_color; color.x *= self.render_options.model_color.x; color.y *= self.render_options.model_color.y; color.z *= self.render_options.model_color.z; pic.set(x, y, &(color.x, color.y, color.z, 1.0).into()); } } } } } if self.render_options.draw_size_hint { let margin = 3; let text_to_height_ratio = 16; let text = format!( "{}x{}x{}", aabb.size().x as i32, aabb.size().y as i32, aabb.size().z as i32 ); let text_size = pic.height() / text_to_height_ratio; pic.fill_rect( 0, pic.height() as i32 - (text_size + margin * 2) as i32, pic.width() as i32, pic.height() as i32, &"333333FF".into(), ); pic.stroke_string( margin, pic.height() - text_size - margin, &text, text_size as f32, &"FFFFFFFF".into(), ); } pic } } fn edge_fn(a: &Vec2, b: &Vec2, c: &Vec2) -> f32 { (c.x - a.x) * (b.y - a.y) - (c.y - a.y) * (b.x - a.x) } fn scale_for_unitsize(mvp: &Mat4, aabb: &AABB) -> f32 { let edges = [ matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.upper.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.upper.z)), matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.upper.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.upper.z)), ]; let mut min = Vec3::new(f32::MAX, f32::MAX, f32::MAX); let mut max = Vec3::new(f32::MIN, f32::MIN, f32::MIN); for e in &edges { min.x = min.x.min(e.x); min.y = min.y.min(e.y); max.x = max.x.max(e.x); max.y = max.y.max(e.y); } 1.0 / ((f32::abs(max.x - min.x)).max(f32::abs(max.y - min.y)) / 2.0) } fn draw_grid(pic: &mut Picture, vp: &Mat4, z: f32, color: &Vec3, model_size: Vec3, scale: f32) { // draw grid let max_xy = model_size.x.max(model_size.y); let grid_color = (color.x, color.y, color.z, 1.0).into(); let grid_size = 10.0; // mm let grid_count = ((max_xy * scale) / scale / grid_size + 1.0) as i32; let grid_spacing = grid_size * scale as f32; let ox = grid_count as f32 * grid_spacing / 2.0; for x in 0..=grid_count { let p0 = Vec3::new(grid_spacing * x as f32 - ox, grid_count as f32 * grid_spacing * 0.5, z); let p1 = Vec3::new(p0.x, -grid_count as f32 * grid_spacing * 0.5, z); // to screen space let sp0 = matmul(&vp, &p0).xy(); let sp1 = matmul(&vp, &p1).xy(); pic.thick_line( ((sp0.x + 1.0) / 2.0 * pic.width() as f32) as i32, ((sp0.y + 1.0) / 2.0 * pic.height() as f32) as i32, ((sp1.x + 1.0) / 2.0 * pic.width() as f32) as i32, ((sp1.y + 1.0) / 2.0 * pic.height() as f32) as i32, &grid_color, 1.0, ); } }
render
identifier_name
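fit_mesh_scale in the rows above delegates to scale_for_unitsize, which projects the eight AABB corners through the view-projection matrix and then returns 1.0 / (largest projected XY extent / 2.0), i.e. the factor that makes the model just span the [-1, 1] NDC square. A tiny sketch of that final step, with the projected extents assumed to be already known (the function name and numbers are illustrative):

// Fit-to-canvas factor as computed at the end of scale_for_unitsize.
// `projected_extent_*` stand for the model's width/height after projection.
fn unit_fit_scale(projected_extent_x: f32, projected_extent_y: f32) -> f32 {
    // NDC spans 2.0 in each axis, so the larger extent must map to exactly 2.0.
    1.0 / (projected_extent_x.abs().max(projected_extent_y.abs()) / 2.0)
}

fn main() {
    // A model that projects to 4.0 x 1.0 units must be scaled by 0.5 to fit.
    assert!((unit_fit_scale(4.0, 1.0) - 0.5).abs() < 1e-6);
    // A model already spanning 2.0 units needs no scaling.
    assert!((unit_fit_scale(2.0, 0.5) - 1.0).abs() < 1e-6);
}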
rasterbackend.rs
use crate::aabb::*; use crate::mesh::*; use crate::picture::*; use crate::zbuffer::*; use std::f32::consts::PI; use std::time::{Duration, Instant}; #[derive(Debug)] pub struct RenderOptions { pub view_pos: Vec3, pub light_pos: Vec3, pub light_color: Vec3, pub ambient_color: Vec3, pub model_color: Vec3, pub grid_color: Vec3, pub background_color: Vec4, pub zoom: f32, pub grid_visible: bool, pub draw_size_hint: bool, } impl Default for RenderOptions { fn default() -> Self { Self { view_pos: Vec3::new(-1.0, 1.0, -1.0).normalize(), light_pos: Vec3::new(-1.0, 0.5, -0.5), light_color: Vec3::new(0.6, 0.6, 0.6), ambient_color: Vec3::new(0.4, 0.4, 0.4), model_color: Vec3::new(0.0, 0.45, 1.0), grid_color: Vec3::new(0.1, 0.1, 0.1), background_color: Vec4::new(1.0, 1.0, 1.0, 1.0), grid_visible: true, zoom: 1.0, draw_size_hint: true, } } } #[derive(Debug)] pub struct RasterBackend { pub render_options: RenderOptions, width: u32, height: u32, aspect_ratio: f32, } impl RasterBackend { pub fn new(width: u32, height: u32) -> Self { Self { render_options: RenderOptions::default(), width, height, aspect_ratio: width as f32 / height as f32, } } fn view_projection(&self, zoom: f32) -> Mat4 { // calculate view projection matrix let proj = glm::ortho( zoom * 0.5 * self.aspect_ratio, -zoom * 0.5 * self.aspect_ratio, -zoom * 0.5, zoom * 0.5, 0.0, 1.0, ); let view = glm::look_at( &self.render_options.view_pos, &Vec3::new(0.0, 0.0, 0.0), &Vec3::new(0.0, 0.0, -1.0), ); proj * view } pub fn fit_mesh_scale(&self, mesh: impl IntoIterator<Item = Triangle> + Copy) -> (AABB, f32) { let aabb = AABB::from_iterable(mesh); let vp = self.view_projection(1.0); // scale the model such that is fills the entire canvas (aabb, scale_for_unitsize(&vp, &aabb)) } pub fn render( &self, mesh: impl IntoIterator<Item = Triangle> + Copy, model_scale: f32, aabb: &AABB, timeout: Option<Duration>, ) -> Picture { let start_time = Instant::now(); let mut pic = Picture::new(self.width, self.height); let mut zbuf = ZBuffer::new(self.width, self.height); let mut scaled_aabb = *aabb; pic.fill(&(&self.render_options.background_color).into()); let vp = self.view_projection(self.render_options.zoom); // calculate transforms taking the new model scale into account let model = Mat4::identity() .append_translation(&-aabb.center()) .append_scaling(model_scale); let mvp = vp * model; // let the AABB match the transformed model scaled_aabb.apply_transform(&model); // eye normal pointing towards the camera in world space let eye_normal = self.render_options.view_pos.normalize(); // grid in x and y direction if self.render_options.grid_visible { draw_grid( &mut pic, &vp, scaled_aabb.lower.z, &self.render_options.grid_color, aabb.size(), model_scale, ); draw_grid( &mut pic, &(vp * glm::rotation(PI / 2.0, &Vec3::new(0.0, 0.0, 1.0))), scaled_aabb.lower.z, &self.render_options.grid_color, aabb.size(), model_scale, ); } for t in mesh { // timed out? if let Some(timeout) = timeout { let dt = Instant::now() - start_time; if dt > timeout { // abort println!("... 
timeout!"); return pic; } } let normal = -t.normal; // backface culling if glm::dot(&eye_normal, &normal) < 0.0 { continue; } let v = &t.vertices; let v0 = matmul(&mvp, &v[0]); let v1 = matmul(&mvp, &v[1]); let v2 = matmul(&mvp, &v[2]); let v0m = matmul(&model, &v[0]); let v1m = matmul(&model, &v[1]); let v2m = matmul(&model, &v[2]); // triangle bounding box let min_x = v0.x.min(v1.x).min(v2.x); let min_y = v0.y.min(v1.y).min(v2.y); let max_x = v0.x.max(v1.x).max(v2.x); let max_y = v0.y.max(v1.y).max(v2.y); // triangle bounding box in screen space let smin_x = 0.max(((min_x + 1.0) / 2.0 * pic.width() as f32) as u32); let smin_y = 0.max(((min_y + 1.0) / 2.0 * pic.height() as f32) as u32); let smax_x = 0.max(pic.width().min(((max_x + 1.0) / 2.0 * pic.width() as f32) as u32)); let smax_y = 0.max(pic.height().min(((max_y + 1.0) / 2.0 * pic.height() as f32) as u32)); for y in smin_y..=smax_y { for x in smin_x..=smax_x { // normalized screen coordinates [-1,1] let nx = 2.0 * ((x as f32 / pic.width() as f32) - 0.5); let ny = 2.0 * ((y as f32 / pic.height() as f32) - 0.5); let p = Vec2::new(nx, ny); let p0 = v0.xy(); let p1 = v1.xy(); let p2 = v2.xy(); let inside = edge_fn(&p, &p0, &p1) <= 0.0 && edge_fn(&p, &p1, &p2) <= 0.0 && edge_fn(&p, &p2, &p0) <= 0.0; if inside { // calculate barycentric coordinates let area = edge_fn(&p0, &p1, &p2); let w0 = edge_fn(&p1, &p2, &p) / area; let w1 = edge_fn(&p2, &p0, &p) / area; let w2 = edge_fn(&p0, &p1, &p) / area; // fragment position in screen space let frag_pos = Vec3::new( w0 * v0.x + w1 * v1.x + w2 * v2.x, w0 * v0.y + w1 * v1.y + w2 * v2.y, w0 * v0.z + w1 * v1.z + w2 * v2.z, ); // fragment position in world space let fp = Vec3::new( w0 * v0m.x + w1 * v1m.x + w2 * v2m.x, w0 * v0m.y + w1 * v1m.y + w2 * v2m.y, w0 * v0m.z + w1 * v1m.z + w2 * v2m.z, ); //let fp = matmul(&mvp_inv, &frag_pos); if zbuf.test_and_set(x, y, frag_pos.z) { // calculate lightning let light_normal = (self.render_options.light_pos - fp).normalize(); // normal frag pos to light (world space) let view_normal = (self.render_options.view_pos - fp).normalize(); // normal frag pos to view (world space) let reflect_dir = glm::reflect_vec(&-light_normal, &normal); // diffuse let diff_color = glm::dot(&normal, &light_normal).max(0.0) * self.render_options.light_color * 1.0; // specular let spec_color = (glm::dot(&view_normal, &reflect_dir).powf(16.0) * 0.7) * self.render_options.light_color; // merge let mut color = self.render_options.ambient_color + diff_color + spec_color; color.x *= self.render_options.model_color.x; color.y *= self.render_options.model_color.y; color.z *= self.render_options.model_color.z; pic.set(x, y, &(color.x, color.y, color.z, 1.0).into()); } } } } } if self.render_options.draw_size_hint
pic.stroke_string( margin, pic.height() - text_size - margin, &text, text_size as f32, &"FFFFFFFF".into(), ); } pic } } fn edge_fn(a: &Vec2, b: &Vec2, c: &Vec2) -> f32 { (c.x - a.x) * (b.y - a.y) - (c.y - a.y) * (b.x - a.x) } fn scale_for_unitsize(mvp: &Mat4, aabb: &AABB) -> f32 { let edges = [ matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.lower.z)), matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.lower.y, aabb.upper.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.lower.y, aabb.upper.z)), matmul(&mvp, &Vec3::new(aabb.lower.x, aabb.upper.y, aabb.upper.z)), matmul(&mvp, &Vec3::new(aabb.upper.x, aabb.upper.y, aabb.upper.z)), ]; let mut min = Vec3::new(f32::MAX, f32::MAX, f32::MAX); let mut max = Vec3::new(f32::MIN, f32::MIN, f32::MIN); for e in &edges { min.x = min.x.min(e.x); min.y = min.y.min(e.y); max.x = max.x.max(e.x); max.y = max.y.max(e.y); } 1.0 / ((f32::abs(max.x - min.x)).max(f32::abs(max.y - min.y)) / 2.0) } fn draw_grid(pic: &mut Picture, vp: &Mat4, z: f32, color: &Vec3, model_size: Vec3, scale: f32) { // draw grid let max_xy = model_size.x.max(model_size.y); let grid_color = (color.x, color.y, color.z, 1.0).into(); let grid_size = 10.0; // mm let grid_count = ((max_xy * scale) / scale / grid_size + 1.0) as i32; let grid_spacing = grid_size * scale as f32; let ox = grid_count as f32 * grid_spacing / 2.0; for x in 0..=grid_count { let p0 = Vec3::new(grid_spacing * x as f32 - ox, grid_count as f32 * grid_spacing * 0.5, z); let p1 = Vec3::new(p0.x, -grid_count as f32 * grid_spacing * 0.5, z); // to screen space let sp0 = matmul(&vp, &p0).xy(); let sp1 = matmul(&vp, &p1).xy(); pic.thick_line( ((sp0.x + 1.0) / 2.0 * pic.width() as f32) as i32, ((sp0.y + 1.0) / 2.0 * pic.height() as f32) as i32, ((sp1.x + 1.0) / 2.0 * pic.width() as f32) as i32, ((sp1.y + 1.0) / 2.0 * pic.height() as f32) as i32, &grid_color, 1.0, ); } }
{ let margin = 3; let text_to_height_ratio = 16; let text = format!( "{}x{}x{}", aabb.size().x as i32, aabb.size().y as i32, aabb.size().z as i32 ); let text_size = pic.height() / text_to_height_ratio; pic.fill_rect( 0, pic.height() as i32 - (text_size + margin * 2) as i32, pic.width() as i32, pic.height() as i32, &"333333FF".into(), );
conditional_block
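Both the triangle bounding boxes and the grid lines in the rows above convert between normalized device coordinates in [-1, 1] and pixel indices with the expressions (v + 1.0) / 2.0 * size and 2.0 * (x / size - 0.5). A small round-trip sketch of those two mappings (the helper names are made up):

// NDC [-1, 1] -> pixel coordinate [0, size), as used for smin_x/smax_x and the grid lines.
fn ndc_to_screen(v: f32, size: u32) -> u32 {
    ((v + 1.0) / 2.0 * size as f32) as u32
}

// Pixel coordinate -> NDC, as used for the per-pixel coverage test.
fn screen_to_ndc(x: u32, size: u32) -> f32 {
    2.0 * ((x as f32 / size as f32) - 0.5)
}

fn main() {
    let width = 640;
    // The centre of the NDC range lands in the middle column of the image...
    assert_eq!(ndc_to_screen(0.0, width), 320);
    // ...and mapping a pixel back and forth stays within one pixel of where it started.
    let x = 123;
    let roundtrip = ndc_to_screen(screen_to_ndc(x, width), width);
    assert!((roundtrip as i32 - x as i32).abs() <= 1);
}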
mod.rs
use crate::{ data::{Key, Metakey, Value}, error::*, Aggregator, AggregatorState, Backend, Handle, MapState, Reducer, ReducerState, ValueState, VecState, }; use rocksdb::{ checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, DBPinnableSlice, Options, SliceTransform, WriteBatch, WriteOptions, DB, }; use std::{ cell::UnsafeCell, collections::HashSet, fs, path::{Path, PathBuf}, }; unsafe impl Send for Rocks {} unsafe impl Sync for Rocks {} #[derive(Debug)] pub struct Rocks { inner: UnsafeCell<DB>, restored: bool, name: String, } // we use epochs, so WAL is useless for us fn default_write_opts() -> WriteOptions { let mut res = WriteOptions::default(); res.disable_wal(true); res } impl Rocks { #[inline(always)] #[allow(clippy::mut_from_ref)] fn db_mut(&self) -> &mut DB { unsafe { &mut (*self.inner.get()) } } #[inline(always)] fn db(&self) -> &DB { unsafe { &(*self.inner.get()) } } #[inline] fn get_cf_handle(&self, cf_name: impl AsRef<str>) -> Result<&ColumnFamily> { let cf_name = cf_name.as_ref(); self.db() .cf_handle(cf_name) .with_context(|| RocksMissingColumnFamily { cf_name: cf_name.to_string(), }) } #[inline] fn get( &self, cf_name: impl AsRef<str>, key: impl AsRef<[u8]>, ) -> Result<Option<DBPinnableSlice>> { let cf = self.get_cf_handle(cf_name)?; Ok(self.db().get_pinned_cf(cf, key)?) } #[inline] fn put( &self, cf_name: impl AsRef<str>, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>, ) -> Result<()> { let cf = self.get_cf_handle(cf_name)?; Ok(self .db() .put_cf_opt(cf, key, value, &default_write_opts())?) } #[inline] fn remove(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<()> { let cf = self.get_cf_handle(cf)?; Ok(self.db().delete_cf_opt(cf, key, &default_write_opts())?) } fn remove_prefix(&self, cf: impl AsRef<str>, prefix: impl AsRef<[u8]>) -> Result<()> { let prefix = prefix.as_ref(); let cf_name = cf.as_ref(); let cf = self.get_cf_handle(cf_name)?; // NOTE: this only works assuming the column family is lexicographically ordered (which is // the default, so we don't explicitly set it, see Options::set_comparator) let start = prefix; // delete_range deletes all the entries in [start, end) range, so we can just increment the // least significant byte of the prefix let mut end = start.to_vec(); *end.last_mut() .expect("unreachable, the empty case is covered a few lines above") += 1; let mut wb = WriteBatch::default(); wb.delete_range_cf(cf, start, &end); self.db().write_opt(wb, &default_write_opts())?; Ok(()) } #[inline] fn contains(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<bool> { let cf = self.get_cf_handle(cf.as_ref())?; Ok(self.db().get_pinned_cf(cf, key)?.is_some()) } fn
(&self, cf_name: &str, opts: Options) -> Result<()> { if self.db().cf_handle(cf_name).is_none() { self.db_mut().create_cf(cf_name, &opts)?; } Ok(()) } } fn common_options<IK, N>() -> Options where IK: Metakey, N: Metakey, { let prefix_size = IK::SIZE + N::SIZE; let mut opts = Options::default(); // for map state to work properly, but useful for all the states, so the bloom filters get // populated opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(prefix_size as usize)); opts } impl Backend for Rocks { fn name(&self) -> &str { self.name.as_str() } fn create(path: &Path, name: String) -> Result<Self> where Self: Sized, { let mut opts = Options::default(); opts.create_if_missing(true); let path: PathBuf = path.into(); if!path.exists() { fs::create_dir_all(&path)?; } let column_families: HashSet<String> = match DB::list_cf(&opts, &path) { Ok(cfs) => cfs.into_iter().filter(|n| n!= "default").collect(), // TODO: possibly platform-dependant error message check Err(e) if e.to_string().contains("No such file or directory") => HashSet::new(), Err(e) => return Err(e.into()), }; let cfds = if!column_families.is_empty() { column_families .into_iter() .map(|name| ColumnFamilyDescriptor::new(name, Options::default())) .collect() } else { vec![ColumnFamilyDescriptor::new("default", Options::default())] }; Ok(Rocks { inner: UnsafeCell::new(DB::open_cf_descriptors(&opts, &path, cfds)?), restored: false, name, }) } fn restore(live_path: &Path, checkpoint_path: &Path, name: String) -> Result<Self> where Self: Sized, { fs::create_dir_all(live_path)?; ensure!( fs::read_dir(live_path)?.next().is_none(), RocksRestoreDirNotEmpty { dir: &(*live_path) } ); let mut target_path: PathBuf = live_path.into(); target_path.push("__DUMMY"); // the file name is replaced inside the loop below for entry in fs::read_dir(checkpoint_path)? { let entry = entry?; assert!(entry .file_type() .expect("Cannot read entry metadata") .is_file()); let source_path = entry.path(); // replaces the __DUMMY from above the loop target_path.set_file_name( source_path .file_name() .expect("directory entry with no name?"), ); fs::copy(&source_path, &target_path)?; } Rocks::create(live_path, name).map(|mut r| { r.restored = true; r }) } fn was_restored(&self) -> bool { self.restored } fn checkpoint(&self, checkpoint_path: &Path) -> Result<()> { let db = self.db(); db.flush()?; let checkpointer = Checkpoint::new(db)?; if checkpoint_path.exists() { // TODO: add a warning log here // warn!(logger, "Checkpoint path {:?} exists, deleting"); fs::remove_dir_all(checkpoint_path)? 
} checkpointer.create_checkpoint(checkpoint_path)?; Ok(()) } fn register_value_handle<'s, T: Value, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<ValueState<T>, IK, N>, ) { handle.registered = true; let opts = common_options::<IK, N>(); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_map_handle<'s, K: Key, V: Value, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<MapState<K, V>, IK, N>, ) { handle.registered = true; let opts = common_options::<IK, N>(); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_vec_handle<'s, T: Value, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<VecState<T>, IK, N>, ) { handle.registered = true; let mut opts = common_options::<IK, N>(); opts.set_merge_operator_associative("vec_merge", vec_ops::vec_merge); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_reducer_handle<'s, T: Value, F: Reducer<T>, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<ReducerState<T, F>, IK, N>, ) { handle.registered = true; let mut opts = common_options::<IK, N>(); let reducer_merge = reducer_ops::make_reducer_merge(handle.extra_data.clone()); opts.set_merge_operator_associative("reducer_merge", reducer_merge); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_aggregator_handle<'s, A: Aggregator, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<AggregatorState<A>, IK, N>, ) { handle.registered = true; let mut opts = common_options::<IK, N>(); let aggregator_merge = aggregator_ops::make_aggregator_merge(handle.extra_data.clone()); opts.set_merge_operator_associative("aggregator_merge", aggregator_merge); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } } mod aggregator_ops; mod map_ops; mod reducer_ops; mod value_ops; mod vec_ops; #[cfg(test)] pub mod tests { use super::*; use std::{ ops::{Deref, DerefMut}, sync::Arc, }; use tempfile::TempDir; #[derive(Debug)] pub struct TestDb { rocks: Arc<Rocks>, dir: TempDir, } impl TestDb { #[allow(clippy::new_without_default)] pub fn new() -> TestDb { let dir = TempDir::new().unwrap(); let mut dir_path = dir.path().to_path_buf(); dir_path.push("rocks"); fs::create_dir(&dir_path).unwrap(); let rocks = Rocks::create(&dir_path, "testDB".to_string()).unwrap(); TestDb { rocks: Arc::new(rocks), dir, } } pub fn checkpoint(&mut self) -> PathBuf { let mut checkpoint_dir: PathBuf = self.dir.path().into(); checkpoint_dir.push("checkpoint"); self.rocks.checkpoint(&checkpoint_dir).unwrap(); checkpoint_dir } pub fn from_checkpoint(checkpoint_dir: &str) -> TestDb { let dir = TempDir::new().unwrap(); let mut dir_path = dir.path().to_path_buf(); dir_path.push("rocks"); let rocks = Rocks::restore(&dir_path, checkpoint_dir.as_ref(), "testDB".to_string()).unwrap(); TestDb { rocks: Arc::new(rocks), dir, } } } impl Deref for TestDb { type Target = Arc<Rocks>; fn deref(&self) -> &Self::Target { &self.rocks } } impl DerefMut for TestDb { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.rocks } } #[test] fn simple_rocksdb_test() { let db = TestDb::new(); let key = "key"; let value = "test"; let column_family = "default"; db.put(column_family, key.as_bytes(), value.as_bytes()) .expect("put"); { let v = db.get(column_family, key.as_bytes()).unwrap().unwrap(); assert_eq!(value, String::from_utf8_lossy(&v)); } db.remove(column_family, key.as_bytes()).expect("remove"); 
let v = db.get(column_family, key.as_bytes()).unwrap(); assert!(v.is_none()); } #[test] fn checkpoint_rocksdb_raw_test() { let tmp_dir = TempDir::new().unwrap(); let checkpoints_dir = TempDir::new().unwrap(); let restore_dir = TempDir::new().unwrap(); let dir_path = tmp_dir.path(); let mut checkpoints_dir_path = checkpoints_dir.path().to_path_buf(); checkpoints_dir_path.push("chkp0"); let mut restore_dir_path = restore_dir.path().to_path_buf(); restore_dir_path.push("chkp0"); let db = Rocks::create(dir_path, "testDB".to_string()).unwrap(); let key: &[u8] = b"key"; let initial_value: &[u8] = b"value"; let new_value: &[u8] = b"new value"; let column_family = "default"; db.put(column_family, key, initial_value) .expect("put failed"); db.checkpoint(&checkpoints_dir_path) .expect("checkpoint failed"); db.put(column_family, key, new_value) .expect("second put failed"); let db_from_checkpoint = Rocks::restore( &restore_dir_path, &checkpoints_dir_path, "testDB".to_string(), ) .expect("Could not open checkpointed db"); assert_eq!( new_value, db.get(column_family, key) .expect("Could not get from the original db") .unwrap() .as_ref() ); assert_eq!( initial_value, db_from_checkpoint .get(column_family, key) .expect("Could not get from the checkpoint") .unwrap() .as_ref() ); } #[test] fn checkpoint_restore_state_test() { let mut original_test = TestDb::new(); let mut a_handle = Handle::value("a"); original_test.register_value_handle(&mut a_handle); let checkpoint_dir = { let mut a = a_handle.activate(original_test.clone()); a.set(420).unwrap(); let checkpoint_dir = original_test.checkpoint(); assert_eq!(a.get().unwrap().unwrap(), 420); a.set(69).unwrap(); assert_eq!(a.get().unwrap().unwrap(), 69); checkpoint_dir }; let restored = TestDb::from_checkpoint(&checkpoint_dir.to_string_lossy()); { let mut a_handle = Handle::value("a"); restored.register_value_handle(&mut a_handle); let mut a_restored = a_handle.activate(restored.clone()); // TODO: serialize value state metadata (type names, serialization, etc.) into rocksdb, so // that type mismatches are caught early. Right now it would be possible to, let's say, // store an integer, and then read a float from the restored state backend assert_eq!(a_restored.get().unwrap().unwrap(), 420); a_restored.set(1337).unwrap(); assert_eq!(a_restored.get().unwrap().unwrap(), 1337); } } common_state_tests!(TestDb::new()); }
create_column_family
identifier_name
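remove_prefix in the mod.rs rows deletes every entry under a key prefix by passing delete_range_cf the prefix as the inclusive start and the prefix with its last byte incremented as the exclusive end, which only works because the column families keep the default lexicographic byte ordering. Below is a standalone sketch of that bound computation; the function name is made up, and unlike the code above it also handles trailing 0xff bytes and the empty prefix instead of assuming they never occur.

/// Illustrative helper: the smallest byte string that sorts after every key
/// beginning with `prefix` under lexicographic ordering, or None when no finite
/// bound exists (empty or all-0xff prefix).
fn prefix_upper_bound(prefix: &[u8]) -> Option<Vec<u8>> {
    let mut end = prefix.to_vec();
    // Trailing 0xff bytes cannot be incremented, so drop them first.
    while end.last() == Some(&0xff) {
        end.pop();
    }
    let last = end.last_mut()?;
    *last += 1;
    Some(end)
}

fn main() {
    // Everything starting with [1, 2, 3] sorts strictly below [1, 2, 4].
    assert_eq!(prefix_upper_bound(&[1, 2, 3]), Some(vec![1, 2, 4]));
    // A trailing 0xff rolls over into the byte before it.
    assert_eq!(prefix_upper_bound(&[1, 0xff]), Some(vec![2]));
    // No finite upper bound exists for these prefixes.
    assert_eq!(prefix_upper_bound(&[0xff]), None);
    assert_eq!(prefix_upper_bound(&[]), None);
}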
mod.rs
use crate::{ data::{Key, Metakey, Value}, error::*, Aggregator, AggregatorState, Backend, Handle, MapState, Reducer, ReducerState, ValueState, VecState, }; use rocksdb::{ checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, DBPinnableSlice, Options, SliceTransform, WriteBatch, WriteOptions, DB, }; use std::{ cell::UnsafeCell, collections::HashSet, fs, path::{Path, PathBuf}, }; unsafe impl Send for Rocks {} unsafe impl Sync for Rocks {} #[derive(Debug)] pub struct Rocks { inner: UnsafeCell<DB>, restored: bool, name: String, } // we use epochs, so WAL is useless for us fn default_write_opts() -> WriteOptions { let mut res = WriteOptions::default(); res.disable_wal(true); res } impl Rocks { #[inline(always)] #[allow(clippy::mut_from_ref)] fn db_mut(&self) -> &mut DB { unsafe { &mut (*self.inner.get()) } } #[inline(always)] fn db(&self) -> &DB { unsafe { &(*self.inner.get()) } } #[inline] fn get_cf_handle(&self, cf_name: impl AsRef<str>) -> Result<&ColumnFamily> { let cf_name = cf_name.as_ref(); self.db() .cf_handle(cf_name) .with_context(|| RocksMissingColumnFamily { cf_name: cf_name.to_string(), }) } #[inline] fn get( &self, cf_name: impl AsRef<str>, key: impl AsRef<[u8]>, ) -> Result<Option<DBPinnableSlice>> { let cf = self.get_cf_handle(cf_name)?; Ok(self.db().get_pinned_cf(cf, key)?) } #[inline] fn put( &self, cf_name: impl AsRef<str>, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>, ) -> Result<()> { let cf = self.get_cf_handle(cf_name)?; Ok(self .db() .put_cf_opt(cf, key, value, &default_write_opts())?) } #[inline] fn remove(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<()> { let cf = self.get_cf_handle(cf)?; Ok(self.db().delete_cf_opt(cf, key, &default_write_opts())?) } fn remove_prefix(&self, cf: impl AsRef<str>, prefix: impl AsRef<[u8]>) -> Result<()> { let prefix = prefix.as_ref(); let cf_name = cf.as_ref(); let cf = self.get_cf_handle(cf_name)?; // NOTE: this only works assuming the column family is lexicographically ordered (which is // the default, so we don't explicitly set it, see Options::set_comparator) let start = prefix; // delete_range deletes all the entries in [start, end) range, so we can just increment the // least significant byte of the prefix let mut end = start.to_vec(); *end.last_mut() .expect("unreachable, the empty case is covered a few lines above") += 1; let mut wb = WriteBatch::default(); wb.delete_range_cf(cf, start, &end); self.db().write_opt(wb, &default_write_opts())?; Ok(()) } #[inline] fn contains(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<bool> { let cf = self.get_cf_handle(cf.as_ref())?; Ok(self.db().get_pinned_cf(cf, key)?.is_some()) } fn create_column_family(&self, cf_name: &str, opts: Options) -> Result<()> { if self.db().cf_handle(cf_name).is_none() { self.db_mut().create_cf(cf_name, &opts)?; } Ok(()) } } fn common_options<IK, N>() -> Options where IK: Metakey, N: Metakey, { let prefix_size = IK::SIZE + N::SIZE; let mut opts = Options::default(); // for map state to work properly, but useful for all the states, so the bloom filters get // populated opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(prefix_size as usize)); opts } impl Backend for Rocks { fn name(&self) -> &str { self.name.as_str() } fn create(path: &Path, name: String) -> Result<Self> where Self: Sized, { let mut opts = Options::default(); opts.create_if_missing(true); let path: PathBuf = path.into(); if!path.exists() { fs::create_dir_all(&path)?; } let column_families: HashSet<String> = match DB::list_cf(&opts, &path) { Ok(cfs) 
=> cfs.into_iter().filter(|n| n!= "default").collect(), // TODO: possibly platform-dependant error message check Err(e) if e.to_string().contains("No such file or directory") => HashSet::new(), Err(e) => return Err(e.into()), }; let cfds = if!column_families.is_empty() { column_families .into_iter() .map(|name| ColumnFamilyDescriptor::new(name, Options::default())) .collect() } else { vec![ColumnFamilyDescriptor::new("default", Options::default())] }; Ok(Rocks { inner: UnsafeCell::new(DB::open_cf_descriptors(&opts, &path, cfds)?), restored: false, name, }) } fn restore(live_path: &Path, checkpoint_path: &Path, name: String) -> Result<Self> where Self: Sized, { fs::create_dir_all(live_path)?; ensure!( fs::read_dir(live_path)?.next().is_none(), RocksRestoreDirNotEmpty { dir: &(*live_path) } ); let mut target_path: PathBuf = live_path.into(); target_path.push("__DUMMY"); // the file name is replaced inside the loop below for entry in fs::read_dir(checkpoint_path)? { let entry = entry?; assert!(entry .file_type() .expect("Cannot read entry metadata") .is_file()); let source_path = entry.path(); // replaces the __DUMMY from above the loop target_path.set_file_name( source_path .file_name() .expect("directory entry with no name?"), ); fs::copy(&source_path, &target_path)?; } Rocks::create(live_path, name).map(|mut r| { r.restored = true; r }) } fn was_restored(&self) -> bool { self.restored } fn checkpoint(&self, checkpoint_path: &Path) -> Result<()> { let db = self.db(); db.flush()?; let checkpointer = Checkpoint::new(db)?; if checkpoint_path.exists()
checkpointer.create_checkpoint(checkpoint_path)?; Ok(()) } fn register_value_handle<'s, T: Value, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<ValueState<T>, IK, N>, ) { handle.registered = true; let opts = common_options::<IK, N>(); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_map_handle<'s, K: Key, V: Value, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<MapState<K, V>, IK, N>, ) { handle.registered = true; let opts = common_options::<IK, N>(); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_vec_handle<'s, T: Value, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<VecState<T>, IK, N>, ) { handle.registered = true; let mut opts = common_options::<IK, N>(); opts.set_merge_operator_associative("vec_merge", vec_ops::vec_merge); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_reducer_handle<'s, T: Value, F: Reducer<T>, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<ReducerState<T, F>, IK, N>, ) { handle.registered = true; let mut opts = common_options::<IK, N>(); let reducer_merge = reducer_ops::make_reducer_merge(handle.extra_data.clone()); opts.set_merge_operator_associative("reducer_merge", reducer_merge); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_aggregator_handle<'s, A: Aggregator, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<AggregatorState<A>, IK, N>, ) { handle.registered = true; let mut opts = common_options::<IK, N>(); let aggregator_merge = aggregator_ops::make_aggregator_merge(handle.extra_data.clone()); opts.set_merge_operator_associative("aggregator_merge", aggregator_merge); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } } mod aggregator_ops; mod map_ops; mod reducer_ops; mod value_ops; mod vec_ops; #[cfg(test)] pub mod tests { use super::*; use std::{ ops::{Deref, DerefMut}, sync::Arc, }; use tempfile::TempDir; #[derive(Debug)] pub struct TestDb { rocks: Arc<Rocks>, dir: TempDir, } impl TestDb { #[allow(clippy::new_without_default)] pub fn new() -> TestDb { let dir = TempDir::new().unwrap(); let mut dir_path = dir.path().to_path_buf(); dir_path.push("rocks"); fs::create_dir(&dir_path).unwrap(); let rocks = Rocks::create(&dir_path, "testDB".to_string()).unwrap(); TestDb { rocks: Arc::new(rocks), dir, } } pub fn checkpoint(&mut self) -> PathBuf { let mut checkpoint_dir: PathBuf = self.dir.path().into(); checkpoint_dir.push("checkpoint"); self.rocks.checkpoint(&checkpoint_dir).unwrap(); checkpoint_dir } pub fn from_checkpoint(checkpoint_dir: &str) -> TestDb { let dir = TempDir::new().unwrap(); let mut dir_path = dir.path().to_path_buf(); dir_path.push("rocks"); let rocks = Rocks::restore(&dir_path, checkpoint_dir.as_ref(), "testDB".to_string()).unwrap(); TestDb { rocks: Arc::new(rocks), dir, } } } impl Deref for TestDb { type Target = Arc<Rocks>; fn deref(&self) -> &Self::Target { &self.rocks } } impl DerefMut for TestDb { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.rocks } } #[test] fn simple_rocksdb_test() { let db = TestDb::new(); let key = "key"; let value = "test"; let column_family = "default"; db.put(column_family, key.as_bytes(), value.as_bytes()) .expect("put"); { let v = db.get(column_family, key.as_bytes()).unwrap().unwrap(); assert_eq!(value, String::from_utf8_lossy(&v)); } db.remove(column_family, key.as_bytes()).expect("remove"); let 
v = db.get(column_family, key.as_bytes()).unwrap(); assert!(v.is_none()); } #[test] fn checkpoint_rocksdb_raw_test() { let tmp_dir = TempDir::new().unwrap(); let checkpoints_dir = TempDir::new().unwrap(); let restore_dir = TempDir::new().unwrap(); let dir_path = tmp_dir.path(); let mut checkpoints_dir_path = checkpoints_dir.path().to_path_buf(); checkpoints_dir_path.push("chkp0"); let mut restore_dir_path = restore_dir.path().to_path_buf(); restore_dir_path.push("chkp0"); let db = Rocks::create(dir_path, "testDB".to_string()).unwrap(); let key: &[u8] = b"key"; let initial_value: &[u8] = b"value"; let new_value: &[u8] = b"new value"; let column_family = "default"; db.put(column_family, key, initial_value) .expect("put failed"); db.checkpoint(&checkpoints_dir_path) .expect("checkpoint failed"); db.put(column_family, key, new_value) .expect("second put failed"); let db_from_checkpoint = Rocks::restore( &restore_dir_path, &checkpoints_dir_path, "testDB".to_string(), ) .expect("Could not open checkpointed db"); assert_eq!( new_value, db.get(column_family, key) .expect("Could not get from the original db") .unwrap() .as_ref() ); assert_eq!( initial_value, db_from_checkpoint .get(column_family, key) .expect("Could not get from the checkpoint") .unwrap() .as_ref() ); } #[test] fn checkpoint_restore_state_test() { let mut original_test = TestDb::new(); let mut a_handle = Handle::value("a"); original_test.register_value_handle(&mut a_handle); let checkpoint_dir = { let mut a = a_handle.activate(original_test.clone()); a.set(420).unwrap(); let checkpoint_dir = original_test.checkpoint(); assert_eq!(a.get().unwrap().unwrap(), 420); a.set(69).unwrap(); assert_eq!(a.get().unwrap().unwrap(), 69); checkpoint_dir }; let restored = TestDb::from_checkpoint(&checkpoint_dir.to_string_lossy()); { let mut a_handle = Handle::value("a"); restored.register_value_handle(&mut a_handle); let mut a_restored = a_handle.activate(restored.clone()); // TODO: serialize value state metadata (type names, serialization, etc.) into rocksdb, so // that type mismatches are caught early. Right now it would be possible to, let's say, // store an integer, and then read a float from the restored state backend assert_eq!(a_restored.get().unwrap().unwrap(), 420); a_restored.set(1337).unwrap(); assert_eq!(a_restored.get().unwrap().unwrap(), 1337); } } common_state_tests!(TestDb::new()); }
{ // TODO: add a warning log here // warn!(logger, "Checkpoint path {:?} exists, deleting"); fs::remove_dir_all(checkpoint_path)? }
conditional_block
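Backend::restore in the rows above refuses to restore into a live directory that already has contents, using fs::read_dir(live_path)?.next().is_none() as the emptiness test before copying the checkpoint files over. The same idiom on its own, as a hypothetical helper:

use std::{fs, io, path::Path};

/// True when `dir` exists and yields no directory entries at all, mirroring the
/// guard used by restore() before it copies checkpoint files into place.
fn dir_is_empty(dir: &Path) -> io::Result<bool> {
    Ok(fs::read_dir(dir)?.next().is_none())
}

fn main() -> io::Result<()> {
    // Illustrative path; a freshly created directory reports as empty.
    let dir = std::env::temp_dir().join("restore_target_demo");
    fs::create_dir_all(&dir)?;
    println!("{} is empty: {}", dir.display(), dir_is_empty(&dir)?);
    Ok(())
}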
mod.rs
use crate::{ data::{Key, Metakey, Value}, error::*, Aggregator, AggregatorState, Backend, Handle, MapState, Reducer, ReducerState, ValueState, VecState, }; use rocksdb::{ checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, DBPinnableSlice, Options, SliceTransform, WriteBatch, WriteOptions, DB, }; use std::{ cell::UnsafeCell, collections::HashSet, fs, path::{Path, PathBuf}, }; unsafe impl Send for Rocks {} unsafe impl Sync for Rocks {} #[derive(Debug)] pub struct Rocks { inner: UnsafeCell<DB>, restored: bool, name: String, } // we use epochs, so WAL is useless for us fn default_write_opts() -> WriteOptions { let mut res = WriteOptions::default(); res.disable_wal(true); res } impl Rocks { #[inline(always)] #[allow(clippy::mut_from_ref)] fn db_mut(&self) -> &mut DB { unsafe { &mut (*self.inner.get()) } } #[inline(always)] fn db(&self) -> &DB { unsafe { &(*self.inner.get()) } } #[inline] fn get_cf_handle(&self, cf_name: impl AsRef<str>) -> Result<&ColumnFamily> { let cf_name = cf_name.as_ref(); self.db() .cf_handle(cf_name) .with_context(|| RocksMissingColumnFamily { cf_name: cf_name.to_string(), }) } #[inline] fn get( &self, cf_name: impl AsRef<str>, key: impl AsRef<[u8]>, ) -> Result<Option<DBPinnableSlice>> { let cf = self.get_cf_handle(cf_name)?; Ok(self.db().get_pinned_cf(cf, key)?) } #[inline] fn put( &self, cf_name: impl AsRef<str>, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>, ) -> Result<()> { let cf = self.get_cf_handle(cf_name)?; Ok(self .db() .put_cf_opt(cf, key, value, &default_write_opts())?) } #[inline] fn remove(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<()> { let cf = self.get_cf_handle(cf)?; Ok(self.db().delete_cf_opt(cf, key, &default_write_opts())?) } fn remove_prefix(&self, cf: impl AsRef<str>, prefix: impl AsRef<[u8]>) -> Result<()> { let prefix = prefix.as_ref(); let cf_name = cf.as_ref(); let cf = self.get_cf_handle(cf_name)?; // NOTE: this only works assuming the column family is lexicographically ordered (which is // the default, so we don't explicitly set it, see Options::set_comparator) let start = prefix; // delete_range deletes all the entries in [start, end) range, so we can just increment the // least significant byte of the prefix let mut end = start.to_vec(); *end.last_mut() .expect("unreachable, the empty case is covered a few lines above") += 1; let mut wb = WriteBatch::default(); wb.delete_range_cf(cf, start, &end); self.db().write_opt(wb, &default_write_opts())?; Ok(()) } #[inline] fn contains(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<bool> { let cf = self.get_cf_handle(cf.as_ref())?; Ok(self.db().get_pinned_cf(cf, key)?.is_some()) } fn create_column_family(&self, cf_name: &str, opts: Options) -> Result<()> { if self.db().cf_handle(cf_name).is_none() { self.db_mut().create_cf(cf_name, &opts)?; } Ok(()) } } fn common_options<IK, N>() -> Options where IK: Metakey, N: Metakey,
impl Backend for Rocks { fn name(&self) -> &str { self.name.as_str() } fn create(path: &Path, name: String) -> Result<Self> where Self: Sized, { let mut opts = Options::default(); opts.create_if_missing(true); let path: PathBuf = path.into(); if!path.exists() { fs::create_dir_all(&path)?; } let column_families: HashSet<String> = match DB::list_cf(&opts, &path) { Ok(cfs) => cfs.into_iter().filter(|n| n!= "default").collect(), // TODO: possibly platform-dependant error message check Err(e) if e.to_string().contains("No such file or directory") => HashSet::new(), Err(e) => return Err(e.into()), }; let cfds = if!column_families.is_empty() { column_families .into_iter() .map(|name| ColumnFamilyDescriptor::new(name, Options::default())) .collect() } else { vec![ColumnFamilyDescriptor::new("default", Options::default())] }; Ok(Rocks { inner: UnsafeCell::new(DB::open_cf_descriptors(&opts, &path, cfds)?), restored: false, name, }) } fn restore(live_path: &Path, checkpoint_path: &Path, name: String) -> Result<Self> where Self: Sized, { fs::create_dir_all(live_path)?; ensure!( fs::read_dir(live_path)?.next().is_none(), RocksRestoreDirNotEmpty { dir: &(*live_path) } ); let mut target_path: PathBuf = live_path.into(); target_path.push("__DUMMY"); // the file name is replaced inside the loop below for entry in fs::read_dir(checkpoint_path)? { let entry = entry?; assert!(entry .file_type() .expect("Cannot read entry metadata") .is_file()); let source_path = entry.path(); // replaces the __DUMMY from above the loop target_path.set_file_name( source_path .file_name() .expect("directory entry with no name?"), ); fs::copy(&source_path, &target_path)?; } Rocks::create(live_path, name).map(|mut r| { r.restored = true; r }) } fn was_restored(&self) -> bool { self.restored } fn checkpoint(&self, checkpoint_path: &Path) -> Result<()> { let db = self.db(); db.flush()?; let checkpointer = Checkpoint::new(db)?; if checkpoint_path.exists() { // TODO: add a warning log here // warn!(logger, "Checkpoint path {:?} exists, deleting"); fs::remove_dir_all(checkpoint_path)? 
} checkpointer.create_checkpoint(checkpoint_path)?; Ok(()) } fn register_value_handle<'s, T: Value, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<ValueState<T>, IK, N>, ) { handle.registered = true; let opts = common_options::<IK, N>(); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_map_handle<'s, K: Key, V: Value, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<MapState<K, V>, IK, N>, ) { handle.registered = true; let opts = common_options::<IK, N>(); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_vec_handle<'s, T: Value, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<VecState<T>, IK, N>, ) { handle.registered = true; let mut opts = common_options::<IK, N>(); opts.set_merge_operator_associative("vec_merge", vec_ops::vec_merge); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_reducer_handle<'s, T: Value, F: Reducer<T>, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<ReducerState<T, F>, IK, N>, ) { handle.registered = true; let mut opts = common_options::<IK, N>(); let reducer_merge = reducer_ops::make_reducer_merge(handle.extra_data.clone()); opts.set_merge_operator_associative("reducer_merge", reducer_merge); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_aggregator_handle<'s, A: Aggregator, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<AggregatorState<A>, IK, N>, ) { handle.registered = true; let mut opts = common_options::<IK, N>(); let aggregator_merge = aggregator_ops::make_aggregator_merge(handle.extra_data.clone()); opts.set_merge_operator_associative("aggregator_merge", aggregator_merge); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } } mod aggregator_ops; mod map_ops; mod reducer_ops; mod value_ops; mod vec_ops; #[cfg(test)] pub mod tests { use super::*; use std::{ ops::{Deref, DerefMut}, sync::Arc, }; use tempfile::TempDir; #[derive(Debug)] pub struct TestDb { rocks: Arc<Rocks>, dir: TempDir, } impl TestDb { #[allow(clippy::new_without_default)] pub fn new() -> TestDb { let dir = TempDir::new().unwrap(); let mut dir_path = dir.path().to_path_buf(); dir_path.push("rocks"); fs::create_dir(&dir_path).unwrap(); let rocks = Rocks::create(&dir_path, "testDB".to_string()).unwrap(); TestDb { rocks: Arc::new(rocks), dir, } } pub fn checkpoint(&mut self) -> PathBuf { let mut checkpoint_dir: PathBuf = self.dir.path().into(); checkpoint_dir.push("checkpoint"); self.rocks.checkpoint(&checkpoint_dir).unwrap(); checkpoint_dir } pub fn from_checkpoint(checkpoint_dir: &str) -> TestDb { let dir = TempDir::new().unwrap(); let mut dir_path = dir.path().to_path_buf(); dir_path.push("rocks"); let rocks = Rocks::restore(&dir_path, checkpoint_dir.as_ref(), "testDB".to_string()).unwrap(); TestDb { rocks: Arc::new(rocks), dir, } } } impl Deref for TestDb { type Target = Arc<Rocks>; fn deref(&self) -> &Self::Target { &self.rocks } } impl DerefMut for TestDb { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.rocks } } #[test] fn simple_rocksdb_test() { let db = TestDb::new(); let key = "key"; let value = "test"; let column_family = "default"; db.put(column_family, key.as_bytes(), value.as_bytes()) .expect("put"); { let v = db.get(column_family, key.as_bytes()).unwrap().unwrap(); assert_eq!(value, String::from_utf8_lossy(&v)); } db.remove(column_family, key.as_bytes()).expect("remove"); 
let v = db.get(column_family, key.as_bytes()).unwrap(); assert!(v.is_none()); } #[test] fn checkpoint_rocksdb_raw_test() { let tmp_dir = TempDir::new().unwrap(); let checkpoints_dir = TempDir::new().unwrap(); let restore_dir = TempDir::new().unwrap(); let dir_path = tmp_dir.path(); let mut checkpoints_dir_path = checkpoints_dir.path().to_path_buf(); checkpoints_dir_path.push("chkp0"); let mut restore_dir_path = restore_dir.path().to_path_buf(); restore_dir_path.push("chkp0"); let db = Rocks::create(dir_path, "testDB".to_string()).unwrap(); let key: &[u8] = b"key"; let initial_value: &[u8] = b"value"; let new_value: &[u8] = b"new value"; let column_family = "default"; db.put(column_family, key, initial_value) .expect("put failed"); db.checkpoint(&checkpoints_dir_path) .expect("checkpoint failed"); db.put(column_family, key, new_value) .expect("second put failed"); let db_from_checkpoint = Rocks::restore( &restore_dir_path, &checkpoints_dir_path, "testDB".to_string(), ) .expect("Could not open checkpointed db"); assert_eq!( new_value, db.get(column_family, key) .expect("Could not get from the original db") .unwrap() .as_ref() ); assert_eq!( initial_value, db_from_checkpoint .get(column_family, key) .expect("Could not get from the checkpoint") .unwrap() .as_ref() ); } #[test] fn checkpoint_restore_state_test() { let mut original_test = TestDb::new(); let mut a_handle = Handle::value("a"); original_test.register_value_handle(&mut a_handle); let checkpoint_dir = { let mut a = a_handle.activate(original_test.clone()); a.set(420).unwrap(); let checkpoint_dir = original_test.checkpoint(); assert_eq!(a.get().unwrap().unwrap(), 420); a.set(69).unwrap(); assert_eq!(a.get().unwrap().unwrap(), 69); checkpoint_dir }; let restored = TestDb::from_checkpoint(&checkpoint_dir.to_string_lossy()); { let mut a_handle = Handle::value("a"); restored.register_value_handle(&mut a_handle); let mut a_restored = a_handle.activate(restored.clone()); // TODO: serialize value state metadata (type names, serialization, etc.) into rocksdb, so // that type mismatches are caught early. Right now it would be possible to, let's say, // store an integer, and then read a float from the restored state backend assert_eq!(a_restored.get().unwrap().unwrap(), 420); a_restored.set(1337).unwrap(); assert_eq!(a_restored.get().unwrap().unwrap(), 1337); } } common_state_tests!(TestDb::new()); }
{ let prefix_size = IK::SIZE + N::SIZE; let mut opts = Options::default(); // for map state to work properly, but useful for all the states, so the bloom filters get // populated opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(prefix_size as usize)); opts }
identifier_body
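The `common_options` body above is what ties the per-state column families to RocksDB's prefix bloom filters. A minimal free-standing sketch of the same idea, assuming the same `rocksdb` crate as the surrounding code, with the `Metakey` sizes passed as plain arguments (the helper name and signature are illustrative):

```rust
use rocksdb::{Options, SliceTransform};

/// Hypothetical helper: builds column-family options with a fixed-size key prefix,
/// mirroring `common_options` but without the `Metakey` trait bounds.
fn options_with_prefix(item_key_size: usize, namespace_size: usize) -> Options {
    let prefix_size = item_key_size + namespace_size;
    let mut opts = Options::default();
    // a fixed-prefix extractor lets RocksDB build prefix bloom filters, which is what
    // makes the per-(item key, namespace) scans used by map state cheap
    opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(prefix_size));
    opts
}

fn main() {
    // these options would then be handed to `create_column_family` when a handle is registered
    let _opts = options_with_prefix(8, 4);
}
```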
mod.rs
use crate::{ data::{Key, Metakey, Value}, error::*, Aggregator, AggregatorState, Backend, Handle, MapState, Reducer, ReducerState, ValueState, VecState, }; use rocksdb::{ checkpoint::Checkpoint, ColumnFamily, ColumnFamilyDescriptor, DBPinnableSlice, Options, SliceTransform, WriteBatch, WriteOptions, DB, }; use std::{ cell::UnsafeCell, collections::HashSet, fs, path::{Path, PathBuf}, }; unsafe impl Send for Rocks {} unsafe impl Sync for Rocks {} #[derive(Debug)] pub struct Rocks { inner: UnsafeCell<DB>, restored: bool, name: String, } // we use epochs, so WAL is useless for us fn default_write_opts() -> WriteOptions { let mut res = WriteOptions::default(); res.disable_wal(true); res } impl Rocks { #[inline(always)] #[allow(clippy::mut_from_ref)] fn db_mut(&self) -> &mut DB { unsafe { &mut (*self.inner.get()) } } #[inline(always)] fn db(&self) -> &DB { unsafe { &(*self.inner.get()) } } #[inline] fn get_cf_handle(&self, cf_name: impl AsRef<str>) -> Result<&ColumnFamily> { let cf_name = cf_name.as_ref(); self.db() .cf_handle(cf_name) .with_context(|| RocksMissingColumnFamily { cf_name: cf_name.to_string(), }) } #[inline] fn get( &self, cf_name: impl AsRef<str>, key: impl AsRef<[u8]>, ) -> Result<Option<DBPinnableSlice>> { let cf = self.get_cf_handle(cf_name)?; Ok(self.db().get_pinned_cf(cf, key)?) } #[inline] fn put( &self, cf_name: impl AsRef<str>, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>, ) -> Result<()> { let cf = self.get_cf_handle(cf_name)?; Ok(self .db() .put_cf_opt(cf, key, value, &default_write_opts())?) } #[inline] fn remove(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<()> { let cf = self.get_cf_handle(cf)?; Ok(self.db().delete_cf_opt(cf, key, &default_write_opts())?) } fn remove_prefix(&self, cf: impl AsRef<str>, prefix: impl AsRef<[u8]>) -> Result<()> { let prefix = prefix.as_ref(); let cf_name = cf.as_ref(); let cf = self.get_cf_handle(cf_name)?; // NOTE: this only works assuming the column family is lexicographically ordered (which is // the default, so we don't explicitly set it, see Options::set_comparator) let start = prefix; // delete_range deletes all the entries in [start, end) range, so we can just increment the // least significant byte of the prefix let mut end = start.to_vec(); *end.last_mut() .expect("unreachable, the empty case is covered a few lines above") += 1; let mut wb = WriteBatch::default(); wb.delete_range_cf(cf, start, &end); self.db().write_opt(wb, &default_write_opts())?; Ok(()) } #[inline] fn contains(&self, cf: impl AsRef<str>, key: impl AsRef<[u8]>) -> Result<bool> { let cf = self.get_cf_handle(cf.as_ref())?; Ok(self.db().get_pinned_cf(cf, key)?.is_some()) } fn create_column_family(&self, cf_name: &str, opts: Options) -> Result<()> { if self.db().cf_handle(cf_name).is_none() { self.db_mut().create_cf(cf_name, &opts)?; } Ok(()) } } fn common_options<IK, N>() -> Options where IK: Metakey, N: Metakey, { let prefix_size = IK::SIZE + N::SIZE; let mut opts = Options::default(); // for map state to work properly, but useful for all the states, so the bloom filters get // populated opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(prefix_size as usize)); opts } impl Backend for Rocks { fn name(&self) -> &str { self.name.as_str() } fn create(path: &Path, name: String) -> Result<Self> where Self: Sized, { let mut opts = Options::default(); opts.create_if_missing(true); let path: PathBuf = path.into(); if!path.exists() { fs::create_dir_all(&path)?; } let column_families: HashSet<String> = match DB::list_cf(&opts, &path) { Ok(cfs) 
=> cfs.into_iter().filter(|n| n!= "default").collect(), // TODO: possibly platform-dependant error message check Err(e) if e.to_string().contains("No such file or directory") => HashSet::new(), Err(e) => return Err(e.into()), }; let cfds = if!column_families.is_empty() { column_families .into_iter() .map(|name| ColumnFamilyDescriptor::new(name, Options::default())) .collect() } else { vec![ColumnFamilyDescriptor::new("default", Options::default())] }; Ok(Rocks { inner: UnsafeCell::new(DB::open_cf_descriptors(&opts, &path, cfds)?), restored: false, name, }) } fn restore(live_path: &Path, checkpoint_path: &Path, name: String) -> Result<Self> where Self: Sized, { fs::create_dir_all(live_path)?; ensure!( fs::read_dir(live_path)?.next().is_none(), RocksRestoreDirNotEmpty { dir: &(*live_path) } ); let mut target_path: PathBuf = live_path.into(); target_path.push("__DUMMY"); // the file name is replaced inside the loop below for entry in fs::read_dir(checkpoint_path)? { let entry = entry?; assert!(entry .file_type() .expect("Cannot read entry metadata") .is_file()); let source_path = entry.path(); // replaces the __DUMMY from above the loop target_path.set_file_name( source_path .file_name() .expect("directory entry with no name?"), ); fs::copy(&source_path, &target_path)?; } Rocks::create(live_path, name).map(|mut r| { r.restored = true; r }) } fn was_restored(&self) -> bool { self.restored } fn checkpoint(&self, checkpoint_path: &Path) -> Result<()> { let db = self.db(); db.flush()?; let checkpointer = Checkpoint::new(db)?; if checkpoint_path.exists() {
} checkpointer.create_checkpoint(checkpoint_path)?; Ok(()) } fn register_value_handle<'s, T: Value, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<ValueState<T>, IK, N>, ) { handle.registered = true; let opts = common_options::<IK, N>(); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_map_handle<'s, K: Key, V: Value, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<MapState<K, V>, IK, N>, ) { handle.registered = true; let opts = common_options::<IK, N>(); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_vec_handle<'s, T: Value, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<VecState<T>, IK, N>, ) { handle.registered = true; let mut opts = common_options::<IK, N>(); opts.set_merge_operator_associative("vec_merge", vec_ops::vec_merge); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_reducer_handle<'s, T: Value, F: Reducer<T>, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<ReducerState<T, F>, IK, N>, ) { handle.registered = true; let mut opts = common_options::<IK, N>(); let reducer_merge = reducer_ops::make_reducer_merge(handle.extra_data.clone()); opts.set_merge_operator_associative("reducer_merge", reducer_merge); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } fn register_aggregator_handle<'s, A: Aggregator, IK: Metakey, N: Metakey>( &'s self, handle: &'s mut Handle<AggregatorState<A>, IK, N>, ) { handle.registered = true; let mut opts = common_options::<IK, N>(); let aggregator_merge = aggregator_ops::make_aggregator_merge(handle.extra_data.clone()); opts.set_merge_operator_associative("aggregator_merge", aggregator_merge); self.create_column_family(&handle.id, opts) .expect("Could not create column family"); } } mod aggregator_ops; mod map_ops; mod reducer_ops; mod value_ops; mod vec_ops; #[cfg(test)] pub mod tests { use super::*; use std::{ ops::{Deref, DerefMut}, sync::Arc, }; use tempfile::TempDir; #[derive(Debug)] pub struct TestDb { rocks: Arc<Rocks>, dir: TempDir, } impl TestDb { #[allow(clippy::new_without_default)] pub fn new() -> TestDb { let dir = TempDir::new().unwrap(); let mut dir_path = dir.path().to_path_buf(); dir_path.push("rocks"); fs::create_dir(&dir_path).unwrap(); let rocks = Rocks::create(&dir_path, "testDB".to_string()).unwrap(); TestDb { rocks: Arc::new(rocks), dir, } } pub fn checkpoint(&mut self) -> PathBuf { let mut checkpoint_dir: PathBuf = self.dir.path().into(); checkpoint_dir.push("checkpoint"); self.rocks.checkpoint(&checkpoint_dir).unwrap(); checkpoint_dir } pub fn from_checkpoint(checkpoint_dir: &str) -> TestDb { let dir = TempDir::new().unwrap(); let mut dir_path = dir.path().to_path_buf(); dir_path.push("rocks"); let rocks = Rocks::restore(&dir_path, checkpoint_dir.as_ref(), "testDB".to_string()).unwrap(); TestDb { rocks: Arc::new(rocks), dir, } } } impl Deref for TestDb { type Target = Arc<Rocks>; fn deref(&self) -> &Self::Target { &self.rocks } } impl DerefMut for TestDb { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.rocks } } #[test] fn simple_rocksdb_test() { let db = TestDb::new(); let key = "key"; let value = "test"; let column_family = "default"; db.put(column_family, key.as_bytes(), value.as_bytes()) .expect("put"); { let v = db.get(column_family, key.as_bytes()).unwrap().unwrap(); assert_eq!(value, String::from_utf8_lossy(&v)); } db.remove(column_family, key.as_bytes()).expect("remove"); 
let v = db.get(column_family, key.as_bytes()).unwrap(); assert!(v.is_none()); } #[test] fn checkpoint_rocksdb_raw_test() { let tmp_dir = TempDir::new().unwrap(); let checkpoints_dir = TempDir::new().unwrap(); let restore_dir = TempDir::new().unwrap(); let dir_path = tmp_dir.path(); let mut checkpoints_dir_path = checkpoints_dir.path().to_path_buf(); checkpoints_dir_path.push("chkp0"); let mut restore_dir_path = restore_dir.path().to_path_buf(); restore_dir_path.push("chkp0"); let db = Rocks::create(dir_path, "testDB".to_string()).unwrap(); let key: &[u8] = b"key"; let initial_value: &[u8] = b"value"; let new_value: &[u8] = b"new value"; let column_family = "default"; db.put(column_family, key, initial_value) .expect("put failed"); db.checkpoint(&checkpoints_dir_path) .expect("checkpoint failed"); db.put(column_family, key, new_value) .expect("second put failed"); let db_from_checkpoint = Rocks::restore( &restore_dir_path, &checkpoints_dir_path, "testDB".to_string(), ) .expect("Could not open checkpointed db"); assert_eq!( new_value, db.get(column_family, key) .expect("Could not get from the original db") .unwrap() .as_ref() ); assert_eq!( initial_value, db_from_checkpoint .get(column_family, key) .expect("Could not get from the checkpoint") .unwrap() .as_ref() ); } #[test] fn checkpoint_restore_state_test() { let mut original_test = TestDb::new(); let mut a_handle = Handle::value("a"); original_test.register_value_handle(&mut a_handle); let checkpoint_dir = { let mut a = a_handle.activate(original_test.clone()); a.set(420).unwrap(); let checkpoint_dir = original_test.checkpoint(); assert_eq!(a.get().unwrap().unwrap(), 420); a.set(69).unwrap(); assert_eq!(a.get().unwrap().unwrap(), 69); checkpoint_dir }; let restored = TestDb::from_checkpoint(&checkpoint_dir.to_string_lossy()); { let mut a_handle = Handle::value("a"); restored.register_value_handle(&mut a_handle); let mut a_restored = a_handle.activate(restored.clone()); // TODO: serialize value state metadata (type names, serialization, etc.) into rocksdb, so // that type mismatches are caught early. Right now it would be possible to, let's say, // store an integer, and then read a float from the restored state backend assert_eq!(a_restored.get().unwrap().unwrap(), 420); a_restored.set(1337).unwrap(); assert_eq!(a_restored.get().unwrap().unwrap(), 1337); } } common_state_tests!(TestDb::new()); }
// TODO: add a warning log here // warn!(logger, "Checkpoint path {:?} exists, deleting"); fs::remove_dir_all(checkpoint_path)?
random_line_split
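The `remove_prefix` method in this record computes an exclusive end key for `delete_range_cf` by bumping the last byte of the prefix. A standalone sketch of that computation, assuming a non-empty prefix whose last byte is below `0xFF` (which holds for the fixed-size serialized keys the backend uses):

```rust
/// Computes the exclusive end key for deleting every key that starts with `prefix`.
/// Assumes `prefix` is non-empty and its last byte is < 0xFF.
fn prefix_end_key(prefix: &[u8]) -> Vec<u8> {
    let mut end = prefix.to_vec();
    // delete_range_cf removes keys in [start, end), so incrementing the least
    // significant byte yields the smallest key that no longer shares the prefix
    *end.last_mut().expect("prefix must not be empty") += 1;
    end
}

fn main() {
    assert_eq!(prefix_end_key(b"state\x01"), b"state\x02".to_vec());
}
```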
reading.rs
use crate::{protocols::ReturnableConnection, Pea2Pea}; use async_trait::async_trait; use tokio::{ io::{AsyncRead, AsyncReadExt}, sync::mpsc, time::sleep, }; use tracing::*; use std::{io, net::SocketAddr, time::Duration}; /// Can be used to specify and enable reading, i.e. receiving inbound messages. /// If handshaking is enabled too, it goes into force only after the handshake has been concluded. #[async_trait] pub trait Reading: Pea2Pea where Self: Clone + Send + Sync +'static, { /// The final (deserialized) type of inbound messages. type Message: Send; /// Prepares the node to receive messages; failures to read from a connection's stream are penalized by a timeout /// defined in `NodeConfig`, while broken/unreadable messages result in an immediate disconnect (in order to avoid /// accidentally reading "borked" messages). fn enable_reading(&self) { let (conn_sender, mut conn_receiver) = mpsc::channel::<ReturnableConnection>( self.node().config().protocol_handler_queue_depth, ); // the main task spawning per-connection tasks reading messages from their streams let self_clone = self.clone(); let reading_task = tokio::spawn(async move { trace!(parent: self_clone.node().span(), "spawned the Reading handler task"); loop { // these objects are sent from `Node::adapt_stream` if let Some((mut conn, conn_returner)) = conn_receiver.recv().await { let addr = conn.addr; let mut reader = conn.reader.take().unwrap(); // safe; it is available at this point let mut buffer = vec![0; self_clone.node().config().conn_read_buffer_size] .into_boxed_slice(); let (inbound_message_sender, mut inbound_message_receiver) = mpsc::channel(self_clone.node().config().conn_inbound_queue_depth); // the task for processing parsed messages let processing_clone = self_clone.clone(); let inbound_processing_task = tokio::spawn(async move { let node = processing_clone.node(); trace!(parent: node.span(), "spawned a task for processing messages from {}", addr); loop { if let Some(msg) = inbound_message_receiver.recv().await { if let Err(e) = processing_clone.process_message(addr, msg).await { error!(parent: node.span(), "can't process an inbound message: {}", e); node.known_peers().register_failure(addr); } } else { node.disconnect(addr); break; } } }); conn.tasks.push(inbound_processing_task); // the task for reading messages from a stream let reader_clone = self_clone.clone(); let reader_task = tokio::spawn(async move { let node = reader_clone.node(); trace!(parent: node.span(), "spawned a task for reading messages from {}", addr); // postpone reads until the connection is fully established; if the process fails, // this task gets aborted, so there is no need for a dedicated timeout while!node.connected_addrs().contains(&addr) { sleep(Duration::from_millis(5)).await; } let mut carry = 0; loop { match reader_clone .read_from_stream( addr, &mut buffer, &mut reader, carry, &inbound_message_sender, ) .await { Ok(leftover) => { carry = leftover; } Err(e) => { node.known_peers().register_failure(addr); if node.config().fatal_io_errors.contains(&e.kind()) { node.disconnect(addr); break; } else { sleep(Duration::from_secs( node.config().invalid_read_delay_secs, )) .await; } } } } }); conn.tasks.push(reader_task); // return the Connection to the Node, resuming Node::adapt_stream if conn_returner.send(Ok(conn)).is_err() { unreachable!("could't return a Connection to the Node"); } } else { error!("the Reading protocol is down!"); break; } } }); self.node().tasks.lock().push(reading_task); // register the ReadingHandler with the Node 
self.node().set_reading_handler(conn_sender.into()); } /// Performs a read from the given reader. The default implementation is buffered; it sacrifices a bit of /// simplicity for better performance. Read messages are sent to a message processing task in order to enable /// faster reads. Returns the number of pending bytes left in the buffer in case of an incomplete read; they /// should be provided to the medthod on the next call as `carry`. async fn read_from_stream<R: AsyncRead + Unpin + Send>( &self, addr: SocketAddr, buffer: &mut [u8], reader: &mut R, carry: usize, message_sender: &mpsc::Sender<Self::Message>, ) -> io::Result<usize> { // perform a read from the stream, being careful not to overwrite any bytes carried over from the previous read match reader.read(&mut buffer[carry..]).await { Ok(0) => return Err(io::ErrorKind::UnexpectedEof.into()), Ok(n) => { trace!(parent: self.node().span(), "read {}B from {}", n, addr); let mut processed = 0; let mut left = carry + n; // several messages could have been read at once; process the contents of the buffer loop { // try to read a single message from the buffer match self.read_message(addr, &buffer[processed..processed + left]) { // a full message was read successfully Ok(Some((msg, len))) => { // advance the counters processed += len; left -= len; trace!( parent: self.node().span(), "isolated {}B as a message from {}; {}B left to process", len, addr, left ); self.node() .known_peers() .register_received_message(addr, len); self.node().stats().register_received_message(len); // send the message for further processing if message_sender.send(msg).await.is_err() { error!(parent: self.node().span(), "the inbound message channel is closed"); return Err(io::ErrorKind::BrokenPipe.into()); } // if the read is exhausted, reset the carry and return if left == 0 { return Ok(0); } } // the message in the buffer is incomplete Ok(None) => { // forbid messages that are larger than the read buffer if left >= buffer.len() { error!(parent: self.node().span(), "a message from {} is too large", addr); return Err(io::ErrorKind::InvalidData.into()); } trace!( parent: self.node().span(), "a message from {} is incomplete; carrying {}B over", addr, left ); // move the leftover bytes to the beginning of the buffer; the next read will append bytes // starting from where the leftover ones end, allowing the message to be completed buffer.copy_within(processed..processed + left, 0); return Ok(left); } // an erroneous message (e.g. an unexpected zero-length payload) Err(_) => { error!(parent: self.node().span(), "a message from {} is invalid", addr); return Err(io::ErrorKind::InvalidData.into()); } } } } // a stream read error Err(e) => { error!(parent: self.node().span(), "can't read from {}: {}", addr, e); Err(e) } } } /// Reads a single message from the given buffer; `Ok(None)` indicates that the message is /// incomplete, i.e. further reads from the stream must be performed in order to produce the whole message. /// Alongside the message it returns the number of bytes the read message occupied in the buffer. An `Err` /// returned here will result in the associated connection being dropped. fn read_message( &self, source: SocketAddr, buffer: &[u8], ) -> io::Result<Option<(Self::Message, usize)>>; /// Processes an inbound message. Can be used to update state, send replies etc. #[allow(unused_variables)] async fn process_message(&self, source: SocketAddr, message: Self::Message) -> io::Result<()>
}
{ // don't do anything by default Ok(()) }
identifier_body
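The default `process_message` above is a no-op; the interesting part of the trait is `read_message`, which must either isolate one complete message, report that the buffer is still incomplete, or reject the input. A free-standing sketch of that contract for a simple `u32` length-prefixed framing (the function name and framing are illustrative, not part of the crate's API):

```rust
use std::io;

/// Returns Ok(Some((payload, bytes_consumed))) for a complete frame,
/// Ok(None) if more bytes are needed, and Err(_) for a malformed frame.
fn read_length_prefixed(buffer: &[u8]) -> io::Result<Option<(Vec<u8>, usize)>> {
    if buffer.len() < 4 {
        return Ok(None); // header not complete yet; the caller carries these bytes over
    }
    let len = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]) as usize;
    if len == 0 {
        return Err(io::ErrorKind::InvalidData.into()); // reject zero-length payloads
    }
    if buffer.len() < 4 + len {
        return Ok(None); // payload not fully read yet
    }
    Ok(Some((buffer[4..4 + len].to_vec(), 4 + len)))
}

fn main() -> io::Result<()> {
    let mut framed = 3u32.to_le_bytes().to_vec();
    framed.extend_from_slice(b"abc");
    assert_eq!(read_length_prefixed(&framed[..5])?, None); // incomplete: carry over
    assert_eq!(read_length_prefixed(&framed)?, Some((b"abc".to_vec(), 7)));
    Ok(())
}
```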
reading.rs
use crate::{protocols::ReturnableConnection, Pea2Pea}; use async_trait::async_trait; use tokio::{ io::{AsyncRead, AsyncReadExt}, sync::mpsc, time::sleep, }; use tracing::*; use std::{io, net::SocketAddr, time::Duration}; /// Can be used to specify and enable reading, i.e. receiving inbound messages. /// If handshaking is enabled too, it goes into force only after the handshake has been concluded. #[async_trait] pub trait Reading: Pea2Pea where Self: Clone + Send + Sync +'static, { /// The final (deserialized) type of inbound messages. type Message: Send; /// Prepares the node to receive messages; failures to read from a connection's stream are penalized by a timeout /// defined in `NodeConfig`, while broken/unreadable messages result in an immediate disconnect (in order to avoid /// accidentally reading "borked" messages). fn enable_reading(&self) { let (conn_sender, mut conn_receiver) = mpsc::channel::<ReturnableConnection>( self.node().config().protocol_handler_queue_depth, ); // the main task spawning per-connection tasks reading messages from their streams let self_clone = self.clone(); let reading_task = tokio::spawn(async move { trace!(parent: self_clone.node().span(), "spawned the Reading handler task"); loop { // these objects are sent from `Node::adapt_stream` if let Some((mut conn, conn_returner)) = conn_receiver.recv().await { let addr = conn.addr; let mut reader = conn.reader.take().unwrap(); // safe; it is available at this point let mut buffer = vec![0; self_clone.node().config().conn_read_buffer_size] .into_boxed_slice(); let (inbound_message_sender, mut inbound_message_receiver) = mpsc::channel(self_clone.node().config().conn_inbound_queue_depth); // the task for processing parsed messages let processing_clone = self_clone.clone(); let inbound_processing_task = tokio::spawn(async move { let node = processing_clone.node(); trace!(parent: node.span(), "spawned a task for processing messages from {}", addr); loop { if let Some(msg) = inbound_message_receiver.recv().await { if let Err(e) = processing_clone.process_message(addr, msg).await { error!(parent: node.span(), "can't process an inbound message: {}", e); node.known_peers().register_failure(addr); } } else { node.disconnect(addr); break; } } }); conn.tasks.push(inbound_processing_task); // the task for reading messages from a stream let reader_clone = self_clone.clone(); let reader_task = tokio::spawn(async move { let node = reader_clone.node(); trace!(parent: node.span(), "spawned a task for reading messages from {}", addr); // postpone reads until the connection is fully established; if the process fails, // this task gets aborted, so there is no need for a dedicated timeout while!node.connected_addrs().contains(&addr) { sleep(Duration::from_millis(5)).await; } let mut carry = 0; loop { match reader_clone .read_from_stream( addr, &mut buffer, &mut reader, carry, &inbound_message_sender, ) .await { Ok(leftover) => { carry = leftover; } Err(e) => { node.known_peers().register_failure(addr); if node.config().fatal_io_errors.contains(&e.kind()) { node.disconnect(addr); break; } else { sleep(Duration::from_secs( node.config().invalid_read_delay_secs, )) .await; } } } } }); conn.tasks.push(reader_task); // return the Connection to the Node, resuming Node::adapt_stream if conn_returner.send(Ok(conn)).is_err() { unreachable!("could't return a Connection to the Node"); } } else { error!("the Reading protocol is down!"); break; } } }); self.node().tasks.lock().push(reading_task); // register the ReadingHandler with the Node 
self.node().set_reading_handler(conn_sender.into()); } /// Performs a read from the given reader. The default implementation is buffered; it sacrifices a bit of /// simplicity for better performance. Read messages are sent to a message processing task in order to enable /// faster reads. Returns the number of pending bytes left in the buffer in case of an incomplete read; they /// should be provided to the medthod on the next call as `carry`. async fn read_from_stream<R: AsyncRead + Unpin + Send>( &self, addr: SocketAddr, buffer: &mut [u8], reader: &mut R, carry: usize, message_sender: &mpsc::Sender<Self::Message>, ) -> io::Result<usize> { // perform a read from the stream, being careful not to overwrite any bytes carried over from the previous read match reader.read(&mut buffer[carry..]).await { Ok(0) => return Err(io::ErrorKind::UnexpectedEof.into()), Ok(n) => { trace!(parent: self.node().span(), "read {}B from {}", n, addr); let mut processed = 0; let mut left = carry + n; // several messages could have been read at once; process the contents of the buffer loop { // try to read a single message from the buffer match self.read_message(addr, &buffer[processed..processed + left]) { // a full message was read successfully Ok(Some((msg, len))) => { // advance the counters processed += len; left -= len; trace!( parent: self.node().span(), "isolated {}B as a message from {}; {}B left to process", len, addr, left ); self.node() .known_peers() .register_received_message(addr, len); self.node().stats().register_received_message(len); // send the message for further processing if message_sender.send(msg).await.is_err() { error!(parent: self.node().span(), "the inbound message channel is closed"); return Err(io::ErrorKind::BrokenPipe.into()); } // if the read is exhausted, reset the carry and return if left == 0 { return Ok(0); } } // the message in the buffer is incomplete Ok(None) => { // forbid messages that are larger than the read buffer if left >= buffer.len() { error!(parent: self.node().span(), "a message from {} is too large", addr); return Err(io::ErrorKind::InvalidData.into()); } trace!( parent: self.node().span(), "a message from {} is incomplete; carrying {}B over", addr, left ); // move the leftover bytes to the beginning of the buffer; the next read will append bytes // starting from where the leftover ones end, allowing the message to be completed buffer.copy_within(processed..processed + left, 0); return Ok(left); } // an erroneous message (e.g. an unexpected zero-length payload) Err(_) => { error!(parent: self.node().span(), "a message from {} is invalid", addr); return Err(io::ErrorKind::InvalidData.into()); } } } } // a stream read error Err(e) => { error!(parent: self.node().span(), "can't read from {}: {}", addr, e); Err(e) } } } /// Reads a single message from the given buffer; `Ok(None)` indicates that the message is /// incomplete, i.e. further reads from the stream must be performed in order to produce the whole message. /// Alongside the message it returns the number of bytes the read message occupied in the buffer. An `Err` /// returned here will result in the associated connection being dropped. fn read_message( &self, source: SocketAddr, buffer: &[u8], ) -> io::Result<Option<(Self::Message, usize)>>; /// Processes an inbound message. Can be used to update state, send replies etc. #[allow(unused_variables)] async fn
(&self, source: SocketAddr, message: Self::Message) -> io::Result<()> { // don't do anything by default Ok(()) } }
process_message
identifier_name
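A small illustration of the carry mechanism used by `read_from_stream`: when a message is only partially present, the leftover bytes are shifted to the front of the buffer with `copy_within`, and the next stream read appends after them. Buffer contents and sizes here are made up for the example:

```rust
fn main() {
    let mut buffer = [0u8; 8];
    // pretend a previous read left 5 bytes in the buffer, of which the first 3 were
    // consumed as a complete message and 2 belong to the next, still incomplete one
    buffer[..5].copy_from_slice(b"abcde");
    let processed = 3;
    let left = 2;
    // move the leftover bytes to the front; the next read then appends at index `left`,
    // i.e. `reader.read(&mut buffer[left..])`
    buffer.copy_within(processed..processed + left, 0);
    assert_eq!(&buffer[..left], b"de");
}
```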
reading.rs
use crate::{protocols::ReturnableConnection, Pea2Pea}; use async_trait::async_trait; use tokio::{ io::{AsyncRead, AsyncReadExt}, sync::mpsc, time::sleep, }; use tracing::*; use std::{io, net::SocketAddr, time::Duration}; /// Can be used to specify and enable reading, i.e. receiving inbound messages. /// If handshaking is enabled too, it goes into force only after the handshake has been concluded. #[async_trait] pub trait Reading: Pea2Pea where Self: Clone + Send + Sync +'static, { /// The final (deserialized) type of inbound messages. type Message: Send; /// Prepares the node to receive messages; failures to read from a connection's stream are penalized by a timeout /// defined in `NodeConfig`, while broken/unreadable messages result in an immediate disconnect (in order to avoid /// accidentally reading "borked" messages). fn enable_reading(&self) { let (conn_sender, mut conn_receiver) = mpsc::channel::<ReturnableConnection>( self.node().config().protocol_handler_queue_depth, ); // the main task spawning per-connection tasks reading messages from their streams let self_clone = self.clone(); let reading_task = tokio::spawn(async move { trace!(parent: self_clone.node().span(), "spawned the Reading handler task"); loop { // these objects are sent from `Node::adapt_stream` if let Some((mut conn, conn_returner)) = conn_receiver.recv().await { let addr = conn.addr; let mut reader = conn.reader.take().unwrap(); // safe; it is available at this point let mut buffer = vec![0; self_clone.node().config().conn_read_buffer_size] .into_boxed_slice(); let (inbound_message_sender, mut inbound_message_receiver) = mpsc::channel(self_clone.node().config().conn_inbound_queue_depth); // the task for processing parsed messages let processing_clone = self_clone.clone(); let inbound_processing_task = tokio::spawn(async move { let node = processing_clone.node(); trace!(parent: node.span(), "spawned a task for processing messages from {}", addr); loop { if let Some(msg) = inbound_message_receiver.recv().await { if let Err(e) = processing_clone.process_message(addr, msg).await { error!(parent: node.span(), "can't process an inbound message: {}", e); node.known_peers().register_failure(addr); } } else { node.disconnect(addr); break; } } }); conn.tasks.push(inbound_processing_task); // the task for reading messages from a stream let reader_clone = self_clone.clone(); let reader_task = tokio::spawn(async move { let node = reader_clone.node(); trace!(parent: node.span(), "spawned a task for reading messages from {}", addr); // postpone reads until the connection is fully established; if the process fails, // this task gets aborted, so there is no need for a dedicated timeout while!node.connected_addrs().contains(&addr) { sleep(Duration::from_millis(5)).await; } let mut carry = 0; loop { match reader_clone .read_from_stream( addr, &mut buffer, &mut reader, carry, &inbound_message_sender, ) .await { Ok(leftover) => { carry = leftover; } Err(e) => { node.known_peers().register_failure(addr); if node.config().fatal_io_errors.contains(&e.kind()) { node.disconnect(addr); break; } else { sleep(Duration::from_secs( node.config().invalid_read_delay_secs, )) .await; } } } } }); conn.tasks.push(reader_task); // return the Connection to the Node, resuming Node::adapt_stream if conn_returner.send(Ok(conn)).is_err() { unreachable!("could't return a Connection to the Node"); } } else { error!("the Reading protocol is down!"); break; } } }); self.node().tasks.lock().push(reading_task); // register the ReadingHandler with the Node 
self.node().set_reading_handler(conn_sender.into()); } /// Performs a read from the given reader. The default implementation is buffered; it sacrifices a bit of /// simplicity for better performance. Read messages are sent to a message processing task in order to enable /// faster reads. Returns the number of pending bytes left in the buffer in case of an incomplete read; they /// should be provided to the medthod on the next call as `carry`. async fn read_from_stream<R: AsyncRead + Unpin + Send>( &self, addr: SocketAddr, buffer: &mut [u8], reader: &mut R, carry: usize, message_sender: &mpsc::Sender<Self::Message>, ) -> io::Result<usize> { // perform a read from the stream, being careful not to overwrite any bytes carried over from the previous read match reader.read(&mut buffer[carry..]).await { Ok(0) => return Err(io::ErrorKind::UnexpectedEof.into()), Ok(n) => { trace!(parent: self.node().span(), "read {}B from {}", n, addr); let mut processed = 0; let mut left = carry + n; // several messages could have been read at once; process the contents of the buffer loop { // try to read a single message from the buffer match self.read_message(addr, &buffer[processed..processed + left]) { // a full message was read successfully Ok(Some((msg, len))) => { // advance the counters processed += len; left -= len; trace!( parent: self.node().span(), "isolated {}B as a message from {}; {}B left to process", len, addr, left ); self.node() .known_peers() .register_received_message(addr, len); self.node().stats().register_received_message(len); // send the message for further processing if message_sender.send(msg).await.is_err() { error!(parent: self.node().span(), "the inbound message channel is closed"); return Err(io::ErrorKind::BrokenPipe.into()); } // if the read is exhausted, reset the carry and return if left == 0 { return Ok(0); } } // the message in the buffer is incomplete Ok(None) => { // forbid messages that are larger than the read buffer if left >= buffer.len() { error!(parent: self.node().span(), "a message from {} is too large", addr); return Err(io::ErrorKind::InvalidData.into()); } trace!( parent: self.node().span(), "a message from {} is incomplete; carrying {}B over", addr, left ); // move the leftover bytes to the beginning of the buffer; the next read will append bytes // starting from where the leftover ones end, allowing the message to be completed buffer.copy_within(processed..processed + left, 0); return Ok(left); } // an erroneous message (e.g. an unexpected zero-length payload) Err(_) => { error!(parent: self.node().span(), "a message from {} is invalid", addr); return Err(io::ErrorKind::InvalidData.into()); } } } } // a stream read error Err(e) => { error!(parent: self.node().span(), "can't read from {}: {}", addr, e); Err(e) } } } /// Reads a single message from the given buffer; `Ok(None)` indicates that the message is /// incomplete, i.e. further reads from the stream must be performed in order to produce the whole message. /// Alongside the message it returns the number of bytes the read message occupied in the buffer. An `Err` /// returned here will result in the associated connection being dropped. fn read_message(
buffer: &[u8], ) -> io::Result<Option<(Self::Message, usize)>>; /// Processes an inbound message. Can be used to update state, send replies etc. #[allow(unused_variables)] async fn process_message(&self, source: SocketAddr, message: Self::Message) -> io::Result<()> { // don't do anything by default Ok(()) } }
&self, source: SocketAddr,
random_line_split
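When a read fails, the reader task either disconnects (for error kinds listed in `NodeConfig::fatal_io_errors`) or sleeps and retries. A sketch of that classification; the particular error kinds listed here are illustrative, not the crate's actual defaults:

```rust
use std::io;

fn is_fatal(kind: io::ErrorKind, fatal_kinds: &[io::ErrorKind]) -> bool {
    fatal_kinds.contains(&kind)
}

fn main() {
    let fatal_kinds = [
        io::ErrorKind::UnexpectedEof, // peer closed the stream
        io::ErrorKind::InvalidData,   // unreadable message: disconnect immediately
        io::ErrorKind::BrokenPipe,    // inbound channel gone
    ];
    assert!(is_fatal(io::ErrorKind::InvalidData, &fatal_kinds));
    assert!(!is_fatal(io::ErrorKind::WouldBlock, &fatal_kinds)); // retry after a delay
}
```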
reading.rs
use crate::{protocols::ReturnableConnection, Pea2Pea}; use async_trait::async_trait; use tokio::{ io::{AsyncRead, AsyncReadExt}, sync::mpsc, time::sleep, }; use tracing::*; use std::{io, net::SocketAddr, time::Duration}; /// Can be used to specify and enable reading, i.e. receiving inbound messages. /// If handshaking is enabled too, it goes into force only after the handshake has been concluded. #[async_trait] pub trait Reading: Pea2Pea where Self: Clone + Send + Sync +'static, { /// The final (deserialized) type of inbound messages. type Message: Send; /// Prepares the node to receive messages; failures to read from a connection's stream are penalized by a timeout /// defined in `NodeConfig`, while broken/unreadable messages result in an immediate disconnect (in order to avoid /// accidentally reading "borked" messages). fn enable_reading(&self) { let (conn_sender, mut conn_receiver) = mpsc::channel::<ReturnableConnection>( self.node().config().protocol_handler_queue_depth, ); // the main task spawning per-connection tasks reading messages from their streams let self_clone = self.clone(); let reading_task = tokio::spawn(async move { trace!(parent: self_clone.node().span(), "spawned the Reading handler task"); loop { // these objects are sent from `Node::adapt_stream` if let Some((mut conn, conn_returner)) = conn_receiver.recv().await { let addr = conn.addr; let mut reader = conn.reader.take().unwrap(); // safe; it is available at this point let mut buffer = vec![0; self_clone.node().config().conn_read_buffer_size] .into_boxed_slice(); let (inbound_message_sender, mut inbound_message_receiver) = mpsc::channel(self_clone.node().config().conn_inbound_queue_depth); // the task for processing parsed messages let processing_clone = self_clone.clone(); let inbound_processing_task = tokio::spawn(async move { let node = processing_clone.node(); trace!(parent: node.span(), "spawned a task for processing messages from {}", addr); loop { if let Some(msg) = inbound_message_receiver.recv().await
else { node.disconnect(addr); break; } } }); conn.tasks.push(inbound_processing_task); // the task for reading messages from a stream let reader_clone = self_clone.clone(); let reader_task = tokio::spawn(async move { let node = reader_clone.node(); trace!(parent: node.span(), "spawned a task for reading messages from {}", addr); // postpone reads until the connection is fully established; if the process fails, // this task gets aborted, so there is no need for a dedicated timeout while!node.connected_addrs().contains(&addr) { sleep(Duration::from_millis(5)).await; } let mut carry = 0; loop { match reader_clone .read_from_stream( addr, &mut buffer, &mut reader, carry, &inbound_message_sender, ) .await { Ok(leftover) => { carry = leftover; } Err(e) => { node.known_peers().register_failure(addr); if node.config().fatal_io_errors.contains(&e.kind()) { node.disconnect(addr); break; } else { sleep(Duration::from_secs( node.config().invalid_read_delay_secs, )) .await; } } } } }); conn.tasks.push(reader_task); // return the Connection to the Node, resuming Node::adapt_stream if conn_returner.send(Ok(conn)).is_err() { unreachable!("could't return a Connection to the Node"); } } else { error!("the Reading protocol is down!"); break; } } }); self.node().tasks.lock().push(reading_task); // register the ReadingHandler with the Node self.node().set_reading_handler(conn_sender.into()); } /// Performs a read from the given reader. The default implementation is buffered; it sacrifices a bit of /// simplicity for better performance. Read messages are sent to a message processing task in order to enable /// faster reads. Returns the number of pending bytes left in the buffer in case of an incomplete read; they /// should be provided to the medthod on the next call as `carry`. 
async fn read_from_stream<R: AsyncRead + Unpin + Send>( &self, addr: SocketAddr, buffer: &mut [u8], reader: &mut R, carry: usize, message_sender: &mpsc::Sender<Self::Message>, ) -> io::Result<usize> { // perform a read from the stream, being careful not to overwrite any bytes carried over from the previous read match reader.read(&mut buffer[carry..]).await { Ok(0) => return Err(io::ErrorKind::UnexpectedEof.into()), Ok(n) => { trace!(parent: self.node().span(), "read {}B from {}", n, addr); let mut processed = 0; let mut left = carry + n; // several messages could have been read at once; process the contents of the buffer loop { // try to read a single message from the buffer match self.read_message(addr, &buffer[processed..processed + left]) { // a full message was read successfully Ok(Some((msg, len))) => { // advance the counters processed += len; left -= len; trace!( parent: self.node().span(), "isolated {}B as a message from {}; {}B left to process", len, addr, left ); self.node() .known_peers() .register_received_message(addr, len); self.node().stats().register_received_message(len); // send the message for further processing if message_sender.send(msg).await.is_err() { error!(parent: self.node().span(), "the inbound message channel is closed"); return Err(io::ErrorKind::BrokenPipe.into()); } // if the read is exhausted, reset the carry and return if left == 0 { return Ok(0); } } // the message in the buffer is incomplete Ok(None) => { // forbid messages that are larger than the read buffer if left >= buffer.len() { error!(parent: self.node().span(), "a message from {} is too large", addr); return Err(io::ErrorKind::InvalidData.into()); } trace!( parent: self.node().span(), "a message from {} is incomplete; carrying {}B over", addr, left ); // move the leftover bytes to the beginning of the buffer; the next read will append bytes // starting from where the leftover ones end, allowing the message to be completed buffer.copy_within(processed..processed + left, 0); return Ok(left); } // an erroneous message (e.g. an unexpected zero-length payload) Err(_) => { error!(parent: self.node().span(), "a message from {} is invalid", addr); return Err(io::ErrorKind::InvalidData.into()); } } } } // a stream read error Err(e) => { error!(parent: self.node().span(), "can't read from {}: {}", addr, e); Err(e) } } } /// Reads a single message from the given buffer; `Ok(None)` indicates that the message is /// incomplete, i.e. further reads from the stream must be performed in order to produce the whole message. /// Alongside the message it returns the number of bytes the read message occupied in the buffer. An `Err` /// returned here will result in the associated connection being dropped. fn read_message( &self, source: SocketAddr, buffer: &[u8], ) -> io::Result<Option<(Self::Message, usize)>>; /// Processes an inbound message. Can be used to update state, send replies etc. #[allow(unused_variables)] async fn process_message(&self, source: SocketAddr, message: Self::Message) -> io::Result<()> { // don't do anything by default Ok(()) } }
{ if let Err(e) = processing_clone.process_message(addr, msg).await { error!(parent: node.span(), "can't process an inbound message: {}", e); node.known_peers().register_failure(addr); } }
conditional_block
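The conditional block above is the consumer side of the per-connection message queue: a spawned task drains an `mpsc` receiver until the sender side is dropped. A minimal sketch of that pattern, assuming the tokio runtime features (`rt`, `macros`, `sync`) are enabled; the message type and queue depth are chosen arbitrarily:

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (inbound_tx, mut inbound_rx) = mpsc::channel::<String>(64);

    // processing task: drains the queue until the sender side is dropped
    let processing = tokio::spawn(async move {
        while let Some(msg) = inbound_rx.recv().await {
            // the real code would call `process_message` here and register failures
            println!("processing {msg}");
        }
    });

    // the read loop would push each parsed message into the queue like this
    inbound_tx.send("hello".to_string()).await.unwrap();
    drop(inbound_tx); // closing the channel lets the processing task finish
    processing.await.unwrap();
}
```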
lib.rs
// Copyright 2018 Torsten Weber // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! # LHEF //! //! The `lhef` library is a [`rust`] library to read and write files in //! the [`LesHouchesEvents`] format. //! It can be used to just read the common blocks specified by the //! standard, but is flexible enough to also handle the additional //! information that is allowed by the standard. //! This can be done either by reading them as `String`s or by parsing //! them into custom data structures. //! Reading common blocks has been tested for event files generated by //! [`MG5_aMC@NLO`] and [`HELAC_NLO`]. //! Specialized data structures for the reweighting information written //! by `HELAC_NLO` are included. //! //! ## Usage examples //! //! ### Reading a file and ignoring all extra information: //! //! ```rust,ignore //! use lhef::ReadLhe; //! use lhef::plain::LheFile; //! //! let lhe = LheFile::read_lhe_from_file(&"events.lhe").unwrap(); //! //! // Energy of beam 1 //! let beam_1_energy = lhe.init.beam_1_energy; //! //! // pz of the 4rd particle in the 7th event //! let pz = lhe.events[6].particles[3].momentum.pz; //! ``` //! //! ### Reading a file generated including extra information as strings: //! //! Specialized data structures for e.g. Madgraph do not exist, but the //! additional information stored in the event files written by it can //! still be extracted as strings: //! //! ```rust,ignore //! use lhef::ReadLhe; //! use lhef::string::{LheFile, EventExtra}; //! //! let lhe = LheFile::read_lhe_from_file(&"events.lhe").unwrap(); //! //! // extra information of the 5th event //! let EventExtra(ref extra) = lhe.events[4].extra; //! ``` //! //! ### Reading a file generated by `HELAC-NLO` //! //! This library comes with a module containing special data structures //! for the additional information contained in event files generated by //! `HELAC-NLO`. //! Therefore event files generated by `HELAC` can be read directly into //! the appropriate structures: //! //! ```rust,ignore //! use lhef::ReadLhe; //! use lhef::helac::LheFileRS; //! //! let lhe = LheFileRS::read_lhe_from_file(&"events.lhe").unwrap(); //! //! // x1 of the 5th event //! let extra = lhe.events[4].extra.pdf.x1; //! ``` //! //! ## Supported file types //! //! This library comes with three specialization modules to handle extra //! information contained in event files: //! //! ### plain //! //! The [`plain`] module allows to read `lhe` files without taking any //! extra information into account. //! The [`plain::LheFile`] struct contains only the information that is //! guaranteed to be present in all `lhe` files. //! The `extra` fields on the file, init and event objects are still //! present, but only return dummy objects that do not contain any //! information. //! The `comment` and the `header` are also dummy objects. //! //! //! ### string //! //! The [`string`] module allows to read `lhe` files and keeping all the //! extra information in the files as unparsed strings. //! The `comment` and the `header` are kept as strings, without the //! start and end tags. //! All extra information has leading and trailing whitespace removed. //! Whitespace (including linebreaks) within the strings is conserved. //! //! //! ### helac //! //! 
The [`helac`] module contains specialized structs that the extra //! information contained in `lhe` files generated by `HELAC-NLO` is //! parsed into. //! The comment is kept as a string, and since `HELAC` `lhe` files do //! not contain a header, the header is a dummy object. //! //! //! ### Adding support for new file types //! //! To add new file types, you need to add types that implement the //! `ReadLhe` and `WriteLhe` traits for the additional information //! stored in the file type. //! The type signature of the `read_from_lhe` function of the `ReadLhe` //! trait means that you should use [`nom`] to parse your type. //! Your implementations need to parse the opening and end tags for //! comments (`<!--` and `-->`) and the header (`<header>` and //! `</header>`) respectively, but must leave the tags for the init //! section and for events alone. //! With these implementations you can then use `LheFileGeneric` with //! your types to read and write `lhe` files. //! //! //! [`rust`]: https://www.rust-lang.org //! [`LesHouchesEvents`]: https://arxiv.org/abs/hep-ph/0609017 //! [`MG5_aMC@NLO`]: https://launchpad.net/mg5amcnlo //! [`HELAC_NLO`]: http://helac-phegas.web.cern.ch/helac-phegas/ //! [`nom`]: https://github.com/Geal/nom //! [`plain`]: plain/index.html //! [`string`]: string/index.html //! [`helac`]: helac/index.html extern crate lorentz_vector; #[macro_use] extern crate nom; #[cfg(test)] #[macro_use] extern crate quickcheck; #[cfg(test)] #[macro_use] extern crate serde; #[cfg(test)] #[cfg(test)] extern crate serde_json; #[macro_use] pub mod nom_util; pub mod generic; pub mod helac; pub mod plain; pub mod string; use lorentz_vector::LorentzVector; use std::error; use std::fmt; use std::fs; use std::io; use std::io::Read; use std::marker; use std::path::Path; #[cfg(test)] use quickcheck::Arbitrary; #[cfg(test)] use quickcheck::Gen; use nom_util::{parse_f64, parse_i64}; /// A type to use for pdg ids /// /// See the [Particle Data Group] website for more information. /// A list of all particle numbers can be found [here]. /// /// [Particle Data Group]: http://pdg.lbl.gov/ /// [here]: http://pdg.lbl.gov/2017/reviews/rpp2017-rev-monte-carlo-numbering.pdf pub type PdgId = i64; /// A trait to read (parts of) lhe files /// /// This trait needs to be implemented for a type to be able to use it /// in [`LheFileGeneric`] to hold extra information. /// /// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html pub trait ReadLhe where Self: marker::Sized, { /// Read an lhe object from a byte string /// /// The input to this function is the remaining input in the file /// (or just a chunk of it) and if successful, it should return the /// parsed object and the input left after parsing the object. /// See the [`nom documentation`] for more information. /// /// [`nom documentation`]: http://rust.unhandledexpression.com/nom/ fn read_lhe(&[u8]) -> nom::IResult<&[u8], Self>; /// Read an lhe object from a file fn read_lhe_from_file<P: AsRef<Path>>(path: &P) -> Result<Self, ReadError> { let mut file = fs::File::open(path)?; let mut contents = Vec::new(); file.read_to_end(&mut contents)?; Self::read_lhe(&contents) .to_full_result() .map_err(ReadError::Nom) } } /// A trait to write (parts of) lhe files /// /// This trait needs to be implemented for a type to be able to use it /// in [`LheFileGeneric`] to hold extra information. 
/// /// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html pub trait WriteLhe { /// Write the object to a writer fn write_lhe<W: io::Write>(&self, &mut W) -> io::Result<()>; /// Write the object to a file fn write_lhe_to_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()> { let mut file = fs::File::create(path)?; self.write_lhe(&mut file) } } /// Errors that may occur when reading lhe objects from files #[derive(Debug)] pub enum ReadError { /// An io error occured Io(io::Error), /// A parse error occured Nom(nom::IError), } impl From<io::Error> for ReadError { fn from(err: io::Error) -> ReadError { ReadError::Io(err) } } impl fmt::Display for ReadError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ReadError::Io(ref err) => { write!(f, "Failed to read the lhe file with an IO error: {}", err) } ReadError::Nom(ref err) => write!( f, "Failed to read the lhe file with a parse error: {:?}", err ), } } } impl error::Error for ReadError { fn description(&self) -> &str
fn cause(&self) -> Option<&error::Error> { match *self { ReadError::Io(ref err) => Some(err), ReadError::Nom(_) => None, } } } /// A struct for process information /// /// This is the per process information contained in the `init` section /// of `lhe` files. /// When reading a file, the `Init` struct will contain `NPRUP` /// `ProcInfo` objects. /// `ProcInfo` is part of the compulsory initialization information. /// /// For more information on the fields, see the [`lhe`] paper and the /// documentation of the [`LHA common blocks`]. /// The names in parentheses are the names of the fields in these /// papers. /// /// # Examples /// /// ```rust /// use lhef::{ProcInfo, ReadLhe}; /// use lhef::plain::LheFile; /// /// let bytes = b"\ /// <LesHouchesEvents version=\"1.0\"> /// <init> /// 2212 2212 6500 6500 0 0 13100 13100 3 2 /// 2.1 3.2E-03 1.0E+00 1 /// 4.0 7.4E-03 1.0E+00 2 /// </init> /// </LesHouchesEvents>"; /// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap(); /// assert_eq!(lhe.init.process_info.len(), 2); /// assert_eq!(lhe.init.process_info[0].xsect, 2.1); /// assert_eq!(lhe.init.process_info[1].xsect_err, 0.0074); /// ``` /// /// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017 /// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068 #[derive(Clone, Debug, PartialEq)] #[cfg_attr(test, derive(Serialize, Deserialize))] pub struct ProcInfo { /// The cross section of the process (`XSECUP`) pub xsect: f64, /// The cross section error of the process (`XERRUP`) pub xsect_err: f64, /// The maximum weight of the events of the process (`XMAXUP`) pub maximum_weight: f64, /// The process id (`LPRUP`) pub process_id: i64, } impl ReadLhe for ProcInfo { fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], ProcInfo> { do_parse!( input, xsect: ws!(parse_f64) >> xsect_err: ws!(parse_f64) >> maximum_weight: ws!(parse_f64) >> process_id: ws!(parse_i64) >> (ProcInfo { xsect, xsect_err, maximum_weight, process_id, }) ) } } impl WriteLhe for ProcInfo { fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { writeln!( writer, "{:e} {:e} {:e} {}", self.xsect, self.xsect_err, self.maximum_weight, self.process_id ) } } #[cfg(test)] impl Arbitrary for ProcInfo { fn arbitrary<G: Gen>(gen: &mut G) -> ProcInfo { ProcInfo { xsect: Arbitrary::arbitrary(gen), xsect_err: Arbitrary::arbitrary(gen), maximum_weight: Arbitrary::arbitrary(gen), process_id: Arbitrary::arbitrary(gen), } } } /// A particle in lhe format /// /// An event will contain as many `Particle`s as there are particles in /// the event. /// `Particle` is part of the compulsory event information. /// /// For more information on the fields, see the [`lhe`] paper and the /// documentation of the [`LHA common blocks`]. /// The names in parentheses are the names of the fields in these /// papers. 
/// /// # Examples /// /// ```rust /// use lhef::{Particle, ReadLhe}; /// use lhef::plain::LheFile; /// /// let bytes = b"\ /// <LesHouchesEvents version=\"1.0\"> /// <init> /// 2212 2212 6500 6500 0 0 13100 13100 3 1 /// 2.1 3.2E-03 1.0E+00 1 /// </init> /// <event> /// 4 1 +1.04e-01 1.00e+03 7.54e-03 8.68e-02 /// -11 -1 0 0 0 0 +0.00e+00 +0.00e+00 +5.00e+02 5.00e+02 0.00e+00 0.00e+00 -1.00e+00 /// 11 -1 0 0 0 0 -0.00e+00 -0.00e+00 -5.00e+02 5.00e+02 0.00e+00 0.00e+00 1.00e+00 /// -13 1 1 2 0 0 -1.97e+02 -4.52e+02 -7.94e+01 5.00e+02 0.00e+00 0.00e+00 -1.00e+00 /// 13 1 1 2 0 0 +1.97e+02 +4.52e+02 +7.94e+01 5.00e+02 0.00e+00 0.00e+00 1.00e+00 /// </event> /// </LesHouchesEvents>"; /// /// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap(); /// let event = &lhe.events[0]; /// assert_eq!(event.particles.len(), 4); /// assert_eq!(event.particles[0].pdg_id, -11); /// assert_eq!(event.particles[3].momentum.py, 452.); /// ``` /// /// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017 /// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068 #[derive(Clone, Debug, PartialEq)] #[cfg_attr(test, derive(Serialize, Deserialize))] pub struct Particle { /// The pdg id of the particle (`IDUP`) pub pdg_id: PdgId, /// The status code of the particle (`ISTUP`) pub status: i64, /// The id of the first mother of the particle (`MOTHUP(1)`). /// This isn't a pdg id, but a (1 based) index into the particles vector. pub mother_1_id: i64, /// The id of the second mother of the particle (`MOTHUP(2)`). /// This isn't a pdg id, but a (1 based) index into the particles vector. pub mother_2_id: i64, /// The color of the particle (`ICOLUP(1)`) pub color_1: i64, /// The color of the particle (`ICOLUP(2)`) pub color_2: i64, /// The four momentum of the particle (`PUP` 1 - 4) pub momentum: LorentzVector, /// The mass of the particle (`PUP(5)`) pub mass: f64, /// The proper lifetime of the particle (`VTIMUP`) pub proper_lifetime: f64, /// The spin of the particle (`SPINUP`) pub spin: f64, } impl ReadLhe for Particle { fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], Particle> { do_parse!( input, pdg_id: ws!(parse_i64) >> status: ws!(parse_i64) >> mother_1_id: ws!(parse_i64) >> mother_2_id: ws!(parse_i64) >> color_1: ws!(parse_i64) >> color_2: ws!(parse_i64) >> px: ws!(parse_f64) >> py: ws!(parse_f64) >> pz: ws!(parse_f64) >> e: ws!(parse_f64) >> mass: ws!(parse_f64) >> proper_lifetime: ws!(parse_f64) >> spin: ws!(parse_f64) >> (Particle { pdg_id, status, mother_1_id, mother_2_id, color_1, color_2, momentum: LorentzVector { e, px, py, pz }, mass, proper_lifetime, spin, }) ) } } impl WriteLhe for Particle { fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { writeln!( writer, "{} {} {} {} {} {} {:e} {:e} {:e} {:e} {:e} {:e} {:e}", self.pdg_id, self.status, self.mother_1_id, self.mother_2_id, self.color_1, self.color_2, self.momentum.px, self.momentum.py, self.momentum.pz, self.momentum.e, self.mass, self.proper_lifetime, self.spin ) } } #[cfg(test)] impl Arbitrary for Particle { fn arbitrary<G: Gen>(gen: &mut G) -> Particle { let momentum = LorentzVector { e: Arbitrary::arbitrary(gen), px: Arbitrary::arbitrary(gen), py: Arbitrary::arbitrary(gen), pz: Arbitrary::arbitrary(gen), }; Particle { pdg_id: Arbitrary::arbitrary(gen), status: Arbitrary::arbitrary(gen), mother_1_id: Arbitrary::arbitrary(gen), mother_2_id: Arbitrary::arbitrary(gen), color_1: Arbitrary::arbitrary(gen), color_2: Arbitrary::arbitrary(gen), momentum, mass: Arbitrary::arbitrary(gen), proper_lifetime: Arbitrary::arbitrary(gen), 
spin: Arbitrary::arbitrary(gen), } } } #[cfg(test)] mod tests { use lorentz_vector::LorentzVector; use super::{ReadLhe, WriteLhe}; use super::{Particle, ProcInfo}; #[test] fn read_procinfo() { let bytes = b"1. 2. 3. 4\n"; let expected = ProcInfo { xsect: 1., xsect_err: 2., maximum_weight: 3., process_id: 4, }; let result = ProcInfo::read_lhe(bytes).to_full_result().unwrap(); assert_eq!(result, expected); } #[test] fn read_particle() { let bytes = b"1 2 3 4 5 6 7. 8. 9. 10. 11. 12. 13.\n"; let expected = Particle { pdg_id: 1, status: 2, mother_1_id: 3, mother_2_id: 4, color_1: 5, color_2: 6, momentum: LorentzVector { px: 7., py: 8., pz: 9., e: 10., }, mass: 11., proper_lifetime: 12., spin: 13., }; let result = Particle::read_lhe(bytes).to_full_result().unwrap(); assert_eq!(result, expected); } quickcheck! { fn proc_info_roundtrip_qc(p: ProcInfo) -> bool { let mut bytes = Vec::new(); p.write_lhe(&mut bytes).unwrap(); let round = match ProcInfo::read_lhe(&bytes).to_full_result() { Ok(r) => r, Err(err) => panic!("Failed to read roundtrip: {:?}", err), }; p == round } } quickcheck! { fn particle_roundtrip_qc(m: Particle) -> bool { let mut bytes = Vec::new(); m.write_lhe(&mut bytes).unwrap(); let round = match Particle::read_lhe(&bytes).to_full_result() { Ok(r) => r, Err(err) => panic!("Failed to read roundtrip: {:?}", err), }; m == round } } }
{ match *self { ReadError::Io(..) => &"Failed to read the lhe file with an IO error", ReadError::Nom(..) => &"Failed to read the lhe file with a parse error", } }
identifier_body
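The `ReadError` type above wraps either the IO failure or the nom parse failure produced by `read_lhe_from_file`, so callers can branch on the two variants. A minimal usage sketch, assuming the `plain::LheFile` type from the doc examples; it is illustrative and not part of the crate:

// Sketch: distinguish IO errors from parse errors when loading an event file.
use lhef::plain::LheFile;
use lhef::{ReadError, ReadLhe};

fn load(path: &str) -> Option<LheFile> {
    match LheFile::read_lhe_from_file(&path) {
        Ok(lhe) => Some(lhe),
        Err(ReadError::Io(err)) => {
            eprintln!("could not read {}: {}", path, err);
            None
        }
        Err(ReadError::Nom(err)) => {
            eprintln!("could not parse {}: {:?}", path, err);
            None
        }
    }
}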
lib.rs
// Copyright 2018 Torsten Weber // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! # LHEF //! //! The `lhef` library is a [`rust`] library to read and write files in //! the [`LesHouchesEvents`] format. //! It can be used to just read the common blocks specified by the //! standard, but is flexible enough to also handle the additional //! information that is allowed by the standard. //! This can be done either by reading them as `String`s or by parsing //! them into custom data structures. //! Reading common blocks has been tested for event files generated by //! [`MG5_aMC@NLO`] and [`HELAC_NLO`]. //! Specialized data structures for the reweighting information written //! by `HELAC_NLO` are included. //! //! ## Usage examples //! //! ### Reading a file and ignoring all extra information: //! //! ```rust,ignore //! use lhef::ReadLhe; //! use lhef::plain::LheFile; //! //! let lhe = LheFile::read_lhe_from_file(&"events.lhe").unwrap(); //! //! // Energy of beam 1 //! let beam_1_energy = lhe.init.beam_1_energy; //! //! // pz of the 4rd particle in the 7th event //! let pz = lhe.events[6].particles[3].momentum.pz; //! ``` //! //! ### Reading a file generated including extra information as strings: //! //! Specialized data structures for e.g. Madgraph do not exist, but the //! additional information stored in the event files written by it can //! still be extracted as strings: //! //! ```rust,ignore //! use lhef::ReadLhe; //! use lhef::string::{LheFile, EventExtra}; //! //! let lhe = LheFile::read_lhe_from_file(&"events.lhe").unwrap(); //! //! // extra information of the 5th event //! let EventExtra(ref extra) = lhe.events[4].extra; //! ``` //! //! ### Reading a file generated by `HELAC-NLO` //! //! This library comes with a module containing special data structures //! for the additional information contained in event files generated by //! `HELAC-NLO`. //! Therefore event files generated by `HELAC` can be read directly into //! the appropriate structures: //! //! ```rust,ignore //! use lhef::ReadLhe; //! use lhef::helac::LheFileRS; //! //! let lhe = LheFileRS::read_lhe_from_file(&"events.lhe").unwrap(); //! //! // x1 of the 5th event //! let extra = lhe.events[4].extra.pdf.x1; //! ``` //! //! ## Supported file types //! //! This library comes with three specialization modules to handle extra //! information contained in event files: //! //! ### plain //! //! The [`plain`] module allows to read `lhe` files without taking any //! extra information into account. //! The [`plain::LheFile`] struct contains only the information that is //! guaranteed to be present in all `lhe` files. //! The `extra` fields on the file, init and event objects are still //! present, but only return dummy objects that do not contain any //! information. //! The `comment` and the `header` are also dummy objects. //! //! //! ### string //! //! The [`string`] module allows to read `lhe` files and keeping all the //! extra information in the files as unparsed strings. //! The `comment` and the `header` are kept as strings, without the //! start and end tags. //! All extra information has leading and trailing whitespace removed. //! Whitespace (including linebreaks) within the strings is conserved. //! //! //! ### helac //! //! 
The [`helac`] module contains specialized structs that the extra //! information contained in `lhe` files generated by `HELAC-NLO` is //! parsed into. //! The comment is kept as a string, and since `HELAC` `lhe` files do //! not contain a header, the header is a dummy object. //! //! //! ### Adding support for new file types //! //! To add new file types, you need to add types that implement the //! `ReadLhe` and `WriteLhe` traits for the additional information //! stored in the file type. //! The type signature of the `read_from_lhe` function of the `ReadLhe` //! trait means that you should use [`nom`] to parse your type. //! Your implementations need to parse the opening and end tags for //! comments (`<!--` and `-->`) and the header (`<header>` and //! `</header>`) respectively, but must leave the tags for the init //! section and for events alone. //! With these implementations you can then use `LheFileGeneric` with //! your types to read and write `lhe` files. //! //! //! [`rust`]: https://www.rust-lang.org //! [`LesHouchesEvents`]: https://arxiv.org/abs/hep-ph/0609017 //! [`MG5_aMC@NLO`]: https://launchpad.net/mg5amcnlo //! [`HELAC_NLO`]: http://helac-phegas.web.cern.ch/helac-phegas/ //! [`nom`]: https://github.com/Geal/nom //! [`plain`]: plain/index.html //! [`string`]: string/index.html //! [`helac`]: helac/index.html extern crate lorentz_vector; #[macro_use] extern crate nom; #[cfg(test)] #[macro_use] extern crate quickcheck; #[cfg(test)] #[macro_use] extern crate serde; #[cfg(test)] #[cfg(test)] extern crate serde_json; #[macro_use] pub mod nom_util; pub mod generic; pub mod helac; pub mod plain; pub mod string; use lorentz_vector::LorentzVector; use std::error; use std::fmt; use std::fs; use std::io; use std::io::Read; use std::marker; use std::path::Path; #[cfg(test)] use quickcheck::Arbitrary; #[cfg(test)] use quickcheck::Gen; use nom_util::{parse_f64, parse_i64}; /// A type to use for pdg ids /// /// See the [Particle Data Group] website for more information. /// A list of all particle numbers can be found [here]. /// /// [Particle Data Group]: http://pdg.lbl.gov/ /// [here]: http://pdg.lbl.gov/2017/reviews/rpp2017-rev-monte-carlo-numbering.pdf pub type PdgId = i64; /// A trait to read (parts of) lhe files /// /// This trait needs to be implemented for a type to be able to use it /// in [`LheFileGeneric`] to hold extra information. /// /// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html pub trait ReadLhe where Self: marker::Sized, { /// Read an lhe object from a byte string /// /// The input to this function is the remaining input in the file /// (or just a chunk of it) and if successful, it should return the /// parsed object and the input left after parsing the object. /// See the [`nom documentation`] for more information. /// /// [`nom documentation`]: http://rust.unhandledexpression.com/nom/ fn read_lhe(&[u8]) -> nom::IResult<&[u8], Self>; /// Read an lhe object from a file fn read_lhe_from_file<P: AsRef<Path>>(path: &P) -> Result<Self, ReadError> { let mut file = fs::File::open(path)?; let mut contents = Vec::new(); file.read_to_end(&mut contents)?; Self::read_lhe(&contents) .to_full_result() .map_err(ReadError::Nom) } } /// A trait to write (parts of) lhe files /// /// This trait needs to be implemented for a type to be able to use it /// in [`LheFileGeneric`] to hold extra information. 
/// /// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html pub trait WriteLhe { /// Write the object to a writer fn write_lhe<W: io::Write>(&self, &mut W) -> io::Result<()>; /// Write the object to a file fn write_lhe_to_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()> { let mut file = fs::File::create(path)?; self.write_lhe(&mut file) } } /// Errors that may occur when reading lhe objects from files #[derive(Debug)] pub enum ReadError { /// An io error occured Io(io::Error), /// A parse error occured Nom(nom::IError), } impl From<io::Error> for ReadError { fn from(err: io::Error) -> ReadError { ReadError::Io(err) } } impl fmt::Display for ReadError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ReadError::Io(ref err) => { write!(f, "Failed to read the lhe file with an IO error: {}", err) } ReadError::Nom(ref err) => write!( f, "Failed to read the lhe file with a parse error: {:?}", err ), } } } impl error::Error for ReadError { fn description(&self) -> &str { match *self { ReadError::Io(..) => &"Failed to read the lhe file with an IO error", ReadError::Nom(..) => &"Failed to read the lhe file with a parse error", } } fn cause(&self) -> Option<&error::Error> { match *self { ReadError::Io(ref err) => Some(err), ReadError::Nom(_) => None, } } } /// A struct for process information /// /// This is the per process information contained in the `init` section /// of `lhe` files. /// When reading a file, the `Init` struct will contain `NPRUP` /// `ProcInfo` objects. /// `ProcInfo` is part of the compulsory initialization information. /// /// For more information on the fields, see the [`lhe`] paper and the /// documentation of the [`LHA common blocks`]. /// The names in parentheses are the names of the fields in these /// papers. 
/// /// # Examples /// /// ```rust /// use lhef::{ProcInfo, ReadLhe}; /// use lhef::plain::LheFile; /// /// let bytes = b"\ /// <LesHouchesEvents version=\"1.0\"> /// <init> /// 2212 2212 6500 6500 0 0 13100 13100 3 2 /// 2.1 3.2E-03 1.0E+00 1 /// 4.0 7.4E-03 1.0E+00 2 /// </init> /// </LesHouchesEvents>"; /// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap(); /// assert_eq!(lhe.init.process_info.len(), 2); /// assert_eq!(lhe.init.process_info[0].xsect, 2.1); /// assert_eq!(lhe.init.process_info[1].xsect_err, 0.0074); /// ``` /// /// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017 /// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068 #[derive(Clone, Debug, PartialEq)] #[cfg_attr(test, derive(Serialize, Deserialize))] pub struct ProcInfo { /// The cross section of the process (`XSECUP`) pub xsect: f64, /// The cross section error of the process (`XERRUP`) pub xsect_err: f64, /// The maximum weight of the events of the process (`XMAXUP`) pub maximum_weight: f64, /// The process id (`LPRUP`) pub process_id: i64, } impl ReadLhe for ProcInfo { fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], ProcInfo> { do_parse!( input, xsect: ws!(parse_f64) >> xsect_err: ws!(parse_f64) >> maximum_weight: ws!(parse_f64) >> process_id: ws!(parse_i64) >> (ProcInfo { xsect, xsect_err, maximum_weight, process_id, }) ) } } impl WriteLhe for ProcInfo { fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { writeln!( writer, "{:e} {:e} {:e} {}", self.xsect, self.xsect_err, self.maximum_weight, self.process_id ) } } #[cfg(test)] impl Arbitrary for ProcInfo { fn arbitrary<G: Gen>(gen: &mut G) -> ProcInfo { ProcInfo { xsect: Arbitrary::arbitrary(gen), xsect_err: Arbitrary::arbitrary(gen), maximum_weight: Arbitrary::arbitrary(gen), process_id: Arbitrary::arbitrary(gen), } } } /// A particle in lhe format /// /// An event will contain as many `Particle`s as there are particles in /// the event. /// `Particle` is part of the compulsory event information. /// /// For more information on the fields, see the [`lhe`] paper and the /// documentation of the [`LHA common blocks`]. /// The names in parentheses are the names of the fields in these /// papers. /// /// # Examples /// /// ```rust /// use lhef::{Particle, ReadLhe}; /// use lhef::plain::LheFile; /// /// let bytes = b"\ /// <LesHouchesEvents version=\"1.0\"> /// <init> /// 2212 2212 6500 6500 0 0 13100 13100 3 1 /// 2.1 3.2E-03 1.0E+00 1 /// </init> /// <event> /// 4 1 +1.04e-01 1.00e+03 7.54e-03 8.68e-02 /// -11 -1 0 0 0 0 +0.00e+00 +0.00e+00 +5.00e+02 5.00e+02 0.00e+00 0.00e+00 -1.00e+00 /// 11 -1 0 0 0 0 -0.00e+00 -0.00e+00 -5.00e+02 5.00e+02 0.00e+00 0.00e+00 1.00e+00 /// -13 1 1 2 0 0 -1.97e+02 -4.52e+02 -7.94e+01 5.00e+02 0.00e+00 0.00e+00 -1.00e+00 /// 13 1 1 2 0 0 +1.97e+02 +4.52e+02 +7.94e+01 5.00e+02 0.00e+00 0.00e+00 1.00e+00 /// </event> /// </LesHouchesEvents>"; /// /// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap(); /// let event = &lhe.events[0]; /// assert_eq!(event.particles.len(), 4); /// assert_eq!(event.particles[0].pdg_id, -11); /// assert_eq!(event.particles[3].momentum.py, 452.); /// ``` /// /// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017 /// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068 #[derive(Clone, Debug, PartialEq)] #[cfg_attr(test, derive(Serialize, Deserialize))] pub struct Particle { /// The pdg id of the particle (`IDUP`) pub pdg_id: PdgId, /// The status code of the particle (`ISTUP`) pub status: i64, /// The id of the first mother of the particle (`MOTHUP(1)`). 
/// This isn't a pdg id, but a (1 based) index into the particles vector. pub mother_1_id: i64, /// The id of the second mother of the particle (`MOTHUP(2)`). /// This isn't a pdg id, but a (1 based) index into the particles vector. pub mother_2_id: i64, /// The color of the particle (`ICOLUP(1)`) pub color_1: i64, /// The color of the particle (`ICOLUP(2)`) pub color_2: i64, /// The four momentum of the particle (`PUP` 1 - 4) pub momentum: LorentzVector, /// The mass of the particle (`PUP(5)`) pub mass: f64, /// The proper lifetime of the particle (`VTIMUP`) pub proper_lifetime: f64, /// The spin of the particle (`SPINUP`) pub spin: f64, } impl ReadLhe for Particle { fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], Particle> { do_parse!( input, pdg_id: ws!(parse_i64) >> status: ws!(parse_i64) >> mother_1_id: ws!(parse_i64) >> mother_2_id: ws!(parse_i64) >> color_1: ws!(parse_i64) >> color_2: ws!(parse_i64) >> px: ws!(parse_f64) >> py: ws!(parse_f64) >> pz: ws!(parse_f64) >> e: ws!(parse_f64) >> mass: ws!(parse_f64) >> proper_lifetime: ws!(parse_f64) >> spin: ws!(parse_f64) >> (Particle { pdg_id, status, mother_1_id, mother_2_id, color_1, color_2, momentum: LorentzVector { e, px, py, pz }, mass, proper_lifetime, spin, }) ) } } impl WriteLhe for Particle { fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { writeln!( writer, "{} {} {} {} {} {} {:e} {:e} {:e} {:e} {:e} {:e} {:e}", self.pdg_id, self.status, self.mother_1_id,
self.color_1, self.color_2, self.momentum.px, self.momentum.py, self.momentum.pz, self.momentum.e, self.mass, self.proper_lifetime, self.spin ) } } #[cfg(test)] impl Arbitrary for Particle { fn arbitrary<G: Gen>(gen: &mut G) -> Particle { let momentum = LorentzVector { e: Arbitrary::arbitrary(gen), px: Arbitrary::arbitrary(gen), py: Arbitrary::arbitrary(gen), pz: Arbitrary::arbitrary(gen), }; Particle { pdg_id: Arbitrary::arbitrary(gen), status: Arbitrary::arbitrary(gen), mother_1_id: Arbitrary::arbitrary(gen), mother_2_id: Arbitrary::arbitrary(gen), color_1: Arbitrary::arbitrary(gen), color_2: Arbitrary::arbitrary(gen), momentum, mass: Arbitrary::arbitrary(gen), proper_lifetime: Arbitrary::arbitrary(gen), spin: Arbitrary::arbitrary(gen), } } } #[cfg(test)] mod tests { use lorentz_vector::LorentzVector; use super::{ReadLhe, WriteLhe}; use super::{Particle, ProcInfo}; #[test] fn read_procinfo() { let bytes = b"1. 2. 3. 4\n"; let expected = ProcInfo { xsect: 1., xsect_err: 2., maximum_weight: 3., process_id: 4, }; let result = ProcInfo::read_lhe(bytes).to_full_result().unwrap(); assert_eq!(result, expected); } #[test] fn read_particle() { let bytes = b"1 2 3 4 5 6 7. 8. 9. 10. 11. 12. 13.\n"; let expected = Particle { pdg_id: 1, status: 2, mother_1_id: 3, mother_2_id: 4, color_1: 5, color_2: 6, momentum: LorentzVector { px: 7., py: 8., pz: 9., e: 10., }, mass: 11., proper_lifetime: 12., spin: 13., }; let result = Particle::read_lhe(bytes).to_full_result().unwrap(); assert_eq!(result, expected); } quickcheck! { fn proc_info_roundtrip_qc(p: ProcInfo) -> bool { let mut bytes = Vec::new(); p.write_lhe(&mut bytes).unwrap(); let round = match ProcInfo::read_lhe(&bytes).to_full_result() { Ok(r) => r, Err(err) => panic!("Failed to read roundtrip: {:?}", err), }; p == round } } quickcheck! { fn particle_roundtrip_qc(m: Particle) -> bool { let mut bytes = Vec::new(); m.write_lhe(&mut bytes).unwrap(); let round = match Particle::read_lhe(&bytes).to_full_result() { Ok(r) => r, Err(err) => panic!("Failed to read roundtrip: {:?}", err), }; m == round } } }
self.mother_2_id,
random_line_split
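The module documentation above explains that new file types are supported by implementing `ReadLhe` and `WriteLhe` for the extra information and plugging the types into `LheFileGeneric`. The following is a minimal sketch of such an extra type; it assumes a hypothetical per-event `EventWeight` payload, that `nom_util::parse_f64` is publicly usable, and the same nom macro style (`do_parse!`, `ws!`) the built-in implementations use, so treat it as illustrative rather than part of the crate:

// Sketch of a custom per-event extra: a single weight value.
// Assumes `lhef::nom_util::parse_f64` is public and nom's macros are imported
// with `#[macro_use] extern crate nom;` at the crate root.
extern crate lhef;
#[macro_use]
extern crate nom;

use std::io;

use lhef::nom_util::parse_f64;
use lhef::{ReadLhe, WriteLhe};

#[derive(Clone, Debug, PartialEq)]
pub struct EventWeight {
    pub weight: f64,
}

impl ReadLhe for EventWeight {
    fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], EventWeight> {
        // Parse one whitespace-delimited float, leaving the rest of the input alone.
        do_parse!(input, weight: ws!(parse_f64) >> (EventWeight { weight }))
    }
}

impl WriteLhe for EventWeight {
    fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
        writeln!(writer, "{:e}", self.weight)
    }
}

With these two impls, such a type could be used as the event extra of `LheFileGeneric`, as the module docs describe.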
lib.rs
// Copyright 2018 Torsten Weber // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! # LHEF //! //! The `lhef` library is a [`rust`] library to read and write files in //! the [`LesHouchesEvents`] format. //! It can be used to just read the common blocks specified by the //! standard, but is flexible enough to also handle the additional //! information that is allowed by the standard. //! This can be done either by reading them as `String`s or by parsing //! them into custom data structures. //! Reading common blocks has been tested for event files generated by //! [`MG5_aMC@NLO`] and [`HELAC_NLO`]. //! Specialized data structures for the reweighting information written //! by `HELAC_NLO` are included. //! //! ## Usage examples //! //! ### Reading a file and ignoring all extra information: //! //! ```rust,ignore //! use lhef::ReadLhe; //! use lhef::plain::LheFile; //! //! let lhe = LheFile::read_lhe_from_file(&"events.lhe").unwrap(); //! //! // Energy of beam 1 //! let beam_1_energy = lhe.init.beam_1_energy; //! //! // pz of the 4rd particle in the 7th event //! let pz = lhe.events[6].particles[3].momentum.pz; //! ``` //! //! ### Reading a file generated including extra information as strings: //! //! Specialized data structures for e.g. Madgraph do not exist, but the //! additional information stored in the event files written by it can //! still be extracted as strings: //! //! ```rust,ignore //! use lhef::ReadLhe; //! use lhef::string::{LheFile, EventExtra}; //! //! let lhe = LheFile::read_lhe_from_file(&"events.lhe").unwrap(); //! //! // extra information of the 5th event //! let EventExtra(ref extra) = lhe.events[4].extra; //! ``` //! //! ### Reading a file generated by `HELAC-NLO` //! //! This library comes with a module containing special data structures //! for the additional information contained in event files generated by //! `HELAC-NLO`. //! Therefore event files generated by `HELAC` can be read directly into //! the appropriate structures: //! //! ```rust,ignore //! use lhef::ReadLhe; //! use lhef::helac::LheFileRS; //! //! let lhe = LheFileRS::read_lhe_from_file(&"events.lhe").unwrap(); //! //! // x1 of the 5th event //! let extra = lhe.events[4].extra.pdf.x1; //! ``` //! //! ## Supported file types //! //! This library comes with three specialization modules to handle extra //! information contained in event files: //! //! ### plain //! //! The [`plain`] module allows to read `lhe` files without taking any //! extra information into account. //! The [`plain::LheFile`] struct contains only the information that is //! guaranteed to be present in all `lhe` files. //! The `extra` fields on the file, init and event objects are still //! present, but only return dummy objects that do not contain any //! information. //! The `comment` and the `header` are also dummy objects. //! //! //! ### string //! //! The [`string`] module allows to read `lhe` files and keeping all the //! extra information in the files as unparsed strings. //! The `comment` and the `header` are kept as strings, without the //! start and end tags. //! All extra information has leading and trailing whitespace removed. //! Whitespace (including linebreaks) within the strings is conserved. //! //! //! ### helac //! //! 
The [`helac`] module contains specialized structs that the extra //! information contained in `lhe` files generated by `HELAC-NLO` is //! parsed into. //! The comment is kept as a string, and since `HELAC` `lhe` files do //! not contain a header, the header is a dummy object. //! //! //! ### Adding support for new file types //! //! To add new file types, you need to add types that implement the //! `ReadLhe` and `WriteLhe` traits for the additional information //! stored in the file type. //! The type signature of the `read_from_lhe` function of the `ReadLhe` //! trait means that you should use [`nom`] to parse your type. //! Your implementations need to parse the opening and end tags for //! comments (`<!--` and `-->`) and the header (`<header>` and //! `</header>`) respectively, but must leave the tags for the init //! section and for events alone. //! With these implementations you can then use `LheFileGeneric` with //! your types to read and write `lhe` files. //! //! //! [`rust`]: https://www.rust-lang.org //! [`LesHouchesEvents`]: https://arxiv.org/abs/hep-ph/0609017 //! [`MG5_aMC@NLO`]: https://launchpad.net/mg5amcnlo //! [`HELAC_NLO`]: http://helac-phegas.web.cern.ch/helac-phegas/ //! [`nom`]: https://github.com/Geal/nom //! [`plain`]: plain/index.html //! [`string`]: string/index.html //! [`helac`]: helac/index.html extern crate lorentz_vector; #[macro_use] extern crate nom; #[cfg(test)] #[macro_use] extern crate quickcheck; #[cfg(test)] #[macro_use] extern crate serde; #[cfg(test)] #[cfg(test)] extern crate serde_json; #[macro_use] pub mod nom_util; pub mod generic; pub mod helac; pub mod plain; pub mod string; use lorentz_vector::LorentzVector; use std::error; use std::fmt; use std::fs; use std::io; use std::io::Read; use std::marker; use std::path::Path; #[cfg(test)] use quickcheck::Arbitrary; #[cfg(test)] use quickcheck::Gen; use nom_util::{parse_f64, parse_i64}; /// A type to use for pdg ids /// /// See the [Particle Data Group] website for more information. /// A list of all particle numbers can be found [here]. /// /// [Particle Data Group]: http://pdg.lbl.gov/ /// [here]: http://pdg.lbl.gov/2017/reviews/rpp2017-rev-monte-carlo-numbering.pdf pub type PdgId = i64; /// A trait to read (parts of) lhe files /// /// This trait needs to be implemented for a type to be able to use it /// in [`LheFileGeneric`] to hold extra information. /// /// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html pub trait ReadLhe where Self: marker::Sized, { /// Read an lhe object from a byte string /// /// The input to this function is the remaining input in the file /// (or just a chunk of it) and if successful, it should return the /// parsed object and the input left after parsing the object. /// See the [`nom documentation`] for more information. /// /// [`nom documentation`]: http://rust.unhandledexpression.com/nom/ fn read_lhe(&[u8]) -> nom::IResult<&[u8], Self>; /// Read an lhe object from a file fn read_lhe_from_file<P: AsRef<Path>>(path: &P) -> Result<Self, ReadError> { let mut file = fs::File::open(path)?; let mut contents = Vec::new(); file.read_to_end(&mut contents)?; Self::read_lhe(&contents) .to_full_result() .map_err(ReadError::Nom) } } /// A trait to write (parts of) lhe files /// /// This trait needs to be implemented for a type to be able to use it /// in [`LheFileGeneric`] to hold extra information. 
/// /// [`LheFileGeneric`]: generic/struct.LheFileGeneric.html pub trait WriteLhe { /// Write the object to a writer fn write_lhe<W: io::Write>(&self, &mut W) -> io::Result<()>; /// Write the object to a file fn write_lhe_to_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()> { let mut file = fs::File::create(path)?; self.write_lhe(&mut file) } } /// Errors that may occur when reading lhe objects from files #[derive(Debug)] pub enum ReadError { /// An io error occured Io(io::Error), /// A parse error occured Nom(nom::IError), } impl From<io::Error> for ReadError { fn from(err: io::Error) -> ReadError { ReadError::Io(err) } } impl fmt::Display for ReadError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ReadError::Io(ref err) => { write!(f, "Failed to read the lhe file with an IO error: {}", err) } ReadError::Nom(ref err) => write!( f, "Failed to read the lhe file with a parse error: {:?}", err ), } } } impl error::Error for ReadError { fn description(&self) -> &str { match *self { ReadError::Io(..) => &"Failed to read the lhe file with an IO error", ReadError::Nom(..) => &"Failed to read the lhe file with a parse error", } } fn cause(&self) -> Option<&error::Error> { match *self { ReadError::Io(ref err) => Some(err), ReadError::Nom(_) => None, } } } /// A struct for process information /// /// This is the per process information contained in the `init` section /// of `lhe` files. /// When reading a file, the `Init` struct will contain `NPRUP` /// `ProcInfo` objects. /// `ProcInfo` is part of the compulsory initialization information. /// /// For more information on the fields, see the [`lhe`] paper and the /// documentation of the [`LHA common blocks`]. /// The names in parentheses are the names of the fields in these /// papers. 
/// /// # Examples /// /// ```rust /// use lhef::{ProcInfo, ReadLhe}; /// use lhef::plain::LheFile; /// /// let bytes = b"\ /// <LesHouchesEvents version=\"1.0\"> /// <init> /// 2212 2212 6500 6500 0 0 13100 13100 3 2 /// 2.1 3.2E-03 1.0E+00 1 /// 4.0 7.4E-03 1.0E+00 2 /// </init> /// </LesHouchesEvents>"; /// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap(); /// assert_eq!(lhe.init.process_info.len(), 2); /// assert_eq!(lhe.init.process_info[0].xsect, 2.1); /// assert_eq!(lhe.init.process_info[1].xsect_err, 0.0074); /// ``` /// /// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017 /// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068 #[derive(Clone, Debug, PartialEq)] #[cfg_attr(test, derive(Serialize, Deserialize))] pub struct ProcInfo { /// The cross section of the process (`XSECUP`) pub xsect: f64, /// The cross section error of the process (`XERRUP`) pub xsect_err: f64, /// The maximum weight of the events of the process (`XMAXUP`) pub maximum_weight: f64, /// The process id (`LPRUP`) pub process_id: i64, } impl ReadLhe for ProcInfo { fn read_lhe(input: &[u8]) -> nom::IResult<&[u8], ProcInfo> { do_parse!( input, xsect: ws!(parse_f64) >> xsect_err: ws!(parse_f64) >> maximum_weight: ws!(parse_f64) >> process_id: ws!(parse_i64) >> (ProcInfo { xsect, xsect_err, maximum_weight, process_id, }) ) } } impl WriteLhe for ProcInfo { fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { writeln!( writer, "{:e} {:e} {:e} {}", self.xsect, self.xsect_err, self.maximum_weight, self.process_id ) } } #[cfg(test)] impl Arbitrary for ProcInfo { fn arbitrary<G: Gen>(gen: &mut G) -> ProcInfo { ProcInfo { xsect: Arbitrary::arbitrary(gen), xsect_err: Arbitrary::arbitrary(gen), maximum_weight: Arbitrary::arbitrary(gen), process_id: Arbitrary::arbitrary(gen), } } } /// A particle in lhe format /// /// An event will contain as many `Particle`s as there are particles in /// the event. /// `Particle` is part of the compulsory event information. /// /// For more information on the fields, see the [`lhe`] paper and the /// documentation of the [`LHA common blocks`]. /// The names in parentheses are the names of the fields in these /// papers. /// /// # Examples /// /// ```rust /// use lhef::{Particle, ReadLhe}; /// use lhef::plain::LheFile; /// /// let bytes = b"\ /// <LesHouchesEvents version=\"1.0\"> /// <init> /// 2212 2212 6500 6500 0 0 13100 13100 3 1 /// 2.1 3.2E-03 1.0E+00 1 /// </init> /// <event> /// 4 1 +1.04e-01 1.00e+03 7.54e-03 8.68e-02 /// -11 -1 0 0 0 0 +0.00e+00 +0.00e+00 +5.00e+02 5.00e+02 0.00e+00 0.00e+00 -1.00e+00 /// 11 -1 0 0 0 0 -0.00e+00 -0.00e+00 -5.00e+02 5.00e+02 0.00e+00 0.00e+00 1.00e+00 /// -13 1 1 2 0 0 -1.97e+02 -4.52e+02 -7.94e+01 5.00e+02 0.00e+00 0.00e+00 -1.00e+00 /// 13 1 1 2 0 0 +1.97e+02 +4.52e+02 +7.94e+01 5.00e+02 0.00e+00 0.00e+00 1.00e+00 /// </event> /// </LesHouchesEvents>"; /// /// let lhe = LheFile::read_lhe(bytes).to_full_result().unwrap(); /// let event = &lhe.events[0]; /// assert_eq!(event.particles.len(), 4); /// assert_eq!(event.particles[0].pdg_id, -11); /// assert_eq!(event.particles[3].momentum.py, 452.); /// ``` /// /// [`lhe`]: https://arxiv.org/abs/hep-ph/0609017 /// [`LHA common blocks`]: https://arxiv.org/abs/hep-ph/0109068 #[derive(Clone, Debug, PartialEq)] #[cfg_attr(test, derive(Serialize, Deserialize))] pub struct Particle { /// The pdg id of the particle (`IDUP`) pub pdg_id: PdgId, /// The status code of the particle (`ISTUP`) pub status: i64, /// The id of the first mother of the particle (`MOTHUP(1)`). 
/// This isn't a pdg id, but a (1 based) index into the particles vector. pub mother_1_id: i64, /// The id of the second mother of the particle (`MOTHUP(2)`). /// This isn't a pdg id, but a (1 based) index into the particles vector. pub mother_2_id: i64, /// The color of the particle (`ICOLUP(1)`) pub color_1: i64, /// The color of the particle (`ICOLUP(2)`) pub color_2: i64, /// The four momentum of the particle (`PUP` 1 - 4) pub momentum: LorentzVector, /// The mass of the particle (`PUP(5)`) pub mass: f64, /// The proper lifetime of the particle (`VTIMUP`) pub proper_lifetime: f64, /// The spin of the particle (`SPINUP`) pub spin: f64, } impl ReadLhe for Particle { fn
(input: &[u8]) -> nom::IResult<&[u8], Particle> { do_parse!( input, pdg_id: ws!(parse_i64) >> status: ws!(parse_i64) >> mother_1_id: ws!(parse_i64) >> mother_2_id: ws!(parse_i64) >> color_1: ws!(parse_i64) >> color_2: ws!(parse_i64) >> px: ws!(parse_f64) >> py: ws!(parse_f64) >> pz: ws!(parse_f64) >> e: ws!(parse_f64) >> mass: ws!(parse_f64) >> proper_lifetime: ws!(parse_f64) >> spin: ws!(parse_f64) >> (Particle { pdg_id, status, mother_1_id, mother_2_id, color_1, color_2, momentum: LorentzVector { e, px, py, pz }, mass, proper_lifetime, spin, }) ) } } impl WriteLhe for Particle { fn write_lhe<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { writeln!( writer, "{} {} {} {} {} {} {:e} {:e} {:e} {:e} {:e} {:e} {:e}", self.pdg_id, self.status, self.mother_1_id, self.mother_2_id, self.color_1, self.color_2, self.momentum.px, self.momentum.py, self.momentum.pz, self.momentum.e, self.mass, self.proper_lifetime, self.spin ) } } #[cfg(test)] impl Arbitrary for Particle { fn arbitrary<G: Gen>(gen: &mut G) -> Particle { let momentum = LorentzVector { e: Arbitrary::arbitrary(gen), px: Arbitrary::arbitrary(gen), py: Arbitrary::arbitrary(gen), pz: Arbitrary::arbitrary(gen), }; Particle { pdg_id: Arbitrary::arbitrary(gen), status: Arbitrary::arbitrary(gen), mother_1_id: Arbitrary::arbitrary(gen), mother_2_id: Arbitrary::arbitrary(gen), color_1: Arbitrary::arbitrary(gen), color_2: Arbitrary::arbitrary(gen), momentum, mass: Arbitrary::arbitrary(gen), proper_lifetime: Arbitrary::arbitrary(gen), spin: Arbitrary::arbitrary(gen), } } } #[cfg(test)] mod tests { use lorentz_vector::LorentzVector; use super::{ReadLhe, WriteLhe}; use super::{Particle, ProcInfo}; #[test] fn read_procinfo() { let bytes = b"1. 2. 3. 4\n"; let expected = ProcInfo { xsect: 1., xsect_err: 2., maximum_weight: 3., process_id: 4, }; let result = ProcInfo::read_lhe(bytes).to_full_result().unwrap(); assert_eq!(result, expected); } #[test] fn read_particle() { let bytes = b"1 2 3 4 5 6 7. 8. 9. 10. 11. 12. 13.\n"; let expected = Particle { pdg_id: 1, status: 2, mother_1_id: 3, mother_2_id: 4, color_1: 5, color_2: 6, momentum: LorentzVector { px: 7., py: 8., pz: 9., e: 10., }, mass: 11., proper_lifetime: 12., spin: 13., }; let result = Particle::read_lhe(bytes).to_full_result().unwrap(); assert_eq!(result, expected); } quickcheck! { fn proc_info_roundtrip_qc(p: ProcInfo) -> bool { let mut bytes = Vec::new(); p.write_lhe(&mut bytes).unwrap(); let round = match ProcInfo::read_lhe(&bytes).to_full_result() { Ok(r) => r, Err(err) => panic!("Failed to read roundtrip: {:?}", err), }; p == round } } quickcheck! { fn particle_roundtrip_qc(m: Particle) -> bool { let mut bytes = Vec::new(); m.write_lhe(&mut bytes).unwrap(); let round = match Particle::read_lhe(&bytes).to_full_result() { Ok(r) => r, Err(err) => panic!("Failed to read roundtrip: {:?}", err), }; m == round } } }
read_lhe
identifier_name
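Because `mother_1_id` and `mother_2_id` are documented above as 1-based indices into the event's particle list (not pdg ids), resolving a mother particle needs an explicit offset. A small illustrative helper, assuming the `particles` vector from the doc examples and the usual LHEF convention that an index of 0 means "no mother":

// Sketch: look up the first mother of a particle within the same event.
use lhef::Particle;

fn first_mother<'a>(particles: &'a [Particle], p: &Particle) -> Option<&'a Particle> {
    if p.mother_1_id <= 0 {
        // MOTHUP(1) == 0 conventionally means the particle has no mother.
        return None;
    }
    // MOTHUP indices are 1-based, Vec indices are 0-based.
    particles.get((p.mother_1_id - 1) as usize)
}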
lib.rs
use byteorder::{ByteOrder, LittleEndian}; use num_derive::FromPrimitive; use solana_program::{ account_info::next_account_info, account_info::AccountInfo, decode_error::DecodeError, entrypoint, entrypoint::ProgramResult, msg, program_error::ProgramError, program_pack::{Pack, Sealed}, pubkey::Pubkey, rent::Rent, sysvar::{self, Sysvar}, }; use thiserror::Error; #[derive(Clone, Debug, Eq, Error, FromPrimitive, PartialEq)] pub enum DiceErr { #[error("Unexpected Roll Mode")] UnexpectedRollMode, #[error("Incrrect Threshold")] IncorrectThreshold, #[error("Incorrect Owner")] IncorrectOwner, #[error("Account Not Rent Exempt")] AccountNotRentExempt, #[error("Account Not Balance Account")] AccountNotBalanceAccount, #[error("Not Enough Balance")] NotEnoughBalance, #[error("Invalid Bet")] InvalidBet, } impl From<DiceErr> for ProgramError { fn from(e: DiceErr) -> Self { ProgramError::Custom(e as u32) } } impl<T> DecodeError<T> for DiceErr { fn type_of() -> &'static str { "Dice Error" } } // Instruction data pub struct Dice { pub roll_type: u8, pub threshold: u8, pub bet_amount: u32, } impl Sealed for Dice {} impl Pack for Dice { const LEN: usize = 6; fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> { let roll_type = src[0]; //println!("Roll Type: {}", roll_type); if roll_type!= 1 && roll_type!= 2 { msg!("You should roll under (1) or Roll Over (2)"); return Err(DiceErr::UnexpectedRollMode.into()); } let threshold = src[1]; //println!("Threshold: {}", threshold); if threshold < 2 || threshold > 98 { msg!("Your guess has to in between 2 and 98"); return Err(DiceErr::IncorrectThreshold.into()); } let bet_amount = LittleEndian::read_u32(&src[2..6]); //println!("Bet: {}", bet_amount); Ok(Dice { roll_type, threshold, bet_amount}) } fn pack_into_slice(&self, _dst: &mut [u8]) {} } // Player's Balance structure, which is one 4 byte u32 number pub struct PlayerBalance { pub balance: u32, } impl Sealed for PlayerBalance {} impl Pack for PlayerBalance { const LEN: usize = 4; fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> { Ok(PlayerBalance { balance: LittleEndian::read_u32(&src[0..4]), }) } fn pack_into_slice(&self, dst: &mut [u8]) { LittleEndian::write_u32(&mut dst[0..4], self.balance); } } // Prize Pool structure, which is a 4 byte u32 number pub struct PrizePool { pub pool_amount: u32, } impl Sealed for PrizePool {} impl Pack for PrizePool { const LEN: usize = 4; fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> { Ok(PrizePool { pool_amount: LittleEndian::read_u32(&src[0..4]), }) } fn pack_into_slice(&self, dst: &mut [u8]) { LittleEndian::write_u32(&mut dst[0..4], self.pool_amount); } } // Declare and export the program's entrypoint entrypoint!(process_instruction); // Program entrypoint's implementation fn process_instruction( program_id: &Pubkey, // Public key of program account accounts: &[AccountInfo], // data accounts instruction_data: &[u8], // First Element: Roll type, Second Element: Threshold, [2..6] Bet Amount ) -> ProgramResult { msg!("Rust program entrypoint"); // get Dice information let roll_type = Dice::unpack_unchecked(&instruction_data)?.roll_type; msg!("Roll Type: {}", roll_type); let threshold = Dice::unpack_unchecked(&instruction_data)?.threshold; msg!("Threshold: {}", threshold); let bet_amount = Dice::unpack_unchecked(&instruction_data)?.bet_amount; msg!("Bet: {}", bet_amount); // Iterating accounts is safer then indexing let accounts_iter = &mut accounts.iter(); // Get the account that holds the Prize Pool let prize_pool_account = 
next_account_info(accounts_iter)?; // The account must be owned by the program in order to modify its data if prize_pool_account.owner!= program_id { msg!( "Prize Pool account ({}) not owned by program, actual: {}, expected: {}", prize_pool_account.key, prize_pool_account.owner, program_id ); return Err(DiceErr::IncorrectOwner.into()); } // Get the account that holds the balance for the players let player_balance_account = next_account_info(accounts_iter)?; // The check account must be owned by the program in order to modify its data if player_balance_account.owner!= program_id { msg!("Check account not owned by program"); return Err(DiceErr::IncorrectOwner.into()); } // The account must be rent exempt, i.e. live forever let sysvar_account = next_account_info(accounts_iter)?; let rent = &Rent::from_account_info(sysvar_account)?; if!sysvar::rent::check_id(sysvar_account.key) { msg!("Rent system account is not rent system account"); return Err(ProgramError::InvalidAccountData); } if!rent.is_exempt(player_balance_account.lamports(), player_balance_account.data_len()) { msg!("Balance account is not rent exempt"); return Err(DiceErr::AccountNotRentExempt.into()); } // the player let player_account = next_account_info(accounts_iter)?; if!player_account.is_signer { msg!("Player account is not signer"); return Err(ProgramError::MissingRequiredSignature); } let expected_check_account_pubkey = Pubkey::create_with_seed(player_account.key, "checkvote", program_id)?; if expected_check_account_pubkey!= *player_balance_account.key { msg!("Voter fraud! not the correct balance_account"); return Err(DiceErr::AccountNotBalanceAccount.into()); } let mut balance_data = player_balance_account.try_borrow_mut_data()?; // this unpack reads and deserialises the account data and also checks the data is the correct length let mut player_balance = PlayerBalance::unpack_unchecked(&balance_data).expect("Failed to read PlayerBalance"); // Handle the bet_amount and the balance /*if vote_check.voted_for!= 0 { msg!("Voter fraud! You already voted"); return Err(VoteError::AlreadyVoted.into()); }*/ let mut prize_pool_data = prize_pool_account.try_borrow_mut_data()?; let mut prize_pool = PrizePool::unpack_unchecked(&prize_pool_data).expect("Failed to read PrizePool"); /////////////////////// // Jut for Debug if player_balance.balance == 0 { msg!{"Airdrop some money!!!"}; player_balance.balance = 50; } if prize_pool.pool_amount == 0 { msg!{"Airdrop some money!!!"}; prize_pool.pool_amount = 1000; } // Check the valid of the bet amount if bet_amount > player_balance.balance
if bet_amount == 0 { msg!("Inavalid Bet"); return Err(DiceErr::InvalidBet.into()); } let lucky_number:u8 = 20; println!("Result {}", lucky_number); let mut win_amount:u32 = 0; if (1 == roll_type && lucky_number <= threshold) || (2 == roll_type && lucky_number >= threshold) { if lucky_number <= 25 || lucky_number >= 75 { win_amount = bet_amount as u32 * 2; msg!("Win: {}", win_amount); }else{ win_amount = bet_amount as u32; msg!("Win: {}", win_amount); } } if win_amount == 0 { prize_pool.pool_amount += bet_amount; player_balance.balance -= bet_amount; msg!("You Lose!"); }else{ prize_pool.pool_amount -= win_amount; player_balance.balance += win_amount; msg!("You Win!"); } PrizePool::pack(prize_pool, &mut prize_pool_data).expect("Failed to write Prize Pool"); PlayerBalance::pack(player_balance, &mut balance_data).expect("Failed to write Player Balance"); Ok(()) } #[cfg(test)] mod test { use super::*; use solana_program::instruction::InstructionError::Custom; use solana_program::{ instruction::{AccountMeta, Instruction}, pubkey::Pubkey, sysvar, }; use solana_program_test::*; use solana_sdk::transaction::TransactionError; use solana_sdk::{ account::Account, signature::{Keypair, Signer}, transaction::Transaction, }; use std::mem; use self::tokio; impl From<DiceErr> for TransactionError { fn from(e: DiceErr) -> Self { TransactionError::InstructionError(0, Custom(e as u32)) } } #[tokio::test] async fn test_sanity1() { //++++++++++++++++++++++++++++++++++++ // TEST: Simply vote for Bet //++++++++++++++++++++++++++++++++++++ let program_id = Pubkey::new_unique(); let mut program_test = ProgramTest::new("dice", program_id, processor!(process_instruction)); // mock contract data account let game_key = Pubkey::new_unique(); let mut data: Vec<u8> = vec![0; 4 * mem::size_of::<u8>()]; LittleEndian::write_u32(&mut data[0..4], 1000); // set prize pool to 1000 println!("Prize Pool {:?}", data); program_test.add_account( game_key, Account { lamports: 60000, data, owner: program_id, executable: false, rent_epoch: 0, }, ); // player account let player_keypair = Keypair::new(); let player_key = player_keypair.pubkey(); // mock player balance_account_data let balance_key = Pubkey::create_with_seed(&player_key, "checkvote", &program_id).unwrap(); // derived (correctly) let mut data = vec![0; mem::size_of::<u32>()]; LittleEndian::write_u32(&mut data[0..4], 50); // set storage to 50 program_test.add_account( balance_key, Account { lamports: 1000000, data, owner: program_id, executable: false, rent_epoch: 0, }, ); let (mut banks_client, payer, recent_blockhash) = program_test.start().await; let game_account = banks_client.get_account(game_key).await.unwrap().unwrap(); let prize_bool_amount = PrizePool::unpack_unchecked(&game_account.data).expect("Failed to read Prize Pool"); assert_eq!(prize_bool_amount.pool_amount, 1000); // Roll Under let accounts = vec![ AccountMeta::new(game_key, false), AccountMeta::new(balance_key, false), AccountMeta::new_readonly(sysvar::rent::id(), false), AccountMeta::new(player_key, true), ]; let mut bet = vec![0; 6*mem::size_of::<u8>()]; bet[0] = 1; // Role Under bet[1] = 30; // Threshold 30 LittleEndian::write_u32(&mut bet[2..6], 10); // Bet 10 println!("Instruction Data {:?}", bet); let mut transaction = Transaction::new_with_payer( &[Instruction { program_id, accounts, data: bet, }], Some(&payer.pubkey()), ); transaction.sign(&[&payer, &player_keypair], recent_blockhash); let a = banks_client.process_transaction(transaction).await; println!("Test Log {:?}", a); let game_account = 
banks_client.get_account(game_key).await.unwrap().unwrap(); let prize_pool_check = PrizePool::unpack_unchecked(&game_account.data).expect("Failed to read Prize Pool"); assert_eq!(prize_pool_check.pool_amount, 980); let player = banks_client.get_account(balance_key).await.unwrap().unwrap(); let bal_check = PlayerBalance::unpack_unchecked(&player.data).expect("Failed to read Balance"); assert_eq!(bal_check.balance, 70); } }
{ msg!("Not Enough Balance"); return Err(DiceErr::NotEnoughBalance.into()); }
conditional_block
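For reference, the values asserted at the end of the test above follow directly from the hard-coded `lucky_number = 20` and the payout rule in `process_instruction`: the bet is 10 on roll-under with threshold 30, so 20 <= 30 wins, and since 20 <= 25 the win is doubled to 20. The prize pool therefore drops from 1000 to 1000 - 20 = 980 and the player balance rises from 50 to 50 + 20 = 70, matching the two `assert_eq!` checks.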
lib.rs
use byteorder::{ByteOrder, LittleEndian}; use num_derive::FromPrimitive; use solana_program::{ account_info::next_account_info, account_info::AccountInfo, decode_error::DecodeError, entrypoint, entrypoint::ProgramResult, msg, program_error::ProgramError, program_pack::{Pack, Sealed}, pubkey::Pubkey, rent::Rent, sysvar::{self, Sysvar}, }; use thiserror::Error; #[derive(Clone, Debug, Eq, Error, FromPrimitive, PartialEq)] pub enum DiceErr { #[error("Unexpected Roll Mode")] UnexpectedRollMode, #[error("Incrrect Threshold")] IncorrectThreshold, #[error("Incorrect Owner")] IncorrectOwner, #[error("Account Not Rent Exempt")] AccountNotRentExempt, #[error("Account Not Balance Account")] AccountNotBalanceAccount, #[error("Not Enough Balance")] NotEnoughBalance, #[error("Invalid Bet")] InvalidBet, } impl From<DiceErr> for ProgramError { fn from(e: DiceErr) -> Self { ProgramError::Custom(e as u32) } } impl<T> DecodeError<T> for DiceErr { fn type_of() -> &'static str { "Dice Error" } } // Instruction data pub struct Dice { pub roll_type: u8, pub threshold: u8, pub bet_amount: u32, } impl Sealed for Dice {} impl Pack for Dice { const LEN: usize = 6; fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> { let roll_type = src[0]; //println!("Roll Type: {}", roll_type); if roll_type!= 1 && roll_type!= 2 { msg!("You should roll under (1) or Roll Over (2)"); return Err(DiceErr::UnexpectedRollMode.into()); } let threshold = src[1]; //println!("Threshold: {}", threshold); if threshold < 2 || threshold > 98 { msg!("Your guess has to in between 2 and 98"); return Err(DiceErr::IncorrectThreshold.into()); } let bet_amount = LittleEndian::read_u32(&src[2..6]); //println!("Bet: {}", bet_amount); Ok(Dice { roll_type, threshold, bet_amount}) } fn pack_into_slice(&self, _dst: &mut [u8]) {} } // Player's Balance structure, which is one 4 byte u32 number pub struct PlayerBalance { pub balance: u32, } impl Sealed for PlayerBalance {} impl Pack for PlayerBalance { const LEN: usize = 4; fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> { Ok(PlayerBalance { balance: LittleEndian::read_u32(&src[0..4]), }) } fn pack_into_slice(&self, dst: &mut [u8]) { LittleEndian::write_u32(&mut dst[0..4], self.balance); } } // Prize Pool structure, which is a 4 byte u32 number pub struct PrizePool { pub pool_amount: u32, } impl Sealed for PrizePool {} impl Pack for PrizePool { const LEN: usize = 4; fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> { Ok(PrizePool { pool_amount: LittleEndian::read_u32(&src[0..4]), }) } fn pack_into_slice(&self, dst: &mut [u8]) { LittleEndian::write_u32(&mut dst[0..4], self.pool_amount); } } // Declare and export the program's entrypoint entrypoint!(process_instruction); // Program entrypoint's implementation fn process_instruction( program_id: &Pubkey, // Public key of program account accounts: &[AccountInfo], // data accounts instruction_data: &[u8], // First Element: Roll type, Second Element: Threshold, [2..6] Bet Amount ) -> ProgramResult { msg!("Rust program entrypoint"); // get Dice information let roll_type = Dice::unpack_unchecked(&instruction_data)?.roll_type; msg!("Roll Type: {}", roll_type); let threshold = Dice::unpack_unchecked(&instruction_data)?.threshold; msg!("Threshold: {}", threshold); let bet_amount = Dice::unpack_unchecked(&instruction_data)?.bet_amount; msg!("Bet: {}", bet_amount); // Iterating accounts is safer then indexing let accounts_iter = &mut accounts.iter(); // Get the account that holds the Prize Pool let prize_pool_account = 
next_account_info(accounts_iter)?; // The account must be owned by the program in order to modify its data if prize_pool_account.owner!= program_id { msg!( "Prize Pool account ({}) not owned by program, actual: {}, expected: {}", prize_pool_account.key, prize_pool_account.owner, program_id ); return Err(DiceErr::IncorrectOwner.into()); } // Get the account that holds the balance for the players let player_balance_account = next_account_info(accounts_iter)?; // The check account must be owned by the program in order to modify its data if player_balance_account.owner!= program_id { msg!("Check account not owned by program"); return Err(DiceErr::IncorrectOwner.into());
let sysvar_account = next_account_info(accounts_iter)?; let rent = &Rent::from_account_info(sysvar_account)?; if!sysvar::rent::check_id(sysvar_account.key) { msg!("Rent system account is not rent system account"); return Err(ProgramError::InvalidAccountData); } if!rent.is_exempt(player_balance_account.lamports(), player_balance_account.data_len()) { msg!("Balance account is not rent exempt"); return Err(DiceErr::AccountNotRentExempt.into()); } // the player let player_account = next_account_info(accounts_iter)?; if!player_account.is_signer { msg!("Player account is not signer"); return Err(ProgramError::MissingRequiredSignature); } let expected_check_account_pubkey = Pubkey::create_with_seed(player_account.key, "checkvote", program_id)?; if expected_check_account_pubkey!= *player_balance_account.key { msg!("Voter fraud! not the correct balance_account"); return Err(DiceErr::AccountNotBalanceAccount.into()); } let mut balance_data = player_balance_account.try_borrow_mut_data()?; // this unpack reads and deserialises the account data and also checks the data is the correct length let mut player_balance = PlayerBalance::unpack_unchecked(&balance_data).expect("Failed to read PlayerBalance"); // Handle the bet_amount and the balance /*if vote_check.voted_for!= 0 { msg!("Voter fraud! You already voted"); return Err(VoteError::AlreadyVoted.into()); }*/ let mut prize_pool_data = prize_pool_account.try_borrow_mut_data()?; let mut prize_pool = PrizePool::unpack_unchecked(&prize_pool_data).expect("Failed to read PrizePool"); /////////////////////// // Jut for Debug if player_balance.balance == 0 { msg!{"Airdrop some money!!!"}; player_balance.balance = 50; } if prize_pool.pool_amount == 0 { msg!{"Airdrop some money!!!"}; prize_pool.pool_amount = 1000; } // Check the valid of the bet amount if bet_amount > player_balance.balance { msg!("Not Enough Balance"); return Err(DiceErr::NotEnoughBalance.into()); } if bet_amount == 0 { msg!("Inavalid Bet"); return Err(DiceErr::InvalidBet.into()); } let lucky_number:u8 = 20; println!("Result {}", lucky_number); let mut win_amount:u32 = 0; if (1 == roll_type && lucky_number <= threshold) || (2 == roll_type && lucky_number >= threshold) { if lucky_number <= 25 || lucky_number >= 75 { win_amount = bet_amount as u32 * 2; msg!("Win: {}", win_amount); }else{ win_amount = bet_amount as u32; msg!("Win: {}", win_amount); } } if win_amount == 0 { prize_pool.pool_amount += bet_amount; player_balance.balance -= bet_amount; msg!("You Lose!"); }else{ prize_pool.pool_amount -= win_amount; player_balance.balance += win_amount; msg!("You Win!"); } PrizePool::pack(prize_pool, &mut prize_pool_data).expect("Failed to write Prize Pool"); PlayerBalance::pack(player_balance, &mut balance_data).expect("Failed to write Player Balance"); Ok(()) } #[cfg(test)] mod test { use super::*; use solana_program::instruction::InstructionError::Custom; use solana_program::{ instruction::{AccountMeta, Instruction}, pubkey::Pubkey, sysvar, }; use solana_program_test::*; use solana_sdk::transaction::TransactionError; use solana_sdk::{ account::Account, signature::{Keypair, Signer}, transaction::Transaction, }; use std::mem; use self::tokio; impl From<DiceErr> for TransactionError { fn from(e: DiceErr) -> Self { TransactionError::InstructionError(0, Custom(e as u32)) } } #[tokio::test] async fn test_sanity1() { //++++++++++++++++++++++++++++++++++++ // TEST: Simply vote for Bet //++++++++++++++++++++++++++++++++++++ let program_id = Pubkey::new_unique(); let mut program_test = ProgramTest::new("dice", 
program_id, processor!(process_instruction)); // mock contract data account let game_key = Pubkey::new_unique(); let mut data: Vec<u8> = vec![0; 4 * mem::size_of::<u8>()]; LittleEndian::write_u32(&mut data[0..4], 1000); // set prize pool to 1000 println!("Prize Pool {:?}", data); program_test.add_account( game_key, Account { lamports: 60000, data, owner: program_id, executable: false, rent_epoch: 0, }, ); // player account let player_keypair = Keypair::new(); let player_key = player_keypair.pubkey(); // mock player balance_account_data let balance_key = Pubkey::create_with_seed(&player_key, "checkvote", &program_id).unwrap(); // derived (correctly) let mut data = vec![0; mem::size_of::<u32>()]; LittleEndian::write_u32(&mut data[0..4], 50); // set storage to 50 program_test.add_account( balance_key, Account { lamports: 1000000, data, owner: program_id, executable: false, rent_epoch: 0, }, ); let (mut banks_client, payer, recent_blockhash) = program_test.start().await; let game_account = banks_client.get_account(game_key).await.unwrap().unwrap(); let prize_bool_amount = PrizePool::unpack_unchecked(&game_account.data).expect("Failed to read Prize Pool"); assert_eq!(prize_bool_amount.pool_amount, 1000); // Roll Under let accounts = vec![ AccountMeta::new(game_key, false), AccountMeta::new(balance_key, false), AccountMeta::new_readonly(sysvar::rent::id(), false), AccountMeta::new(player_key, true), ]; let mut bet = vec![0; 6*mem::size_of::<u8>()]; bet[0] = 1; // Role Under bet[1] = 30; // Threshold 30 LittleEndian::write_u32(&mut bet[2..6], 10); // Bet 10 println!("Instruction Data {:?}", bet); let mut transaction = Transaction::new_with_payer( &[Instruction { program_id, accounts, data: bet, }], Some(&payer.pubkey()), ); transaction.sign(&[&payer, &player_keypair], recent_blockhash); let a = banks_client.process_transaction(transaction).await; println!("Test Log {:?}", a); let game_account = banks_client.get_account(game_key).await.unwrap().unwrap(); let prize_pool_check = PrizePool::unpack_unchecked(&game_account.data).expect("Failed to read Prize Pool"); assert_eq!(prize_pool_check.pool_amount, 980); let player = banks_client.get_account(balance_key).await.unwrap().unwrap(); let bal_check = PlayerBalance::unpack_unchecked(&player.data).expect("Failed to read Balance"); assert_eq!(bal_check.balance, 70); } }
} // The account must be rent exempt, i.e. live forever
random_line_split
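The instruction data that `Dice::unpack_from_slice` above expects is two single bytes (roll type, threshold) followed by a little-endian `u32` bet amount, six bytes in total (`Dice::LEN`). A small client-side sketch of packing it, mirroring what the test does with `byteorder` (the helper name is illustrative):

// Sketch: build the 6-byte instruction payload for the dice program.
// Byte 0: roll type (1 = roll under, 2 = roll over)
// Byte 1: threshold (2..=98)
// Bytes 2..6: bet amount as little-endian u32
use byteorder::{ByteOrder, LittleEndian};

fn pack_dice_instruction(roll_type: u8, threshold: u8, bet_amount: u32) -> Vec<u8> {
    let mut data = vec![0u8; 6];
    data[0] = roll_type;
    data[1] = threshold;
    LittleEndian::write_u32(&mut data[2..6], bet_amount);
    data
}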
lib.rs
use byteorder::{ByteOrder, LittleEndian}; use num_derive::FromPrimitive; use solana_program::{ account_info::next_account_info, account_info::AccountInfo, decode_error::DecodeError, entrypoint, entrypoint::ProgramResult, msg, program_error::ProgramError, program_pack::{Pack, Sealed}, pubkey::Pubkey, rent::Rent, sysvar::{self, Sysvar}, }; use thiserror::Error; #[derive(Clone, Debug, Eq, Error, FromPrimitive, PartialEq)] pub enum
{ #[error("Unexpected Roll Mode")] UnexpectedRollMode, #[error("Incrrect Threshold")] IncorrectThreshold, #[error("Incorrect Owner")] IncorrectOwner, #[error("Account Not Rent Exempt")] AccountNotRentExempt, #[error("Account Not Balance Account")] AccountNotBalanceAccount, #[error("Not Enough Balance")] NotEnoughBalance, #[error("Invalid Bet")] InvalidBet, } impl From<DiceErr> for ProgramError { fn from(e: DiceErr) -> Self { ProgramError::Custom(e as u32) } } impl<T> DecodeError<T> for DiceErr { fn type_of() -> &'static str { "Dice Error" } } // Instruction data pub struct Dice { pub roll_type: u8, pub threshold: u8, pub bet_amount: u32, } impl Sealed for Dice {} impl Pack for Dice { const LEN: usize = 6; fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> { let roll_type = src[0]; //println!("Roll Type: {}", roll_type); if roll_type!= 1 && roll_type!= 2 { msg!("You should roll under (1) or Roll Over (2)"); return Err(DiceErr::UnexpectedRollMode.into()); } let threshold = src[1]; //println!("Threshold: {}", threshold); if threshold < 2 || threshold > 98 { msg!("Your guess has to in between 2 and 98"); return Err(DiceErr::IncorrectThreshold.into()); } let bet_amount = LittleEndian::read_u32(&src[2..6]); //println!("Bet: {}", bet_amount); Ok(Dice { roll_type, threshold, bet_amount}) } fn pack_into_slice(&self, _dst: &mut [u8]) {} } // Player's Balance structure, which is one 4 byte u32 number pub struct PlayerBalance { pub balance: u32, } impl Sealed for PlayerBalance {} impl Pack for PlayerBalance { const LEN: usize = 4; fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> { Ok(PlayerBalance { balance: LittleEndian::read_u32(&src[0..4]), }) } fn pack_into_slice(&self, dst: &mut [u8]) { LittleEndian::write_u32(&mut dst[0..4], self.balance); } } // Prize Pool structure, which is a 4 byte u32 number pub struct PrizePool { pub pool_amount: u32, } impl Sealed for PrizePool {} impl Pack for PrizePool { const LEN: usize = 4; fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> { Ok(PrizePool { pool_amount: LittleEndian::read_u32(&src[0..4]), }) } fn pack_into_slice(&self, dst: &mut [u8]) { LittleEndian::write_u32(&mut dst[0..4], self.pool_amount); } } // Declare and export the program's entrypoint entrypoint!(process_instruction); // Program entrypoint's implementation fn process_instruction( program_id: &Pubkey, // Public key of program account accounts: &[AccountInfo], // data accounts instruction_data: &[u8], // First Element: Roll type, Second Element: Threshold, [2..6] Bet Amount ) -> ProgramResult { msg!("Rust program entrypoint"); // get Dice information let roll_type = Dice::unpack_unchecked(&instruction_data)?.roll_type; msg!("Roll Type: {}", roll_type); let threshold = Dice::unpack_unchecked(&instruction_data)?.threshold; msg!("Threshold: {}", threshold); let bet_amount = Dice::unpack_unchecked(&instruction_data)?.bet_amount; msg!("Bet: {}", bet_amount); // Iterating accounts is safer then indexing let accounts_iter = &mut accounts.iter(); // Get the account that holds the Prize Pool let prize_pool_account = next_account_info(accounts_iter)?; // The account must be owned by the program in order to modify its data if prize_pool_account.owner!= program_id { msg!( "Prize Pool account ({}) not owned by program, actual: {}, expected: {}", prize_pool_account.key, prize_pool_account.owner, program_id ); return Err(DiceErr::IncorrectOwner.into()); } // Get the account that holds the balance for the players let player_balance_account = 
next_account_info(accounts_iter)?; // The check account must be owned by the program in order to modify its data if player_balance_account.owner!= program_id { msg!("Check account not owned by program"); return Err(DiceErr::IncorrectOwner.into()); } // The account must be rent exempt, i.e. live forever let sysvar_account = next_account_info(accounts_iter)?; let rent = &Rent::from_account_info(sysvar_account)?; if!sysvar::rent::check_id(sysvar_account.key) { msg!("Rent system account is not rent system account"); return Err(ProgramError::InvalidAccountData); } if!rent.is_exempt(player_balance_account.lamports(), player_balance_account.data_len()) { msg!("Balance account is not rent exempt"); return Err(DiceErr::AccountNotRentExempt.into()); } // the player let player_account = next_account_info(accounts_iter)?; if!player_account.is_signer { msg!("Player account is not signer"); return Err(ProgramError::MissingRequiredSignature); } let expected_check_account_pubkey = Pubkey::create_with_seed(player_account.key, "checkvote", program_id)?; if expected_check_account_pubkey!= *player_balance_account.key { msg!("Voter fraud! not the correct balance_account"); return Err(DiceErr::AccountNotBalanceAccount.into()); } let mut balance_data = player_balance_account.try_borrow_mut_data()?; // this unpack reads and deserialises the account data and also checks the data is the correct length let mut player_balance = PlayerBalance::unpack_unchecked(&balance_data).expect("Failed to read PlayerBalance"); // Handle the bet_amount and the balance /*if vote_check.voted_for!= 0 { msg!("Voter fraud! You already voted"); return Err(VoteError::AlreadyVoted.into()); }*/ let mut prize_pool_data = prize_pool_account.try_borrow_mut_data()?; let mut prize_pool = PrizePool::unpack_unchecked(&prize_pool_data).expect("Failed to read PrizePool"); /////////////////////// // Jut for Debug if player_balance.balance == 0 { msg!{"Airdrop some money!!!"}; player_balance.balance = 50; } if prize_pool.pool_amount == 0 { msg!{"Airdrop some money!!!"}; prize_pool.pool_amount = 1000; } // Check the valid of the bet amount if bet_amount > player_balance.balance { msg!("Not Enough Balance"); return Err(DiceErr::NotEnoughBalance.into()); } if bet_amount == 0 { msg!("Inavalid Bet"); return Err(DiceErr::InvalidBet.into()); } let lucky_number:u8 = 20; println!("Result {}", lucky_number); let mut win_amount:u32 = 0; if (1 == roll_type && lucky_number <= threshold) || (2 == roll_type && lucky_number >= threshold) { if lucky_number <= 25 || lucky_number >= 75 { win_amount = bet_amount as u32 * 2; msg!("Win: {}", win_amount); }else{ win_amount = bet_amount as u32; msg!("Win: {}", win_amount); } } if win_amount == 0 { prize_pool.pool_amount += bet_amount; player_balance.balance -= bet_amount; msg!("You Lose!"); }else{ prize_pool.pool_amount -= win_amount; player_balance.balance += win_amount; msg!("You Win!"); } PrizePool::pack(prize_pool, &mut prize_pool_data).expect("Failed to write Prize Pool"); PlayerBalance::pack(player_balance, &mut balance_data).expect("Failed to write Player Balance"); Ok(()) } #[cfg(test)] mod test { use super::*; use solana_program::instruction::InstructionError::Custom; use solana_program::{ instruction::{AccountMeta, Instruction}, pubkey::Pubkey, sysvar, }; use solana_program_test::*; use solana_sdk::transaction::TransactionError; use solana_sdk::{ account::Account, signature::{Keypair, Signer}, transaction::Transaction, }; use std::mem; use self::tokio; impl From<DiceErr> for TransactionError { fn from(e: DiceErr) 
        -> Self {
            TransactionError::InstructionError(0, Custom(e as u32))
        }
    }

    #[tokio::test]
    async fn test_sanity1() {
        //++++++++++++++++++++++++++++++++++++
        // TEST: Simply place a bet
        //++++++++++++++++++++++++++++++++++++

        let program_id = Pubkey::new_unique();
        let mut program_test =
            ProgramTest::new("dice", program_id, processor!(process_instruction));

        // Mock contract data account
        let game_key = Pubkey::new_unique();
        let mut data: Vec<u8> = vec![0; 4 * mem::size_of::<u8>()];
        LittleEndian::write_u32(&mut data[0..4], 1000); // set prize pool to 1000
        println!("Prize Pool {:?}", data);
        program_test.add_account(
            game_key,
            Account {
                lamports: 60000,
                data,
                owner: program_id,
                executable: false,
                rent_epoch: 0,
            },
        );

        // Player account
        let player_keypair = Keypair::new();
        let player_key = player_keypair.pubkey();

        // Mock player balance account data
        let balance_key = Pubkey::create_with_seed(&player_key, "checkvote", &program_id).unwrap(); // derived (correctly)
        let mut data = vec![0; mem::size_of::<u32>()];
        LittleEndian::write_u32(&mut data[0..4], 50); // set storage to 50
        program_test.add_account(
            balance_key,
            Account {
                lamports: 1000000,
                data,
                owner: program_id,
                executable: false,
                rent_epoch: 0,
            },
        );

        let (mut banks_client, payer, recent_blockhash) = program_test.start().await;

        let game_account = banks_client.get_account(game_key).await.unwrap().unwrap();
        let prize_pool_amount =
            PrizePool::unpack_unchecked(&game_account.data).expect("Failed to read Prize Pool");
        assert_eq!(prize_pool_amount.pool_amount, 1000);

        // Roll Under
        let accounts = vec![
            AccountMeta::new(game_key, false),
            AccountMeta::new(balance_key, false),
            AccountMeta::new_readonly(sysvar::rent::id(), false),
            AccountMeta::new(player_key, true),
        ];

        let mut bet = vec![0; 6 * mem::size_of::<u8>()];
        bet[0] = 1; // Roll Under
        bet[1] = 30; // Threshold 30
        LittleEndian::write_u32(&mut bet[2..6], 10); // Bet 10
        println!("Instruction Data {:?}", bet);

        let mut transaction = Transaction::new_with_payer(
            &[Instruction {
                program_id,
                accounts,
                data: bet,
            }],
            Some(&payer.pubkey()),
        );
        transaction.sign(&[&payer, &player_keypair], recent_blockhash);
        let a = banks_client.process_transaction(transaction).await;
        println!("Test Log {:?}", a);

        let game_account = banks_client.get_account(game_key).await.unwrap().unwrap();
        let prize_pool_check =
            PrizePool::unpack_unchecked(&game_account.data).expect("Failed to read Prize Pool");
        assert_eq!(prize_pool_check.pool_amount, 980);

        let player = banks_client.get_account(balance_key).await.unwrap().unwrap();
        let bal_check = PlayerBalance::unpack_unchecked(&player.data).expect("Failed to read Balance");
        assert_eq!(bal_check.balance, 70);
    }
}
DiceErr
identifier_name
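The Dice instruction above packs its whole payload into exactly six bytes: roll_type, threshold, and a little-endian u32 bet amount. Below is a minimal, dependency-free sketch of that same layout; it uses std's to_le_bytes/from_le_bytes instead of the byteorder crate and a plain String error, so the type and error names here are illustrative and not part of the on-chain program.

// Standalone sketch of the 6-byte instruction layout:
// byte 0 = roll_type (1 = roll under, 2 = roll over),
// byte 1 = threshold (2..=98),
// bytes 2..6 = bet_amount as a little-endian u32.
#[derive(Debug, PartialEq)]
struct DiceInstruction {
    roll_type: u8,
    threshold: u8,
    bet_amount: u32,
}

fn encode(ix: &DiceInstruction) -> [u8; 6] {
    let mut buf = [0u8; 6];
    buf[0] = ix.roll_type;
    buf[1] = ix.threshold;
    buf[2..6].copy_from_slice(&ix.bet_amount.to_le_bytes());
    buf
}

fn decode(src: &[u8]) -> Result<DiceInstruction, String> {
    if src.len() < 6 {
        return Err("instruction data must be at least 6 bytes".to_string());
    }
    let roll_type = src[0];
    if roll_type != 1 && roll_type != 2 {
        return Err("roll_type must be 1 (roll under) or 2 (roll over)".to_string());
    }
    let threshold = src[1];
    if threshold < 2 || threshold > 98 {
        return Err("threshold must be between 2 and 98".to_string());
    }
    let bet_amount = u32::from_le_bytes([src[2], src[3], src[4], src[5]]);
    Ok(DiceInstruction { roll_type, threshold, bet_amount })
}

fn main() {
    // Same values the test uses: roll under, threshold 30, bet 10.
    let ix = DiceInstruction { roll_type: 1, threshold: 30, bet_amount: 10 };
    let bytes = encode(&ix);
    assert_eq!(bytes, [1, 30, 10, 0, 0, 0]);
    assert_eq!(decode(&bytes).unwrap(), ix);
}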
main.rs
extern crate csv; extern crate serde; // This lets us write `#[derive(Deserialize)]`. #[macro_use] extern crate serde_derive; use std::collections::HashMap; use std::env; use std::fs::File; use std::io; use std::{error::Error, ffi::OsString, process}; // fn main_not_recover() { // println!("Hello, world!"); // let mut rds = csv::Reader::from_reader(io::stdin()); // for result in rds.records() { // // expectは、Error時にpanicを発生させるので、バッドプラクティスである。 // let record = result.expect("a csv record"); // println!("{:?}", record); // } // } fn main() { println!("Hello, world!"); match performance_up_read_csv_to_model() { Ok(count) => println!("{:?}", count), Err(err) => { println!("{}", err); process::exit(1); } } } // error 処理の練習 fn main_recorver() { println!("Hellssdfgsdf"); let mut rds = csv::Reader::from_reader(io::stdin()); for result in rds.records() { match result { Ok(r) => println!("{:?}", r), // こうすることで、回復可能なエラー処理になる。 Err(e) => println!("{:?}", e), } } } // read and write csv test fn main_csv() { println!("Hello, world!"); // if let 文で、Errの場合のみの処理を、{}内に記載できる。<これ便利だ! if let Err(err) = read_and_write_csv_model() { println!("{}", err); process::exit(1); } } fn run_match() -> Result<(), Box<dyn Error>> { let mut rds = csv::Reader::from_reader(io::stdin()); for result in rds.records() { match result { // 先に書いて、returnするんだって。 Err(e) => return Err(From::from(e)), Ok(r) => println!("{:?}", r), } } Ok(()) } fn run_question() -> Result<(), Box<dyn Error>> { let mut rds = csv::Reader::from_reader(io::stdin()); for result in rds.records() { // ?を使うことで可読性が上がる! let a = result?; println!("{:?}", a); } Ok(()) } fn read_csv_file() -> Result<(), Box<dyn Error>> { let file_path = get_file_path()?; let file = File::open(file_path)?; let mut rdr = csv::Reader::from_reader(file); // ここでヘッダーを読み込みたいとする。 // ① clone()する。 // ただし、メモリにコピーをとる代償が伴う。 // let headers = rdr.headers()?.clone(); { // lifetimeのために、この呼び出しはそれ所有スコープでネストされている。 // ② スコープをネストさせる。 // 所有権が奪われて、以降のイテレーションができなくなる。 // <なるほど。逆にこういうテクニックがあるということか。 let headers = rdr.headers()?; println!("{:?}", headers); } for result in rdr.records() { let record = result?; println!("{:?}", record); } Ok(()) } fn read_csv_file2() -> Result<(), Box<dyn Error>> { let file_path = get_file_path()?; let mut rdr = csv::ReaderBuilder::new() .has_headers(true) .from_path(file_path)?; for result in rdr.records() { let record = result?; println!("{:?}", record); } Ok(()) } fn get_file_path() -> Result<OsString, Box<dyn Error>> { match env::args_os().nth(1) { None => Err(From::from("expected 1 argument, but got none")), Some(file_path) => Ok(file_path), } } fn read_csv_file3() { let mut rdr = csv::ReaderBuilder::new() .has_headers(false) .delimiter(b';') .double_quote(false) .flexible(true) .comment(Some(b'#')) .from_reader(io::stdin()); // setting可能。<柔軟 } type Record = (String, String, Option<u64>, f64, f64); fn read_csv_file4() -> Result<(), Box<dyn Error>> { let file_path = get_file_path()?; let mut rdr = csv::ReaderBuilder::new() .has_headers(true) .from_path(file_path)?; for result in rdr.deserialize() { let record: Record3 = result?; println!("{:?}", record); } Ok(()) } type Record2 = HashMap<String, String>; fn read_csv_file5() -> Result<(), Box<dyn Error>> { let file_path = get_file_path()?; let mut rdr = csv::ReaderBuilder::new() .has_headers(true) .from_path(file_path)?; for result in rdr.deserialize() { let record: Record2 = result?; println!("{:?}", record); } Ok(()) } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] struct Record3 { latitude: f64, longitude: 
f64, // error時に、自動的にNoneにしてくれるオプション #[serde(deserialize_with = "csv::invalid_option")] population: Option<f64>, city: String, state: String, } fn write_csv() -> Result<(), Box<dyn Error>> { // let mut wtr = csv::Writer::from_writer(io::stdout()); let mut wtr = csv::WriterBuilder::new() .delimiter(b'\t') .quote_style(csv::QuoteStyle::NonNumeric) .from_writer(io::stdout()); // AsRef<[u8]>境界はString, &str, Vec<u8>のような型がすべて条件を満たすため有用である。 wtr.write_record(&["City", "State", "Population", "Latitude", "Longitude"])?; wtr.write_record(&["Davidsons Landing", "AK", "", "65.2419444", "-165.2716667"])?; wtr.write_record(&["Kenai", "AK", "7610", "60.5544444", "-151.2583333"])?; wtr.write_record(&["Oakman", "AL", "", "33.7133333", "-87.3886111"])?; wtr.flush()?; Ok(()) } // borrowされた&strを、ownedなString型で置き換えるということは、 // レコードを書き込むたびにcityとstate双方の新しいStringをアロケートしなければならないことを意味する。 // これでも書き込みはできるにはできるのだが、メモリとパフォーマンスを少しばかり無駄遣いしている。 #[derive(Debug, Serialize)] #[serde(rename_all = "PascalCase")] struct WriteRecord<'a> { city: &'a str, state: &'a str, population: Option<u64>, latitude: f64, longitude: f64, } fn write_csv2() -> Result<(), Box<dyn Error>> { let mut wtr = csv::Writer::from_writer(io::stdout()); wtr.serialize(WriteRecord { city: "Davidsons Landing", state: "AK", population: None, latitude: 65.2419444, longitude: -165.2716667, })?; wtr.serialize(WriteRecord { city: "Kenai", state: "AK", population: Some(7610), latitude: 60.5544444, longitude: -151.2583333, })?; wtr.serialize(WriteRecord { city: "Oakman", state: "AL", population: None, latitude: 33.7133333, longitude: -87.3886111, })?; wtr.flush()?; Ok(()) } fn read_and_write_csv() -> Result<(), Box<dyn Error>> { let argss = match env::args_os().nth(1) { None => return Err(From::from("expected 1 argument, but got none")), Some(argument) => argument, }; // CSVリーダー(stdin)とCSVライター(stdout)を構築する let mut rdr = csv::Reader::from_reader(io::stdin()); let mut wtr = csv::Writer::from_writer(io::stdout()); wtr.write_record(rdr.headers()?)?; for result in rdr.records() { let record = result?; if record.iter().any(|r| r == &argss) { wtr.write_record(&record); } } wtr.flush()?; Ok(()) } // utf-8に変換できない場合の対処法。 // byteで読み込む!!! 
fn read_and_write_byte_csv() -> Result<(), Box<dyn Error>> { let argss = match env::args().nth(1) { None => return Err(From::from("expected 1 argument, but got none")), Some(argument) => argument, }; // CSVリーダー(stdin)とCSVライター(stdout)を構築する let mut rdr = csv::Reader::from_reader(io::stdin()); let mut wtr = csv::Writer::from_writer(io::stdout()); wtr.write_record(rdr.byte_headers()?)?; for result in rdr.byte_records() { let record = result?; // argss.as_bytes() 戻りが、参照なのね。 if record.iter().any(|r| r == argss.as_bytes()) { wtr.write_record(&record); } } wtr.flush()?; Ok(()) } // 前回の例と違い、デシリアライズとシリアライズ両方をderiveする // これは型から自動的にデシリアライズとシリアライズを行えるということである #[derive(Debug, Deserialize, Serialize)] #[serde(rename_all = "PascalCase")] struct RecordMulti { city: String, state: String, population: Option<u64>, latitude: f64, } fn read_and_write_csv_model() -> Result<(), Box<dyn Error>> { // クエリとなる固定引数を受け取る // もし引数が与えられないか整数でない場合はエラーを返す let minimum_pop: u64 = match env::args().nth(1) { None => return Err(From::from("expected 1 argument, but got none")), Some(arg) => arg.parse::<u64>()?, }; let mut rdr = csv::Reader::from_reader(io::stdin()); let mut wtr = csv::Writer::from_writer(io::stdout()); for result in rdr.deserialize() { let record: RecordMulti = result?; if record.population.map_or(false, |f| f >= minimum_pop) { wtr.serialize(&record)?; } } wtr.flush()?; Ok(()) } //./csv_example < worldcitiespop.csv 2.12s user 0.09s system 70% cpu 3.125 total fn performance_read_csv() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); let mut count = 0; for result in reader.records() { let record = result?; if &record[0] == "us" && &record[3] == "MA" { count += 1; } } Ok(count) } //./csv_example < worldcitiespop.csv 1.69s user 0.05s system 34%
// 一度だけ、メモ リにアロケーションする。読み込まれるたびに上書きされていくため、高速化する。 let mut record = csv::ByteRecord::new(); let mut count = 0; while reader.read_byte_record(&mut record)? { if &record[0] == b"us" && &record[3] == b"MA" { count += 1; } } Ok(count) } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] struct RecordPerformance { country: String, city: String, accent_city: String, region: String, population: Option<u64>, latitude: f64, longitude: f64, } //./csv_example < worldcitiespop.csv 3.66s user 0.11s system 85% cpu 4.396 total fn performance_read_csv_to_model() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); let mut count = 0; for result in reader.deserialize() { let record: RecordPerformance = result?; if &record.country == "us" && &record.region == "MA" { count += 1; } } Ok(count) } // 生存期間をつけて、さらに参照型のstrに変更する。 //tutorial-perf-serde-02.rs #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] struct RecordPerfomanceUp<'a> { city: &'a str, country: &'a str, accent_city: &'a str, region: &'a str, population: Option<u64>, latitude: f64, longitude: f64, } //./csv_example < worldcitiespop.csv 1.14s user 0.04s system 97% cpu 1.216 total fn performance_up_read_csv_to_model() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); let mut raw_record = csv::StringRecord::new(); let headers = reader.headers()?.clone(); let mut count = 0; // while reader.read_record(&mut raw_record)? { // let record: RecordPerfomanceUp = raw_record.deserialize(Some(&headers))?; // if record.country == "us" && record.region == "MA" { // count += 1; // } // } for result in reader.deserialize() { let record: RecordPerformance = result?; if record.country == "us" && record.region == "MA" { count += 1; } } Ok(count) }
cpu 5.094 total // String からbyteで処理をするように変更した。 fn performance2_read_csv() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); let mut count = 0; for result in reader.byte_records() { let record = result?; if &record[0] == b"us" && &record[3] == b"MA" { count += 1; } } Ok(count) } // ./csv_example < worldcitiespop.csv 0.44s user 0.04s system 22% cpu 2.142 total // reader.record()は、イテレータをどんどん返す(アロケートしながら) // だから、1回だけにして、アロケーションの回数を減らす。 fn performance3_read_csv() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin());
identifier_body
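The performance3_read_csv variant above gets its speed-up by allocating a single ByteRecord up front and overwriting it on every read instead of allocating a fresh record per iteration. Here is a small self-contained sketch of the same pattern, run against an in-memory CSV rather than stdin; it assumes the same csv crate as the tutorial, and the sample data uses a trimmed-down version of the worldcitiespop column layout purely for illustration.

use std::error::Error;

fn count_ma_cities(data: &str) -> Result<u64, Box<dyn Error>> {
    let mut reader = csv::Reader::from_reader(data.as_bytes());
    // One ByteRecord is allocated once and reused, so the hot loop does no
    // per-row allocation.
    let mut record = csv::ByteRecord::new();
    let mut count = 0;
    while reader.read_byte_record(&mut record)? {
        if &record[0] == b"us" && &record[3] == b"MA" {
            count += 1;
        }
    }
    Ok(count)
}

fn main() -> Result<(), Box<dyn Error>> {
    let data = "\
Country,City,AccentCity,Region
us,boston,Boston,MA
us,seattle,Seattle,WA
us,springfield,Springfield,MA
";
    assert_eq!(count_ma_cities(data)?, 2);
    Ok(())
}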
main.rs
extern crate csv; extern crate serde; // This lets us write `#[derive(Deserialize)]`. #[macro_use] extern crate serde_derive; use std::collections::HashMap; use std::env; use std::fs::File; use std::io; use std::{error::Error, ffi::OsString, process}; // fn main_not_recover() { // println!("Hello, world!"); // let mut rds = csv::Reader::from_reader(io::stdin()); // for result in rds.records() { // // expectは、Error時にpanicを発生させるので、バッドプラクティスである。 // let record = result.expect("a csv record"); // println!("{:?}", record); // } // } fn main() { println!("Hello, world!"); match performance_up_read_csv_to_model() { Ok(count) => println!("{:?}", count), Err(err) => { println!("{}", err); process::exit(1); } } } // error 処理の練習 fn main_recorver() { println!("Hellssdfgsdf"); let mut rds = csv::Reader::from_reader(io::stdin()); for result in rds.records() { match result { Ok(r) => println!("{:?}", r), // こうすることで、回復可能なエラー処理になる。 Err(e) => println!("{:?}", e), } } } // read and write csv test fn main_csv() { println!("Hello, world!"); // if let 文で、Errの場合のみの処理を、{}内に記載できる。<これ便利だ! if let Err(err) = read_and_write_csv_model() { println!("{}", err); process::exit(1); } } fn run_match() -> Result<(), Box<dyn Error>> { let mut rds = csv::Reader::from_reader(io::stdin()); for result in rds.records() { match result { // 先に書いて、returnするんだって。 Err(e) => return Err(From::from(e)), Ok(r) => println!("{:?}", r), } } Ok(()) } fn run_question() -> Result<(), Box<dyn Error>> { let mut rds = csv::Reader::from_reader(io::stdin()); for result in rds.records() { // ?を使うことで可読性が上がる! let a = result?; println!("{:?}", a); } Ok(()) } fn read_csv_file() -> Result<(), Box<dyn Error>> { let file_path = get_file_path()?; let file = File::open(file_path)?; let mut rdr = csv::Reader::from_reader(file); // ここでヘッダーを読み込みたいとする。 // ① clone()する。 // ただし、メモリにコピーをとる代償が伴う。 // let headers = rdr.headers()?.clone(); { // lifetimeのために、この呼び出しはそれ所有スコープでネストされている。 // ② スコープをネストさせる。 // 所有権が奪われて、以降のイテレーションができなくなる。 // <なるほど。逆にこういうテクニックがあるということか。 let headers = rdr.headers()?; println!("{:?}", headers); } for result in rdr.records() { let record = result?; println!("{:?}", record); } Ok(()) } fn read_csv_file2() -> Result<(), Box<dyn Error>> { let file_path = get_file_path()?; let mut rdr = csv::ReaderBuilder::new() .has_headers(true) .from_path(file_path)?; for result in rdr.records() { let record = result?; println!("{:?}", record); } Ok(()) } fn get_file_path() -> Result<OsString, Box<dyn Error>> { match env::args_os().nth(1) { None => Err(From::from("expected 1 argument, but got none")), Some(file_path) => Ok(file_path), } } fn read_csv_file3() { let mut rdr = csv::ReaderBuilder::new() .has_headers(false) .delimiter(b';') .double_quote(false) .flexible(true) .comment(Some(b'#')) .from_reader(io::stdin()); // setting可能。<柔軟 } type Record = (String, String, Option<u64>, f64, f64); fn read_csv_file4() -> Result<(), Box<dyn Error>> { let file_path = get_file_path()?; let mut rdr = csv::ReaderBuilder::new() .has_headers(true) .from_path(file_path)?; for result in rdr.deserialize() { let record: Record3 = result?; println!("{:?}", record); } Ok(()) } type Record2 = HashMap<String, String>; fn read_csv_file5() -> Result<(), Box<dyn Error>> { let file_path = get_file_path()?; let mut rdr = csv::ReaderBuilder::new() .has_headers(true) .from_path(file_path)?; for result in rdr.deserialize() { let record: Record2 = result?; println!("{:?}", record); } Ok(()) } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] struct Record3 { latitude: f64, longitude: 
f64, // error時に、自動的にNoneにしてくれるオプション #[serde(deserialize_with = "csv::invalid_option")] population: Option<f64>, city: String, state: String, } fn write_csv() -> Result<(), Box<dyn Error>> { // let mut wtr = csv::Writer::from_writer(io::stdout()); let mut wtr = csv::WriterBuilder::new() .delimiter(b'\t') .quote_style(csv::QuoteStyle::NonNumeric) .from_writer(io::stdout()); // AsRef<[u8]>境界はString, &str, Vec<u8>のような型がすべて条件を満たすため有用である。 wtr.write_record(&["City", "State", "Population", "Latitude", "Longitude"])?; wtr.write_record(&["Davidsons Landing", "AK", "", "65.2419444", "-165.2716667"])?; wtr.write_record(&["Kenai", "AK", "7610", "60.5544444", "-151.2583333"])?; wtr.write_record(&["Oakman", "AL", "", "33.7133333", "-87.3886111"])?; wtr.flush()?; Ok(()) } // borrowされた&strを、ownedなString型で置き換えるということは、 // レコードを書き込むたびにcityとstate双方の新しいStringをアロケートしなければならないことを意味する。 // これでも書き込みはできるにはできるのだが、メモリとパフォーマンスを少しばかり無駄遣いしている。 #[derive(Debug, Serialize)] #[serde(rename_all = "PascalCase")] struct WriteRecord<'a> { city: &'a str, state: &'a str, population: Option<u64>, latitude: f64, longitude: f64, } fn write_csv2() -> Result<(), Box<dyn Error>> { let mut wtr = csv::Writer::from_writer(io::stdout()); wtr.serialize(WriteRecord { city: "Davidsons Landing", state: "AK", population: None, latitude: 65.2419444, longitude: -165.2716667, })?; wtr.serialize(WriteRecord { city: "Kenai", state: "AK", population: Some(7610), latitude: 60.5544444, longitude: -151.2583333, })?; wtr.serialize(WriteRecord { city: "Oakman", state: "AL", population: None, latitude: 33.7133333, longitude: -87.3886111, })?; wtr.flush()?; Ok(()) } fn read_and_write_csv() -> Result<(), Box<dyn Error>> { let argss = match env::args_os().nth(1) { None => return Err(From::from("expected 1 argument, but got none")), Some(argument) => argument, }; // CSVリーダー(stdin)とCSVライター(stdout)を構築する let mut rdr = csv::Reader::from_reader(io::stdin()); let mut wtr = csv::Writer::from_writer(io::stdout()); wtr.write_record(rdr.headers()?)?; for result in rdr.records() { let record = result?; if record.iter().any(|r| r == &argss) { wtr.write_record(&record); }
} // utf-8に変換できない場合の対処法。 // byteで読み込む!!! fn read_and_write_byte_csv() -> Result<(), Box<dyn Error>> { let argss = match env::args().nth(1) { None => return Err(From::from("expected 1 argument, but got none")), Some(argument) => argument, }; // CSVリーダー(stdin)とCSVライター(stdout)を構築する let mut rdr = csv::Reader::from_reader(io::stdin()); let mut wtr = csv::Writer::from_writer(io::stdout()); wtr.write_record(rdr.byte_headers()?)?; for result in rdr.byte_records() { let record = result?; // argss.as_bytes() 戻りが、参照なのね。 if record.iter().any(|r| r == argss.as_bytes()) { wtr.write_record(&record); } } wtr.flush()?; Ok(()) } // 前回の例と違い、デシリアライズとシリアライズ両方をderiveする // これは型から自動的にデシリアライズとシリアライズを行えるということである #[derive(Debug, Deserialize, Serialize)] #[serde(rename_all = "PascalCase")] struct RecordMulti { city: String, state: String, population: Option<u64>, latitude: f64, } fn read_and_write_csv_model() -> Result<(), Box<dyn Error>> { // クエリとなる固定引数を受け取る // もし引数が与えられないか整数でない場合はエラーを返す let minimum_pop: u64 = match env::args().nth(1) { None => return Err(From::from("expected 1 argument, but got none")), Some(arg) => arg.parse::<u64>()?, }; let mut rdr = csv::Reader::from_reader(io::stdin()); let mut wtr = csv::Writer::from_writer(io::stdout()); for result in rdr.deserialize() { let record: RecordMulti = result?; if record.population.map_or(false, |f| f >= minimum_pop) { wtr.serialize(&record)?; } } wtr.flush()?; Ok(()) } //./csv_example < worldcitiespop.csv 2.12s user 0.09s system 70% cpu 3.125 total fn performance_read_csv() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); let mut count = 0; for result in reader.records() { let record = result?; if &record[0] == "us" && &record[3] == "MA" { count += 1; } } Ok(count) } //./csv_example < worldcitiespop.csv 1.69s user 0.05s system 34% cpu 5.094 total // String からbyteで処理をするように変更した。 fn performance2_read_csv() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); let mut count = 0; for result in reader.byte_records() { let record = result?; if &record[0] == b"us" && &record[3] == b"MA" { count += 1; } } Ok(count) } //./csv_example < worldcitiespop.csv 0.44s user 0.04s system 22% cpu 2.142 total // reader.record()は、イテレータをどんどん返す(アロケートしながら) // だから、1回だけにして、アロケーションの回数を減らす。 fn performance3_read_csv() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); // 一度だけ、メモリにアロケーションする。読み込まれるたびに上書きされていくため、高速化する。 let mut record = csv::ByteRecord::new(); let mut count = 0; while reader.read_byte_record(&mut record)? 
{ if &record[0] == b"us" && &record[3] == b"MA" { count += 1; } } Ok(count) } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] struct RecordPerformance { country: String, city: String, accent_city: String, region: String, population: Option<u64>, latitude: f64, longitude: f64, } //./csv_example < worldcitiespop.csv 3.66s user 0.11s system 85% cpu 4.396 total fn performance_read_csv_to_model() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); let mut count = 0; for result in reader.deserialize() { let record: RecordPerformance = result?; if &record.country == "us" && &record.region == "MA" { count += 1; } } Ok(count) } // 生存期間をつけて、さらに参照型のstrに変更する。 //tutorial-perf-serde-02.rs #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] struct RecordPerfomanceUp<'a> { city: &'a str, country: &'a str, accent_city: &'a str, region: &'a str, population: Option<u64>, latitude: f64, longitude: f64, } //./csv_example < worldcitiespop.csv 1.14s user 0.04s system 97% cpu 1.216 total fn performance_up_read_csv_to_model() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); let mut raw_record = csv::StringRecord::new(); let headers = reader.headers()?.clone(); let mut count = 0; // while reader.read_record(&mut raw_record)? { // let record: RecordPerfomanceUp = raw_record.deserialize(Some(&headers))?; // if record.country == "us" && record.region == "MA" { // count += 1; // } // } for result in reader.deserialize() { let record: RecordPerformance = result?; if record.country == "us" && record.region == "MA" { count += 1; } } Ok(count) }
} wtr.flush()?; Ok(())
random_line_split
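One thing worth noting in the read_and_write_csv / read_and_write_byte_csv filters above is that the Result returned by wtr.write_record(&record) is silently dropped. A short sketch of the same filter shape that propagates write errors with ?, reading from and writing to in-memory buffers (it assumes the same csv crate; the data and names are illustrative):

use std::error::Error;

// Keep only the rows where any field equals `query`, propagating every
// write error instead of ignoring it.
fn filter_csv(input: &str, query: &str) -> Result<String, Box<dyn Error>> {
    let mut rdr = csv::Reader::from_reader(input.as_bytes());
    let mut wtr = csv::Writer::from_writer(vec![]);
    wtr.write_record(rdr.headers()?)?;
    for result in rdr.records() {
        let record = result?;
        if record.iter().any(|field| field == query) {
            wtr.write_record(&record)?;
        }
    }
    Ok(String::from_utf8(wtr.into_inner()?)?)
}

fn main() -> Result<(), Box<dyn Error>> {
    let input = "City,State\nKenai,AK\nOakman,AL\n";
    let out = filter_csv(input, "AK")?;
    assert_eq!(out, "City,State\nKenai,AK\n");
    Ok(())
}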
main.rs
extern crate csv; extern crate serde; // This lets us write `#[derive(Deserialize)]`. #[macro_use] extern crate serde_derive; use std::collections::HashMap; use std::env; use std::fs::File; use std::io; use std::{error::Error, ffi::OsString, process}; // fn main_not_recover() { // println!("Hello, world!"); // let mut rds = csv::Reader::from_reader(io::stdin()); // for result in rds.records() { // // expectは、Error時にpanicを発生させるので、バッドプラクティスである。 // let record = result.expect("a csv record"); // println!("{:?}", record); // } // } fn main() { println!("Hello, world!"); match performance_up_read_csv_to_model() { Ok(count) => println!("{:?}", count), Err(err) => { println!("{}", err); process::exit(1); } } } // error 処理の練習 fn main_recorver() { println!("Hellssdfgsdf"); let mut rds = csv::Reader::from_reader(io::stdin()); for result in rds.records() { match result { Ok(r) => println!("{:?}", r), // こうすることで、回復可能なエラー処理になる。 Err(e) => println!("{:?}", e), } } } // read and write csv test fn main_csv() { println!("Hello, world!"); // if let 文で、Errの場合のみの処理を、{}内に記載できる。<これ便利だ! if let Err(err) = read_and_write_csv_model() { println!("{}", err); process::exit(1); } } fn run_match() -> Result<(), Box<dyn Error>> { let mut rds = csv::Reader::from_reader(io::stdin()); for result in rds.records() { match result { // 先に書いて、returnするんだって。 Err(e) => return Err(From::from(e)), Ok(r) => println!("{:?}", r), } } Ok(()) } fn run_question() -> Result<(), Box<dyn Error>> { let mut rds = csv::Reader::from_reader(io::stdin()); for result in rds.records() { // ?を使うことで可読性が上がる! let a = result?; println!("{:?}", a); } Ok(()) } fn read_csv_file() -> Result<(), Box<dyn Error>> { let file_path = get_file_path()?; let file = File::open(file_path)?; let mut rdr = csv::Reader::from_reader(file); // ここでヘッダーを読み込みたいとする。 // ① clone()する。 // ただし、メモリにコピーをとる代償が伴う。 // let headers = rdr.headers()?.clone(); { // lifetimeのために、この呼び出しはそれ所有スコープでネストされている。 // ② スコープをネストさせる。 // 所有権が奪われて、以降のイテレーションができなくなる。 // <なるほど。逆にこういうテクニックがあるということか。 let headers = rdr.headers()?; println!("{:?}", headers); } for result in rdr.records() { let record = result?; println!("{:?}", record); } Ok(()) } fn read_csv_file2() -> Result<(), Box<dyn Error>> { let file_path = get_file_path()?; let mut rdr = csv::ReaderBuilder::new() .has_headers(true) .from_path(file_path)?; for result in rdr.records() { let record = result?; println!("{:?}", record); } Ok(()) } fn get_file_path() -> Result<OsString, Box<dyn Error>> { match env::args_os().nth(1) { None => Err(From::from("expected 1 argument, but got none")), Some(file_path) => Ok(file_path), } } fn read_csv_file3() { let mut rdr = csv::ReaderBuilder::new() .has_headers(false) .delimiter(b';') .double_quote(false) .flexible(true) .comment(Some(b'#')) .from_reader(io::stdin()); // setting可能。<柔軟 } type Record = (String, String, Option<u64>, f64, f64); fn read_csv_file4() -> Result<(), Box<dyn Error>> { let file_path = get_file_path()?; let mut rdr = csv::ReaderBuilder::new() .has_headers(true) .from_path(file_path)?; for result in rdr.deserialize() { let record: Record3 = result?; println!("{:?}", record); } Ok(()) } type Record2 = HashMap<String, String>; fn read_csv_file5() -> Result<(), Box<dyn Error>> { let file_path = get_file_path()?; let mut rdr = csv::ReaderBuilder::new() .has_headers(true) .from_path(file_path)?; for result in rdr.deserialize() { let record: Record2 = result?; println!("{:?}", record); } Ok(()) } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] struct Record3 { latitude: f64, longitude: 
f64, // error時に、自動的にNoneにしてくれるオプション #[serde(deserialize_with = "csv::invalid_option")] population: Option<f64>, city: String, state: String, } fn write_csv() -> Result<(), Box<dyn Error>> { // let mut wtr = csv::Writer::from_writer(io::stdout()); let mut wtr = csv::WriterBuilder::new() .delimiter(b'\t') .quote_style(csv::QuoteStyle::NonNumeric) .from_writer(io::stdout()); // AsRef<[u8]>境界はString, &str, Vec<u8>のような型がすべて条件を満たすため有用である。 wtr.write_record(&["City", "State", "Population", "Latitude", "Longitude"])?; wtr.write_record(&["Davidsons Landing", "AK", "", "65.2419444", "-165.2716667"])?; wtr.write_record(&["Kenai", "AK", "7610", "60.5544444", "-151.2583333"])?; wtr.write_record(&["Oakman", "AL", "", "33.7133333", "-87.3886111"])?; wtr.flush()?; Ok(()) } // borrowされた&strを、ownedなString型で置き換えるということは、 // レコードを書き込むたびにcityとstate双方の新しいStringをアロケートしなければならないことを意味する。 // これでも書き込みはできるにはできるのだが、メモリとパフォーマンスを少しばかり無駄遣いしている。 #[derive(Debug, Serialize)] #[serde(rename_all = "PascalCase")] struct WriteRecord<'a> { city: &'a str, state: &'a str, population: Option<u64>, latitude: f64, longitude: f64, } fn write_csv2() -> Result<(), Box<dyn Error>> { let mut wtr = csv::Writer::from_writer(io::stdout()); wtr.serialize(WriteRecord { city: "Davidsons Landing", state: "AK", population: None, latitude: 65.2419444, longitude: -165.2716667, })?; wtr.serialize(WriteRecord { city: "Kenai", state: "AK", population: Some(7610), latitude: 60.5544444, longitude: -151.2583333, })?; wtr.serialize(WriteRecord { city: "Oakman", state: "AL", population: None, latitude: 33.7133333, longitude: -87.3886111, })?; wtr.flush()?; Ok(()) } fn read_and_write_csv() -> Result<(), Box<dyn Error>> { let argss = match env::args_os().nth(1) { None => return Err(From::from("expected 1 argument, but got none")), Some(argument) => argument, }; // CSVリーダー(stdin)とCSVライター(stdout)を構築する let mut rdr = csv::Reader::from_reader(io::stdin()); let mut wtr = csv::Writer::from_writer(io::stdout()); wtr.write_record(rdr.headers()?)?; for result in rdr.records() { let record = result?; if record.iter().any(|r| r == &argss) { wtr.write_record(&record); } } wtr.flush()?; Ok(()) } // utf-8に変換できない場合の対処法。 // byteで読み込む!!! 
fn read_and_write_byte_csv() -> Result<(), Box<dyn Error>> { let argss = match env::args().nth(1) { None => return Err(From::from("expected 1 argument, but got none")), Some(argument) => argument, }; // CSVリーダー(stdin)とCSVライター(stdout)を構築する let mut rdr = csv::Reader::from_reader(io::stdin()); let mut wtr = csv::Writer::from_writer(io::stdout()); wtr.write_record(rdr.byte_headers()?)?; for result in rdr.byte_records() { let record = result?; // argss.as_bytes() 戻りが、参照なのね。 if record.iter().any(|r| r == argss.as_bytes()) { wtr.write_record(&record); } } wtr.flush()?; Ok(()) } // 前回の例と違い、デシリアライズとシリアライズ両方をderiveする // これは型から自動的にデシリアライズとシリアライズを行えるということである #[derive(Debug, Deserialize, Serialize)] #[serde(rename_all = "PascalCase")] struct RecordMulti { city: String, state: String, population: Option<u64>, latitude: f64, } fn read_and_write_csv_model() -> Result<(), Box<dyn Error>> { // クエリとなる固定引数を受け取る // もし引数が与えられないか整数でない場合はエラーを返す let minimum_pop: u64 = match env::args().nth(1) { None => return Err(From::from("expected 1 argument, but got none")), Some(arg) => arg.parse::<u64>()?, }; let mut rdr = csv::Reader::from_reader(io::stdin()); let mut wtr = csv::Writer::from_writer(io::stdout()); for result in rdr.deserialize() { let record: RecordMulti = result?; if record.population.map_or(false, |f| f >= minimum_pop) { wtr.serialize(&record)?; } } wtr.flush()?; Ok(()) } //./csv_example < worldcitiespop.csv 2.12s user 0.09s system 70% cpu 3.125 total fn performance_read_csv() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); let mut count = 0; for result in reader.records() { let record = result?; if &rec
us" && &record[3] == "MA" { count += 1; } } Ok(count) } //./csv_example < worldcitiespop.csv 1.69s user 0.05s system 34% cpu 5.094 total // String からbyteで処理をするように変更した。 fn performance2_read_csv() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); let mut count = 0; for result in reader.byte_records() { let record = result?; if &record[0] == b"us" && &record[3] == b"MA" { count += 1; } } Ok(count) } //./csv_example < worldcitiespop.csv 0.44s user 0.04s system 22% cpu 2.142 total // reader.record()は、イテレータをどんどん返す(アロケートしながら) // だから、1回だけにして、アロケーションの回数を減らす。 fn performance3_read_csv() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); // 一度だけ、メモリにアロケーションする。読み込まれるたびに上書きされていくため、高速化する。 let mut record = csv::ByteRecord::new(); let mut count = 0; while reader.read_byte_record(&mut record)? { if &record[0] == b"us" && &record[3] == b"MA" { count += 1; } } Ok(count) } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] struct RecordPerformance { country: String, city: String, accent_city: String, region: String, population: Option<u64>, latitude: f64, longitude: f64, } //./csv_example < worldcitiespop.csv 3.66s user 0.11s system 85% cpu 4.396 total fn performance_read_csv_to_model() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); let mut count = 0; for result in reader.deserialize() { let record: RecordPerformance = result?; if &record.country == "us" && &record.region == "MA" { count += 1; } } Ok(count) } // 生存期間をつけて、さらに参照型のstrに変更する。 //tutorial-perf-serde-02.rs #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] struct RecordPerfomanceUp<'a> { city: &'a str, country: &'a str, accent_city: &'a str, region: &'a str, population: Option<u64>, latitude: f64, longitude: f64, } //./csv_example < worldcitiespop.csv 1.14s user 0.04s system 97% cpu 1.216 total fn performance_up_read_csv_to_model() -> Result<u64, Box<dyn Error>> { let mut reader = csv::Reader::from_reader(io::stdin()); let mut raw_record = csv::StringRecord::new(); let headers = reader.headers()?.clone(); let mut count = 0; // while reader.read_record(&mut raw_record)? { // let record: RecordPerfomanceUp = raw_record.deserialize(Some(&headers))?; // if record.country == "us" && record.region == "MA" { // count += 1; // } // } for result in reader.deserialize() { let record: RecordPerformance = result?; if record.country == "us" && record.region == "MA" { count += 1; } } Ok(count) }
ord[0] == "
identifier_name
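The RecordPerfomanceUp<'a> struct and the commented-out read_record loop above sketch the zero-copy approach: deserialize each StringRecord into a struct of borrowed &str fields so that no per-row Strings are allocated. A runnable version of that pattern on in-memory data; it assumes the csv crate plus serde's derive feature (the tutorial pulls the derive in through serde_derive instead), and the headers and field names here are simplified for illustration.

use std::error::Error;
use serde::Deserialize;

// Borrowed fields: each row borrows directly from the StringRecord buffer.
#[derive(Debug, Deserialize)]
struct CityRow<'a> {
    country: &'a str,
    region: &'a str,
}

fn count_ma(data: &str) -> Result<u64, Box<dyn Error>> {
    let mut reader = csv::Reader::from_reader(data.as_bytes());
    let headers = reader.headers()?.clone();
    let mut raw_record = csv::StringRecord::new();
    let mut count = 0;
    while reader.read_record(&mut raw_record)? {
        let row: CityRow = raw_record.deserialize(Some(&headers))?;
        if row.country == "us" && row.region == "MA" {
            count += 1;
        }
    }
    Ok(count)
}

fn main() -> Result<(), Box<dyn Error>> {
    let data = "country,city,region\nus,boston,MA\nus,seattle,WA\n";
    assert_eq!(count_ma(data)?, 1);
    Ok(())
}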
planning.rs
use cargo::CargoError; use cargo::core::Dependency; use cargo::core::Package as CargoPackage; use cargo::core::PackageId; use cargo::core::PackageSet; use cargo::core::Resolve; use cargo::core::SourceId; use cargo::core::Workspace; use cargo::core::dependency::Kind; use cargo::ops::Packages; use cargo::ops; use cargo::util::CargoResult; use cargo::util::Cfg; use cargo::util::Config; use cargo::util::ToUrl; use context::BuildDependency; use context::BuildTarget; use context::CrateContext; use context::WorkspaceContext; use settings::RazeSettings; use settings::GenMode; use std::collections::HashSet; use std::env; use std::fs; use std::ops::Deref; use std::path::Path; use std::str; use util; pub struct PlannedBuild { pub workspace_context: WorkspaceContext, pub crate_contexts: Vec<CrateContext>, } pub struct BuildPlanner<'a> { settings: RazeSettings, cargo_config: &'a Config, platform_attrs: Vec<Cfg>, registry: Option<SourceId>, } impl <'a> BuildPlanner<'a> { pub fn new(settings: RazeSettings, cargo_config: &'a Config) -> CargoResult<BuildPlanner<'a>> { Ok(BuildPlanner { platform_attrs: try!(util::fetch_attrs(&settings.target)), cargo_config: cargo_config, registry: None, settings: settings, }) } pub fn
(&mut self, host: String) -> CargoResult<()> { match host.to_url().map(|url| SourceId::for_registry(&url)) { Ok(registry_id) => { self.registry = Some(registry_id); Ok(()) }, Err(value) => Err(CargoError::from(value)) } } pub fn plan_build(&self) -> CargoResult<PlannedBuild> { let ResolvedPlan {root_name, packages, resolve} = try!(ResolvedPlan::resolve_from_files(&self.cargo_config)); let root_package_id = try!(resolve.iter() .filter(|dep| dep.name() == root_name) .next() .ok_or(CargoError::from("root crate should be in cargo resolve"))); let root_direct_deps = resolve.deps(&root_package_id).cloned().collect::<HashSet<_>>(); let mut crate_contexts = Vec::new(); let source_id = match self.registry.clone() { Some(v) => v, None => try!(SourceId::crates_io(&self.cargo_config)), }; for id in try!(find_all_package_ids(source_id, &resolve)) { let package = packages.get(&id).unwrap().clone(); let mut features = resolve.features(&id).clone().into_iter().collect::<Vec<_>>(); features.sort(); let full_name = format!("{}-{}", id.name(), id.version()); let path = format!("./vendor/{}-{}/", id.name(), id.version()); // Verify that package is really vendored if self.settings.genmode == GenMode::Vendored { try!(fs::metadata(&path).map_err(|_| { CargoError::from(format!("failed to find {}. Either switch to \"Remote\" genmode, or run `cargo vendor -x` first.", &path)) })); } // Identify all possible dependencies let PlannedDeps { mut build_deps, mut dev_deps, mut normal_deps } = PlannedDeps::find_all_deps(&id, &package, &resolve, &self.settings.target, &self.platform_attrs); build_deps.sort(); dev_deps.sort(); normal_deps.sort(); let mut targets = try!(identify_targets(&full_name, &package)); targets.sort(); let possible_crate_settings = self.settings.crates .get(id.name()) .and_then(|c| c.get(&id.version().to_string())); let should_gen_buildrs = possible_crate_settings.map(|s| s.gen_buildrs.clone()).unwrap_or(false); let build_script_target = if should_gen_buildrs { targets.iter().find(|t| t.kind.deref() == "custom-build").cloned() } else { None }; let targets_sans_build_script = targets.into_iter().filter(|t| t.kind.deref()!= "custom-build").collect::<Vec<_>>(); let additional_deps = possible_crate_settings.map(|s| s.additional_deps.clone()).unwrap_or(Vec::new()); let additional_flags = possible_crate_settings.map(|s| s.additional_flags.clone()).unwrap_or(Vec::new()); let extra_aliased_targets = possible_crate_settings.map(|s| s.extra_aliased_targets.clone()).unwrap_or(Vec::new()); // Skip generated dependencies explicitly designated to be skipped (potentially due to // being replaced or customized as part of additional_deps) let non_skipped_normal_deps = if let Some(s) = possible_crate_settings { normal_deps.into_iter() .filter(|d|!s.skipped_deps.contains(&format!("{}-{}", d.name, d.version))) .collect::<Vec<_>>() } else { normal_deps }; crate_contexts.push(CrateContext { pkg_name: id.name().to_owned(), pkg_version: id.version().to_string(), features: features, is_root_dependency: root_direct_deps.contains(&id), metadeps: Vec::new() /* TODO(acmcarther) */, dependencies: non_skipped_normal_deps, build_dependencies: build_deps, dev_dependencies: dev_deps, path: path, build_script_target: build_script_target, targets: targets_sans_build_script, platform_triple: self.settings.target.to_owned(), additional_deps: additional_deps, additional_flags: additional_flags, extra_aliased_targets: extra_aliased_targets, }) } let workspace_context = WorkspaceContext { workspace_path: self.settings.workspace_path.clone(), 
platform_triple: self.settings.target.clone(), gen_workspace_prefix: self.settings.gen_workspace_prefix.clone(), }; crate_contexts.sort_by_key(|context| format!("{}-{}", context.pkg_name, context.pkg_version)); Ok(PlannedBuild{ workspace_context: workspace_context, crate_contexts: crate_contexts }) } } /** The set of all included dependencies for Cargo's dependency categories. */ pub struct PlannedDeps { pub build_deps: Vec<BuildDependency>, pub dev_deps: Vec<BuildDependency>, pub normal_deps: Vec<BuildDependency>, } impl PlannedDeps { /** * Identifies the full set of cargo dependencies for the provided package id using cargo's * resolution details. */ pub fn find_all_deps(id: &PackageId, package: &CargoPackage, resolve: &Resolve, platform_triple: &str, platform_attrs: &Vec<Cfg>) -> PlannedDeps { let platform_deps = package .dependencies() .iter() .filter(|dep| { dep.platform() .map(|p| p.matches(&platform_triple, Some(&platform_attrs))) .unwrap_or(true) }) .cloned() .collect::<Vec<Dependency>>(); let build_deps = util::take_kinded_dep_names(&platform_deps, Kind::Build); let dev_deps = util::take_kinded_dep_names(&platform_deps, Kind::Development); let normal_deps = util::take_kinded_dep_names(&platform_deps, Kind::Normal); let resolved_deps = resolve.deps(&id).into_iter() .map(|dep| BuildDependency { name: dep.name().to_owned(), version: dep.version().to_string(), }) .collect::<Vec<BuildDependency>>(); PlannedDeps { normal_deps: resolved_deps.iter().filter(|d| normal_deps.contains(&d.name)).cloned().collect(), build_deps: resolved_deps.iter().filter(|d| build_deps.contains(&d.name)).cloned().collect(), dev_deps: resolved_deps.into_iter().filter(|d| dev_deps.contains(&d.name)).collect(), } } } /** A synthesized Cargo dependency resolution. */ pub struct ResolvedPlan<'a> { pub root_name: String, pub packages: PackageSet<'a>, pub resolve: Resolve, } impl<'a> ResolvedPlan<'a> { /** * Performs Cargo's own build plan resolution, yielding the root crate, the set of packages, and * the resolution graph. */ pub fn resolve_from_files(cargo_config: &Config) -> CargoResult<ResolvedPlan> { let lockfile = Path::new("Cargo.lock"); let manifest_path = lockfile.parent().unwrap().join("Cargo.toml"); let manifest = env::current_dir().unwrap().join(&manifest_path); let ws = try!(Workspace::new(&manifest, cargo_config)); let specs = Packages::All.into_package_id_specs(&ws)?; let root_name = specs.iter().next().unwrap().name().to_owned(); let (packages, resolve) = ops::resolve_ws_precisely( &ws, None, &[], false, false, &specs)?; Ok(ResolvedPlan { root_name: root_name, packages: packages, resolve: resolve, }) } } /** Enumerates the set of all possibly relevant packages for the Cargo dependencies */ fn find_all_package_ids(registry_id: SourceId, resolve: &Resolve) -> CargoResult<Vec<PackageId>> { try!(fs::metadata("Cargo.lock").map_err(|_| { CargoError::from("failed to find Cargo.lock. Please run `cargo generate-lockfile` first.") })); let mut package_ids = resolve.iter() .filter(|id| *id.source_id() == registry_id) .cloned() .collect::<Vec<_>>(); package_ids.sort_by_key(|id| id.name().to_owned()); Ok(package_ids) } /** Derives target objects from Cargo's target information. 
*/ fn identify_targets(full_name: &str, package: &CargoPackage) -> CargoResult<Vec<BuildTarget>> { let partial_path = format!("{}/", full_name); let partial_path_byte_length = partial_path.as_bytes().len(); let mut targets = Vec::new(); for target in package.targets().iter() { let target_path_str = try!(target.src_path().to_str() .ok_or(CargoError::from(format!("path for {}'s target {} wasn't unicode", &full_name, target.name())))) .to_owned(); let crate_name_str_idx = try!(target_path_str.find(&partial_path) .ok_or(CargoError::from(format!("path for {}'s target {} should have been in vendor directory", &full_name, target.name())))); let local_path_bytes = target_path_str.bytes() .skip(crate_name_str_idx + partial_path_byte_length) .collect::<Vec<_>>(); let local_path_str = String::from_utf8(local_path_bytes).unwrap(); for kind in util::kind_to_kinds(target.kind()) { targets.push(BuildTarget { name: target.name().to_owned(), path: local_path_str.clone(), kind: kind, }); } } Ok(targets) }
set_registry_from_url
identifier_name
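set_registry_from_url follows a common shape: parse a user-supplied string, stash the parsed value in an Option field on success, and convert the parse error into the caller's error type otherwise. Cargo's ToUrl and SourceId types are not available outside cargo itself, so the sketch below uses std's SocketAddr parsing purely as a stand-in to show the same control flow; every name in it is illustrative.

use std::net::SocketAddr;

struct Planner {
    registry: Option<SocketAddr>, // stand-in for Option<SourceId>
}

impl Planner {
    // Parse the host string; store it on success, map the error otherwise.
    fn set_registry(&mut self, host: String) -> Result<(), String> {
        match host.parse::<SocketAddr>() {
            Ok(addr) => {
                self.registry = Some(addr);
                Ok(())
            }
            Err(e) => Err(format!("invalid registry host: {}", e)),
        }
    }
}

fn main() {
    let mut p = Planner { registry: None };
    assert!(p.set_registry("127.0.0.1:8080".to_string()).is_ok());
    assert!(p.set_registry("not a host".to_string()).is_err());
    assert!(p.registry.is_some());
}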
planning.rs
use cargo::CargoError; use cargo::core::Dependency; use cargo::core::Package as CargoPackage; use cargo::core::PackageId; use cargo::core::PackageSet; use cargo::core::Resolve; use cargo::core::SourceId; use cargo::core::Workspace; use cargo::core::dependency::Kind; use cargo::ops::Packages; use cargo::ops; use cargo::util::CargoResult; use cargo::util::Cfg; use cargo::util::Config; use cargo::util::ToUrl; use context::BuildDependency; use context::BuildTarget; use context::CrateContext; use context::WorkspaceContext; use settings::RazeSettings; use settings::GenMode; use std::collections::HashSet; use std::env; use std::fs; use std::ops::Deref; use std::path::Path; use std::str; use util; pub struct PlannedBuild { pub workspace_context: WorkspaceContext, pub crate_contexts: Vec<CrateContext>, } pub struct BuildPlanner<'a> { settings: RazeSettings, cargo_config: &'a Config, platform_attrs: Vec<Cfg>, registry: Option<SourceId>, } impl <'a> BuildPlanner<'a> { pub fn new(settings: RazeSettings, cargo_config: &'a Config) -> CargoResult<BuildPlanner<'a>> { Ok(BuildPlanner { platform_attrs: try!(util::fetch_attrs(&settings.target)), cargo_config: cargo_config, registry: None, settings: settings, }) } pub fn set_registry_from_url(&mut self, host: String) -> CargoResult<()> { match host.to_url().map(|url| SourceId::for_registry(&url)) { Ok(registry_id) => { self.registry = Some(registry_id); Ok(()) }, Err(value) => Err(CargoError::from(value)) } } pub fn plan_build(&self) -> CargoResult<PlannedBuild> { let ResolvedPlan {root_name, packages, resolve} = try!(ResolvedPlan::resolve_from_files(&self.cargo_config)); let root_package_id = try!(resolve.iter() .filter(|dep| dep.name() == root_name) .next() .ok_or(CargoError::from("root crate should be in cargo resolve"))); let root_direct_deps = resolve.deps(&root_package_id).cloned().collect::<HashSet<_>>(); let mut crate_contexts = Vec::new(); let source_id = match self.registry.clone() { Some(v) => v, None => try!(SourceId::crates_io(&self.cargo_config)), }; for id in try!(find_all_package_ids(source_id, &resolve)) { let package = packages.get(&id).unwrap().clone(); let mut features = resolve.features(&id).clone().into_iter().collect::<Vec<_>>(); features.sort(); let full_name = format!("{}-{}", id.name(), id.version()); let path = format!("./vendor/{}-{}/", id.name(), id.version()); // Verify that package is really vendored if self.settings.genmode == GenMode::Vendored { try!(fs::metadata(&path).map_err(|_| { CargoError::from(format!("failed to find {}. 
Either switch to \"Remote\" genmode, or run `cargo vendor -x` first.", &path)) })); } // Identify all possible dependencies let PlannedDeps { mut build_deps, mut dev_deps, mut normal_deps } = PlannedDeps::find_all_deps(&id, &package, &resolve, &self.settings.target, &self.platform_attrs); build_deps.sort(); dev_deps.sort(); normal_deps.sort(); let mut targets = try!(identify_targets(&full_name, &package)); targets.sort(); let possible_crate_settings = self.settings.crates .get(id.name()) .and_then(|c| c.get(&id.version().to_string())); let should_gen_buildrs = possible_crate_settings.map(|s| s.gen_buildrs.clone()).unwrap_or(false); let build_script_target = if should_gen_buildrs { targets.iter().find(|t| t.kind.deref() == "custom-build").cloned() } else { None }; let targets_sans_build_script = targets.into_iter().filter(|t| t.kind.deref()!= "custom-build").collect::<Vec<_>>(); let additional_deps = possible_crate_settings.map(|s| s.additional_deps.clone()).unwrap_or(Vec::new()); let additional_flags = possible_crate_settings.map(|s| s.additional_flags.clone()).unwrap_or(Vec::new()); let extra_aliased_targets = possible_crate_settings.map(|s| s.extra_aliased_targets.clone()).unwrap_or(Vec::new()); // Skip generated dependencies explicitly designated to be skipped (potentially due to // being replaced or customized as part of additional_deps) let non_skipped_normal_deps = if let Some(s) = possible_crate_settings
else { normal_deps }; crate_contexts.push(CrateContext { pkg_name: id.name().to_owned(), pkg_version: id.version().to_string(), features: features, is_root_dependency: root_direct_deps.contains(&id), metadeps: Vec::new() /* TODO(acmcarther) */, dependencies: non_skipped_normal_deps, build_dependencies: build_deps, dev_dependencies: dev_deps, path: path, build_script_target: build_script_target, targets: targets_sans_build_script, platform_triple: self.settings.target.to_owned(), additional_deps: additional_deps, additional_flags: additional_flags, extra_aliased_targets: extra_aliased_targets, }) } let workspace_context = WorkspaceContext { workspace_path: self.settings.workspace_path.clone(), platform_triple: self.settings.target.clone(), gen_workspace_prefix: self.settings.gen_workspace_prefix.clone(), }; crate_contexts.sort_by_key(|context| format!("{}-{}", context.pkg_name, context.pkg_version)); Ok(PlannedBuild{ workspace_context: workspace_context, crate_contexts: crate_contexts }) } } /** The set of all included dependencies for Cargo's dependency categories. */ pub struct PlannedDeps { pub build_deps: Vec<BuildDependency>, pub dev_deps: Vec<BuildDependency>, pub normal_deps: Vec<BuildDependency>, } impl PlannedDeps { /** * Identifies the full set of cargo dependencies for the provided package id using cargo's * resolution details. */ pub fn find_all_deps(id: &PackageId, package: &CargoPackage, resolve: &Resolve, platform_triple: &str, platform_attrs: &Vec<Cfg>) -> PlannedDeps { let platform_deps = package .dependencies() .iter() .filter(|dep| { dep.platform() .map(|p| p.matches(&platform_triple, Some(&platform_attrs))) .unwrap_or(true) }) .cloned() .collect::<Vec<Dependency>>(); let build_deps = util::take_kinded_dep_names(&platform_deps, Kind::Build); let dev_deps = util::take_kinded_dep_names(&platform_deps, Kind::Development); let normal_deps = util::take_kinded_dep_names(&platform_deps, Kind::Normal); let resolved_deps = resolve.deps(&id).into_iter() .map(|dep| BuildDependency { name: dep.name().to_owned(), version: dep.version().to_string(), }) .collect::<Vec<BuildDependency>>(); PlannedDeps { normal_deps: resolved_deps.iter().filter(|d| normal_deps.contains(&d.name)).cloned().collect(), build_deps: resolved_deps.iter().filter(|d| build_deps.contains(&d.name)).cloned().collect(), dev_deps: resolved_deps.into_iter().filter(|d| dev_deps.contains(&d.name)).collect(), } } } /** A synthesized Cargo dependency resolution. */ pub struct ResolvedPlan<'a> { pub root_name: String, pub packages: PackageSet<'a>, pub resolve: Resolve, } impl<'a> ResolvedPlan<'a> { /** * Performs Cargo's own build plan resolution, yielding the root crate, the set of packages, and * the resolution graph. 
*/ pub fn resolve_from_files(cargo_config: &Config) -> CargoResult<ResolvedPlan> { let lockfile = Path::new("Cargo.lock"); let manifest_path = lockfile.parent().unwrap().join("Cargo.toml"); let manifest = env::current_dir().unwrap().join(&manifest_path); let ws = try!(Workspace::new(&manifest, cargo_config)); let specs = Packages::All.into_package_id_specs(&ws)?; let root_name = specs.iter().next().unwrap().name().to_owned(); let (packages, resolve) = ops::resolve_ws_precisely( &ws, None, &[], false, false, &specs)?; Ok(ResolvedPlan { root_name: root_name, packages: packages, resolve: resolve, }) } } /** Enumerates the set of all possibly relevant packages for the Cargo dependencies */ fn find_all_package_ids(registry_id: SourceId, resolve: &Resolve) -> CargoResult<Vec<PackageId>> { try!(fs::metadata("Cargo.lock").map_err(|_| { CargoError::from("failed to find Cargo.lock. Please run `cargo generate-lockfile` first.") })); let mut package_ids = resolve.iter() .filter(|id| *id.source_id() == registry_id) .cloned() .collect::<Vec<_>>(); package_ids.sort_by_key(|id| id.name().to_owned()); Ok(package_ids) } /** Derives target objects from Cargo's target information. */ fn identify_targets(full_name: &str, package: &CargoPackage) -> CargoResult<Vec<BuildTarget>> { let partial_path = format!("{}/", full_name); let partial_path_byte_length = partial_path.as_bytes().len(); let mut targets = Vec::new(); for target in package.targets().iter() { let target_path_str = try!(target.src_path().to_str() .ok_or(CargoError::from(format!("path for {}'s target {} wasn't unicode", &full_name, target.name())))) .to_owned(); let crate_name_str_idx = try!(target_path_str.find(&partial_path) .ok_or(CargoError::from(format!("path for {}'s target {} should have been in vendor directory", &full_name, target.name())))); let local_path_bytes = target_path_str.bytes() .skip(crate_name_str_idx + partial_path_byte_length) .collect::<Vec<_>>(); let local_path_str = String::from_utf8(local_path_bytes).unwrap(); for kind in util::kind_to_kinds(target.kind()) { targets.push(BuildTarget { name: target.name().to_owned(), path: local_path_str.clone(), kind: kind, }); } } Ok(targets) }
{ normal_deps.into_iter() .filter(|d| !s.skipped_deps.contains(&format!("{}-{}", d.name, d.version))) .collect::<Vec<_>>() }
conditional_block
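The skipped_deps filter above keeps only the dependencies whose "name-version" key is not listed in the crate's settings. The same logic in isolation, with plain structs standing in for cargo-raze's BuildDependency and settings types (all names here are illustrative):

// Drop every dependency whose "name-version" key appears in the skip list.
#[derive(Debug, PartialEq)]
struct Dep {
    name: String,
    version: String,
}

fn drop_skipped(deps: Vec<Dep>, skipped: &[String]) -> Vec<Dep> {
    deps.into_iter()
        .filter(|d| !skipped.contains(&format!("{}-{}", d.name, d.version)))
        .collect()
}

fn main() {
    let deps = vec![
        Dep { name: "libc".to_string(), version: "0.2.0".to_string() },
        Dep { name: "log".to_string(), version: "0.4.0".to_string() },
    ];
    let skipped = vec!["log-0.4.0".to_string()];
    let kept = drop_skipped(deps, &skipped);
    assert_eq!(kept.len(), 1);
    assert_eq!(kept[0].name, "libc");
}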
planning.rs
use cargo::CargoError; use cargo::core::Dependency; use cargo::core::Package as CargoPackage; use cargo::core::PackageId; use cargo::core::PackageSet;
use cargo::ops::Packages; use cargo::ops; use cargo::util::CargoResult; use cargo::util::Cfg; use cargo::util::Config; use cargo::util::ToUrl; use context::BuildDependency; use context::BuildTarget; use context::CrateContext; use context::WorkspaceContext; use settings::RazeSettings; use settings::GenMode; use std::collections::HashSet; use std::env; use std::fs; use std::ops::Deref; use std::path::Path; use std::str; use util; pub struct PlannedBuild { pub workspace_context: WorkspaceContext, pub crate_contexts: Vec<CrateContext>, } pub struct BuildPlanner<'a> { settings: RazeSettings, cargo_config: &'a Config, platform_attrs: Vec<Cfg>, registry: Option<SourceId>, } impl <'a> BuildPlanner<'a> { pub fn new(settings: RazeSettings, cargo_config: &'a Config) -> CargoResult<BuildPlanner<'a>> { Ok(BuildPlanner { platform_attrs: try!(util::fetch_attrs(&settings.target)), cargo_config: cargo_config, registry: None, settings: settings, }) } pub fn set_registry_from_url(&mut self, host: String) -> CargoResult<()> { match host.to_url().map(|url| SourceId::for_registry(&url)) { Ok(registry_id) => { self.registry = Some(registry_id); Ok(()) }, Err(value) => Err(CargoError::from(value)) } } pub fn plan_build(&self) -> CargoResult<PlannedBuild> { let ResolvedPlan {root_name, packages, resolve} = try!(ResolvedPlan::resolve_from_files(&self.cargo_config)); let root_package_id = try!(resolve.iter() .filter(|dep| dep.name() == root_name) .next() .ok_or(CargoError::from("root crate should be in cargo resolve"))); let root_direct_deps = resolve.deps(&root_package_id).cloned().collect::<HashSet<_>>(); let mut crate_contexts = Vec::new(); let source_id = match self.registry.clone() { Some(v) => v, None => try!(SourceId::crates_io(&self.cargo_config)), }; for id in try!(find_all_package_ids(source_id, &resolve)) { let package = packages.get(&id).unwrap().clone(); let mut features = resolve.features(&id).clone().into_iter().collect::<Vec<_>>(); features.sort(); let full_name = format!("{}-{}", id.name(), id.version()); let path = format!("./vendor/{}-{}/", id.name(), id.version()); // Verify that package is really vendored if self.settings.genmode == GenMode::Vendored { try!(fs::metadata(&path).map_err(|_| { CargoError::from(format!("failed to find {}. 
Either switch to \"Remote\" genmode, or run `cargo vendor -x` first.", &path)) })); } // Identify all possible dependencies let PlannedDeps { mut build_deps, mut dev_deps, mut normal_deps } = PlannedDeps::find_all_deps(&id, &package, &resolve, &self.settings.target, &self.platform_attrs); build_deps.sort(); dev_deps.sort(); normal_deps.sort(); let mut targets = try!(identify_targets(&full_name, &package)); targets.sort(); let possible_crate_settings = self.settings.crates .get(id.name()) .and_then(|c| c.get(&id.version().to_string())); let should_gen_buildrs = possible_crate_settings.map(|s| s.gen_buildrs.clone()).unwrap_or(false); let build_script_target = if should_gen_buildrs { targets.iter().find(|t| t.kind.deref() == "custom-build").cloned() } else { None }; let targets_sans_build_script = targets.into_iter().filter(|t| t.kind.deref()!= "custom-build").collect::<Vec<_>>(); let additional_deps = possible_crate_settings.map(|s| s.additional_deps.clone()).unwrap_or(Vec::new()); let additional_flags = possible_crate_settings.map(|s| s.additional_flags.clone()).unwrap_or(Vec::new()); let extra_aliased_targets = possible_crate_settings.map(|s| s.extra_aliased_targets.clone()).unwrap_or(Vec::new()); // Skip generated dependencies explicitly designated to be skipped (potentially due to // being replaced or customized as part of additional_deps) let non_skipped_normal_deps = if let Some(s) = possible_crate_settings { normal_deps.into_iter() .filter(|d|!s.skipped_deps.contains(&format!("{}-{}", d.name, d.version))) .collect::<Vec<_>>() } else { normal_deps }; crate_contexts.push(CrateContext { pkg_name: id.name().to_owned(), pkg_version: id.version().to_string(), features: features, is_root_dependency: root_direct_deps.contains(&id), metadeps: Vec::new() /* TODO(acmcarther) */, dependencies: non_skipped_normal_deps, build_dependencies: build_deps, dev_dependencies: dev_deps, path: path, build_script_target: build_script_target, targets: targets_sans_build_script, platform_triple: self.settings.target.to_owned(), additional_deps: additional_deps, additional_flags: additional_flags, extra_aliased_targets: extra_aliased_targets, }) } let workspace_context = WorkspaceContext { workspace_path: self.settings.workspace_path.clone(), platform_triple: self.settings.target.clone(), gen_workspace_prefix: self.settings.gen_workspace_prefix.clone(), }; crate_contexts.sort_by_key(|context| format!("{}-{}", context.pkg_name, context.pkg_version)); Ok(PlannedBuild{ workspace_context: workspace_context, crate_contexts: crate_contexts }) } } /** The set of all included dependencies for Cargo's dependency categories. */ pub struct PlannedDeps { pub build_deps: Vec<BuildDependency>, pub dev_deps: Vec<BuildDependency>, pub normal_deps: Vec<BuildDependency>, } impl PlannedDeps { /** * Identifies the full set of cargo dependencies for the provided package id using cargo's * resolution details. 
*/ pub fn find_all_deps(id: &PackageId, package: &CargoPackage, resolve: &Resolve, platform_triple: &str, platform_attrs: &Vec<Cfg>) -> PlannedDeps { let platform_deps = package .dependencies() .iter() .filter(|dep| { dep.platform() .map(|p| p.matches(&platform_triple, Some(&platform_attrs))) .unwrap_or(true) }) .cloned() .collect::<Vec<Dependency>>(); let build_deps = util::take_kinded_dep_names(&platform_deps, Kind::Build); let dev_deps = util::take_kinded_dep_names(&platform_deps, Kind::Development); let normal_deps = util::take_kinded_dep_names(&platform_deps, Kind::Normal); let resolved_deps = resolve.deps(&id).into_iter() .map(|dep| BuildDependency { name: dep.name().to_owned(), version: dep.version().to_string(), }) .collect::<Vec<BuildDependency>>(); PlannedDeps { normal_deps: resolved_deps.iter().filter(|d| normal_deps.contains(&d.name)).cloned().collect(), build_deps: resolved_deps.iter().filter(|d| build_deps.contains(&d.name)).cloned().collect(), dev_deps: resolved_deps.into_iter().filter(|d| dev_deps.contains(&d.name)).collect(), } } } /** A synthesized Cargo dependency resolution. */ pub struct ResolvedPlan<'a> { pub root_name: String, pub packages: PackageSet<'a>, pub resolve: Resolve, } impl<'a> ResolvedPlan<'a> { /** * Performs Cargo's own build plan resolution, yielding the root crate, the set of packages, and * the resolution graph. */ pub fn resolve_from_files(cargo_config: &Config) -> CargoResult<ResolvedPlan> { let lockfile = Path::new("Cargo.lock"); let manifest_path = lockfile.parent().unwrap().join("Cargo.toml"); let manifest = env::current_dir().unwrap().join(&manifest_path); let ws = try!(Workspace::new(&manifest, cargo_config)); let specs = Packages::All.into_package_id_specs(&ws)?; let root_name = specs.iter().next().unwrap().name().to_owned(); let (packages, resolve) = ops::resolve_ws_precisely( &ws, None, &[], false, false, &specs)?; Ok(ResolvedPlan { root_name: root_name, packages: packages, resolve: resolve, }) } } /** Enumerates the set of all possibly relevant packages for the Cargo dependencies */ fn find_all_package_ids(registry_id: SourceId, resolve: &Resolve) -> CargoResult<Vec<PackageId>> { try!(fs::metadata("Cargo.lock").map_err(|_| { CargoError::from("failed to find Cargo.lock. Please run `cargo generate-lockfile` first.") })); let mut package_ids = resolve.iter() .filter(|id| *id.source_id() == registry_id) .cloned() .collect::<Vec<_>>(); package_ids.sort_by_key(|id| id.name().to_owned()); Ok(package_ids) } /** Derives target objects from Cargo's target information. 
*/ fn identify_targets(full_name: &str, package: &CargoPackage) -> CargoResult<Vec<BuildTarget>> { let partial_path = format!("{}/", full_name); let partial_path_byte_length = partial_path.as_bytes().len(); let mut targets = Vec::new(); for target in package.targets().iter() { let target_path_str = try!(target.src_path().to_str() .ok_or(CargoError::from(format!("path for {}'s target {} wasn't unicode", &full_name, target.name())))) .to_owned(); let crate_name_str_idx = try!(target_path_str.find(&partial_path) .ok_or(CargoError::from(format!("path for {}'s target {} should have been in vendor directory", &full_name, target.name())))); let local_path_bytes = target_path_str.bytes() .skip(crate_name_str_idx + partial_path_byte_length) .collect::<Vec<_>>(); let local_path_str = String::from_utf8(local_path_bytes).unwrap(); for kind in util::kind_to_kinds(target.kind()) { targets.push(BuildTarget { name: target.name().to_owned(), path: local_path_str.clone(), kind: kind, }); } } Ok(targets) }
use cargo::core::Resolve;
use cargo::core::SourceId;
use cargo::core::Workspace;
use cargo::core::dependency::Kind;
random_line_split
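The planner code in the row above splits Cargo's resolved dependency list into normal, build, and dev buckets by matching resolved names against the kinded dependency declarations. A minimal, stand-alone sketch of that bucketing logic follows; the DeclaredDep and DepKind types are hypothetical stand-ins for cargo's own Dependency/Kind API, not part of the file above.

#[derive(Clone, Copy, Debug, PartialEq)]
enum DepKind { Normal, Build, Development }

#[derive(Clone, Debug)]
struct DeclaredDep { name: String, kind: DepKind }

#[derive(Clone, Debug)]
struct BuildDependency { name: String, version: String }

// Split resolved dependencies into Cargo's three categories by matching their
// names against the declared (kinded) dependencies, mirroring the filtering
// done in PlannedDeps::find_all_deps.
fn bucket_deps(
    declared: &[DeclaredDep],
    resolved: &[BuildDependency],
) -> (Vec<BuildDependency>, Vec<BuildDependency>, Vec<BuildDependency>) {
    let names_of = |kind: DepKind| -> Vec<String> {
        declared.iter().filter(|d| d.kind == kind).map(|d| d.name.clone()).collect()
    };
    let pick = |names: Vec<String>| -> Vec<BuildDependency> {
        resolved.iter().filter(|d| names.contains(&d.name)).cloned().collect()
    };
    (
        pick(names_of(DepKind::Normal)),
        pick(names_of(DepKind::Build)),
        pick(names_of(DepKind::Development)),
    )
}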
main.rs
fn main() { // defining a variable println!("-------defining a variable"); println!("Hello, Hooman!"); let mut x = 45; // all variables initially are immutable otherwise it is mentioned println!("The value of x is {}", x); x = 10; println!("The value of x is {}", x); let y: i64; y = 734; println!("{}", y); // if statement println!("-------if statement"); if y < 10 { println!("The {} is less!", y); } else { println!("The {} is big!", y); } // loop println!("-------loop"); let mut n = 0; loop { n += 7; if n % 5 == 0 || n % 2 == 0 { continue; } println!("The value of n is {}", n); if n > 100 { break; } } // for loop println!("-------for loop"); for i in 1..10 { println!("The number is {}", i); } let range = 10..20; for i in range { println!("element in range {}", i); } let family_name = vec!["Amir", "Hooman", "Aref", "Shahnaz", "Vihan", "Shima"]; for name in family_name.iter() { println!("Family person is {}", name); } for (index, name) in family_name.iter().enumerate() { println!("Family people {} is {}", index+1, name); } for name in family_name { // in this way we cannot use family_name next time println!("name is {}", name); } // enum println!("-------enum"); enum Direction { Up, Down, Left, Right } let player_direction1:Direction = Direction::Up; let player_direction2:Direction = Direction::Down; let player_direction3:Direction = Direction::Left; let player_direction4:Direction = Direction::Right; match player_direction1 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } match player_direction2 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } match player_direction3 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } match player_direction4 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } // constants println!("-------constants"); const MAXIMUM_NUMBER: u8 = 7; // must be uppercase for n in 1..MAXIMUM_NUMBER { println!("{}", n); } // tuples println!("-------tuples"); let tup1 = ("A", ("Hooman", "Hesamyan"), "C", 734, true); println!("{}", (tup1.1).1); // referencing a tuple inside the tuple println!("{}", tup1.0); println!("{}", tup1.2); println!("{}", tup1.3); println!("{}", tup1.4); let (x, y, z, u, v) = tup1; // destructuring the tuple println!("{}", x); println!("{}", y.0); // function println!("-------functions"); fn count_to(num: u32) { for i in 1..num { if is_even(i) { println!("{} is even", i); } else { println!("{} is odd", i); } } } count_to(7); fn is_even(num: u32) -> bool { return num % 2 == 0; } let number = 12; println!("is {} even? 
{}", number, is_even(number)); // reference println!("-------references"); let mut x = 7; println!("x is {}", x); { let x_ref_mut = &mut x; // mutable reference should enclosed inside a block *x_ref_mut += 7; println!("x reference is {}", x_ref_mut); } let x_ref = &x; println!("x is {}", x); println!("x reference is {}", x_ref); // structs println!("-------structs"); struct Color { red: u8, // u8: 0-255 green: u8, blue: u8 } let bg = Color {red: 255, green: 70, blue: 15}; println!("{}, {}, {}", bg.red, bg.green, bg.blue); struct Color2(u8, u8, u8); let mut bg2 = Color2(30, 70, 255); println!("{}, {}, {}", bg2.0, bg2.1, bg2.2); bg2.2 = 40; println!("{}, {}, {}", bg2.0, bg2.1, bg2.2); // pass by reference println!("-------pass by reference"); fn print_color(c: Color) { println!("Color - R:{} G:{} B:{}", c.red, c.green, c.blue); } fn
(c: &Color2) { println!("Color - R:{} G:{} B:{}", c.0, c.1, c.2); } print_color(bg); /* print_color(bg); *impossible */ print_color2(&bg2); print_color2(&bg2); print_color2(&bg2); // it is possible to have multile function invocation due to it is called by reference // arrays println!("-------arrays"); let sample_array = [1, 3, 5, 7]; // either ways are valid let sample_array2: [i32; 4] = [6, 8, 15, 20]; println!("{}", sample_array[1]); for (i, el) in sample_array.iter().enumerate() { println!("{}-th element is {}", i, el); } for i in 0..sample_array2.len() { println!("{}", sample_array2[i]); } let array_of_2 = [2; 10]; // generating an array of 2's with length 10 for el in array_of_2.iter() { println!("{}", el); } // impl println!("-------impl"); struct Rectangle { width: u32, height: u32 } impl Rectangle { fn print_description(&self) { println!("Rectangle: {} x {}", self.width, self.height); } fn is_square(&self) -> bool{ return self.width == self.height; } fn area(&self) -> u32 { return self.width * self.height; } fn perimeter(&self) -> u32 { return (self.width + self.height) * 2; } } let rectangle: Rectangle = Rectangle {height: 30, width: 10, }; rectangle.print_description(); println!("The given rectangle is square? {}", rectangle.is_square()); println!("Area is {} and perimeter is {}", rectangle.area(), rectangle.perimeter()); // Strings println!("-------Strings"); let new_string = "Hello World"; // primitive string println!("{}", new_string); let mut my_string = String::from("How is it going today?"); println!("{}", my_string); println!("{}", my_string.len()); println!("{}", my_string.is_empty()); for token in my_string.split_whitespace() { // there is not in primitive string println!("{}-", token) } println!("Does contain {} 'today' in it? {}", my_string, my_string.contains("today")); my_string.push_str(new_string); println!("{}", my_string); /* println!("{}", my_string.push_str(new_string)) *impossible */ // Traits (like interface) println!("-------Traits"); struct Person { name: String, age: u32, } // impl Person { // fn to_string(&self) -> String { // return format!("My name is {} and my age is {}", self.name, self.age); // } // } impl ToString for Person { // trait "ToString" is implemented for "Person" fn to_string(&self) -> String { return format!("My name is {} and my age is {}", self.name, self.age); } } let hooman: Person = Person {age: 39, name: String::from("Hesamyan Hooman")}; println!("{}", hooman.to_string()); // Custom Traits (like interface) println!("-------Custom Traits"); trait HasVoiceBox { // speak fn speak(&self); // check if can speak fn can_speak(&self) -> bool; } impl HasVoiceBox for Person { fn speak(&self) { println!("Hello, my name is {} ", self.name); } fn can_speak(&self) -> bool { if self.age > 3 { return true; } return false; } } println!("I am {} and I can speak? {}", hooman.name, hooman.can_speak()); hooman.speak(); // Match Operator (like Switch) println!("-------Match Operator"); let number = 11; match number { 1 => println!("It is one!"), // case 1 2 => println!("it is two!"), // case 2 3 | 4 => println!("it is three or four!"), // case 3 | 4 5..=10 => println!("it is between 5 to 10"), // case 5 to 10 _ => println!("it is out of the range!"), // default } // read input from console println!("-------read input from console"); use std::io; let mut input = String::new(); println!("Hey mate! Say something:"); match io::stdin().read_line(&mut input) { Ok(_) => { println!("Success! 
You said: {}", input.to_ascii_uppercase()); }, Err(e) => println!("Oops! SOmething went wrong: {}", e) } // Hashmap println!("-------Hashmap"); use std::collections::HashMap; // define HashMap let mut marks = HashMap::new(); // add values marks.insert("Rust Programming", 96); marks.insert("Lua Programming", 100); marks.insert("C++ Programming", 90); marks.insert("Java Programming", 94); // prompt length of the HashMap println!("How many subjects are collected there? {}", marks.len()); // find a subject match marks.get("Rust Programming") { Some(mark) => println!("You have got {} for that.", mark), None => println!("You did not study this subject!"), } // remove an item marks.remove("Java Programming"); // loop through HashMap for (subject, mark) in &marks { println!("For {} you have got {}.", subject, mark); } // check for value println!("Did you study C#? {} ", marks.contains_key("C# Programming")); }
print_color2
identifier_name
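The comments in the row above note that print_color(bg) can only be called once, while print_color2(&bg2) can be called repeatedly. The difference is ownership: a by-value parameter moves its argument, a shared reference only borrows it. A tiny stand-alone sketch (the Point type is hypothetical, not from main.rs):

struct Point { x: i32, y: i32 }

fn consume(p: Point) { println!("({}, {})", p.x, p.y) }  // takes ownership
fn borrow(p: &Point) { println!("({}, {})", p.x, p.y) }  // only borrows

fn main() {
    let a = Point { x: 1, y: 2 };
    borrow(&a);
    borrow(&a);    // fine: `a` was only borrowed
    consume(a);    // moves `a`...
    // consume(a); // ...so a second call would not compile (value used after move)
}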
main.rs
fn main() { // defining a variable println!("-------defining a variable"); println!("Hello, Hooman!"); let mut x = 45; // all variables initially are immutable otherwise it is mentioned println!("The value of x is {}", x); x = 10; println!("The value of x is {}", x); let y: i64; y = 734; println!("{}", y); // if statement println!("-------if statement"); if y < 10 { println!("The {} is less!", y); } else { println!("The {} is big!", y); } // loop println!("-------loop"); let mut n = 0; loop { n += 7; if n % 5 == 0 || n % 2 == 0 { continue; } println!("The value of n is {}", n); if n > 100 { break; } } // for loop println!("-------for loop"); for i in 1..10 { println!("The number is {}", i); } let range = 10..20; for i in range { println!("element in range {}", i); } let family_name = vec!["Amir", "Hooman", "Aref", "Shahnaz", "Vihan", "Shima"]; for name in family_name.iter() { println!("Family person is {}", name); } for (index, name) in family_name.iter().enumerate() { println!("Family people {} is {}", index+1, name); } for name in family_name { // in this way we cannot use family_name next time println!("name is {}", name); } // enum println!("-------enum"); enum Direction { Up, Down, Left, Right } let player_direction1:Direction = Direction::Up; let player_direction2:Direction = Direction::Down; let player_direction3:Direction = Direction::Left; let player_direction4:Direction = Direction::Right; match player_direction1 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } match player_direction2 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } match player_direction3 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } match player_direction4 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } // constants println!("-------constants"); const MAXIMUM_NUMBER: u8 = 7; // must be uppercase for n in 1..MAXIMUM_NUMBER { println!("{}", n); } // tuples println!("-------tuples"); let tup1 = ("A", ("Hooman", "Hesamyan"), "C", 734, true); println!("{}", (tup1.1).1); // referencing a tuple inside the tuple println!("{}", tup1.0); println!("{}", tup1.2); println!("{}", tup1.3); println!("{}", tup1.4); let (x, y, z, u, v) = tup1; // destructuring the tuple println!("{}", x); println!("{}", y.0); // function println!("-------functions"); fn count_to(num: u32) { for i in 1..num { if is_even(i)
else { println!("{} is odd", i); } } } count_to(7); fn is_even(num: u32) -> bool { return num % 2 == 0; } let number = 12; println!("is {} even? {}", number, is_even(number)); // reference println!("-------references"); let mut x = 7; println!("x is {}", x); { let x_ref_mut = &mut x; // mutable reference should enclosed inside a block *x_ref_mut += 7; println!("x reference is {}", x_ref_mut); } let x_ref = &x; println!("x is {}", x); println!("x reference is {}", x_ref); // structs println!("-------structs"); struct Color { red: u8, // u8: 0-255 green: u8, blue: u8 } let bg = Color {red: 255, green: 70, blue: 15}; println!("{}, {}, {}", bg.red, bg.green, bg.blue); struct Color2(u8, u8, u8); let mut bg2 = Color2(30, 70, 255); println!("{}, {}, {}", bg2.0, bg2.1, bg2.2); bg2.2 = 40; println!("{}, {}, {}", bg2.0, bg2.1, bg2.2); // pass by reference println!("-------pass by reference"); fn print_color(c: Color) { println!("Color - R:{} G:{} B:{}", c.red, c.green, c.blue); } fn print_color2(c: &Color2) { println!("Color - R:{} G:{} B:{}", c.0, c.1, c.2); } print_color(bg); /* print_color(bg); *impossible */ print_color2(&bg2); print_color2(&bg2); print_color2(&bg2); // it is possible to have multile function invocation due to it is called by reference // arrays println!("-------arrays"); let sample_array = [1, 3, 5, 7]; // either ways are valid let sample_array2: [i32; 4] = [6, 8, 15, 20]; println!("{}", sample_array[1]); for (i, el) in sample_array.iter().enumerate() { println!("{}-th element is {}", i, el); } for i in 0..sample_array2.len() { println!("{}", sample_array2[i]); } let array_of_2 = [2; 10]; // generating an array of 2's with length 10 for el in array_of_2.iter() { println!("{}", el); } // impl println!("-------impl"); struct Rectangle { width: u32, height: u32 } impl Rectangle { fn print_description(&self) { println!("Rectangle: {} x {}", self.width, self.height); } fn is_square(&self) -> bool{ return self.width == self.height; } fn area(&self) -> u32 { return self.width * self.height; } fn perimeter(&self) -> u32 { return (self.width + self.height) * 2; } } let rectangle: Rectangle = Rectangle {height: 30, width: 10, }; rectangle.print_description(); println!("The given rectangle is square? {}", rectangle.is_square()); println!("Area is {} and perimeter is {}", rectangle.area(), rectangle.perimeter()); // Strings println!("-------Strings"); let new_string = "Hello World"; // primitive string println!("{}", new_string); let mut my_string = String::from("How is it going today?"); println!("{}", my_string); println!("{}", my_string.len()); println!("{}", my_string.is_empty()); for token in my_string.split_whitespace() { // there is not in primitive string println!("{}-", token) } println!("Does contain {} 'today' in it? 
{}", my_string, my_string.contains("today")); my_string.push_str(new_string); println!("{}", my_string); /* println!("{}", my_string.push_str(new_string)) *impossible */ // Traits (like interface) println!("-------Traits"); struct Person { name: String, age: u32, } // impl Person { // fn to_string(&self) -> String { // return format!("My name is {} and my age is {}", self.name, self.age); // } // } impl ToString for Person { // trait "ToString" is implemented for "Person" fn to_string(&self) -> String { return format!("My name is {} and my age is {}", self.name, self.age); } } let hooman: Person = Person {age: 39, name: String::from("Hesamyan Hooman")}; println!("{}", hooman.to_string()); // Custom Traits (like interface) println!("-------Custom Traits"); trait HasVoiceBox { // speak fn speak(&self); // check if can speak fn can_speak(&self) -> bool; } impl HasVoiceBox for Person { fn speak(&self) { println!("Hello, my name is {} ", self.name); } fn can_speak(&self) -> bool { if self.age > 3 { return true; } return false; } } println!("I am {} and I can speak? {}", hooman.name, hooman.can_speak()); hooman.speak(); // Match Operator (like Switch) println!("-------Match Operator"); let number = 11; match number { 1 => println!("It is one!"), // case 1 2 => println!("it is two!"), // case 2 3 | 4 => println!("it is three or four!"), // case 3 | 4 5..=10 => println!("it is between 5 to 10"), // case 5 to 10 _ => println!("it is out of the range!"), // default } // read input from console println!("-------read input from console"); use std::io; let mut input = String::new(); println!("Hey mate! Say something:"); match io::stdin().read_line(&mut input) { Ok(_) => { println!("Success! You said: {}", input.to_ascii_uppercase()); }, Err(e) => println!("Oops! SOmething went wrong: {}", e) } // Hashmap println!("-------Hashmap"); use std::collections::HashMap; // define HashMap let mut marks = HashMap::new(); // add values marks.insert("Rust Programming", 96); marks.insert("Lua Programming", 100); marks.insert("C++ Programming", 90); marks.insert("Java Programming", 94); // prompt length of the HashMap println!("How many subjects are collected there? {}", marks.len()); // find a subject match marks.get("Rust Programming") { Some(mark) => println!("You have got {} for that.", mark), None => println!("You did not study this subject!"), } // remove an item marks.remove("Java Programming"); // loop through HashMap for (subject, mark) in &marks { println!("For {} you have got {}.", subject, mark); } // check for value println!("Did you study C#? {} ", marks.contains_key("C# Programming")); }
{ println!("{} is even", i); }
conditional_block
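One subtlety in count_to above: `for i in 1..num` iterates over a half-open range, so count_to(7) only prints 1 through 6. A minimal sketch contrasting the two range forms:

// Half-open vs inclusive ranges: `1..num` stops before `num`, `1..=num` includes it.
fn main() {
    let num = 7;
    let exclusive: Vec<u32> = (1..num).collect();  // [1, 2, 3, 4, 5, 6]
    let inclusive: Vec<u32> = (1..=num).collect(); // [1, 2, 3, 4, 5, 6, 7]
    println!("{:?}", exclusive);
    println!("{:?}", inclusive);
}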
main.rs
fn main() { // defining a variable println!("-------defining a variable"); println!("Hello, Hooman!"); let mut x = 45; // all variables initially are immutable otherwise it is mentioned println!("The value of x is {}", x); x = 10; println!("The value of x is {}", x); let y: i64; y = 734; println!("{}", y); // if statement println!("-------if statement"); if y < 10 { println!("The {} is less!", y); } else { println!("The {} is big!", y); } // loop println!("-------loop"); let mut n = 0; loop { n += 7; if n % 5 == 0 || n % 2 == 0 { continue; } println!("The value of n is {}", n); if n > 100 { break; } } // for loop println!("-------for loop"); for i in 1..10 { println!("The number is {}", i); } let range = 10..20; for i in range { println!("element in range {}", i); } let family_name = vec!["Amir", "Hooman", "Aref", "Shahnaz", "Vihan", "Shima"]; for name in family_name.iter() { println!("Family person is {}", name); } for (index, name) in family_name.iter().enumerate() { println!("Family people {} is {}", index+1, name); } for name in family_name { // in this way we cannot use family_name next time println!("name is {}", name); } // enum println!("-------enum"); enum Direction { Up, Down, Left, Right } let player_direction1:Direction = Direction::Up; let player_direction2:Direction = Direction::Down; let player_direction3:Direction = Direction::Left; let player_direction4:Direction = Direction::Right; match player_direction1 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } match player_direction2 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } match player_direction3 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } match player_direction4 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } // constants println!("-------constants"); const MAXIMUM_NUMBER: u8 = 7; // must be uppercase for n in 1..MAXIMUM_NUMBER { println!("{}", n); } // tuples println!("-------tuples"); let tup1 = ("A", ("Hooman", "Hesamyan"), "C", 734, true); println!("{}", (tup1.1).1); // referencing a tuple inside the tuple println!("{}", tup1.0); println!("{}", tup1.2); println!("{}", tup1.3); println!("{}", tup1.4); let (x, y, z, u, v) = tup1; // destructuring the tuple println!("{}", x); println!("{}", y.0); // function println!("-------functions"); fn count_to(num: u32) { for i in 1..num { if is_even(i) { println!("{} is even", i); } else { println!("{} is odd", i); } } } count_to(7); fn is_even(num: u32) -> bool { return num % 2 == 0; } let number = 12; println!("is {} even? 
{}", number, is_even(number)); // reference println!("-------references"); let mut x = 7; println!("x is {}", x); { let x_ref_mut = &mut x; // mutable reference should enclosed inside a block *x_ref_mut += 7; println!("x reference is {}", x_ref_mut); } let x_ref = &x; println!("x is {}", x); println!("x reference is {}", x_ref); // structs println!("-------structs"); struct Color { red: u8, // u8: 0-255 green: u8, blue: u8 } let bg = Color {red: 255, green: 70, blue: 15}; println!("{}, {}, {}", bg.red, bg.green, bg.blue); struct Color2(u8, u8, u8); let mut bg2 = Color2(30, 70, 255); println!("{}, {}, {}", bg2.0, bg2.1, bg2.2); bg2.2 = 40; println!("{}, {}, {}", bg2.0, bg2.1, bg2.2); // pass by reference println!("-------pass by reference"); fn print_color(c: Color) { println!("Color - R:{} G:{} B:{}", c.red, c.green, c.blue); } fn print_color2(c: &Color2) { println!("Color - R:{} G:{} B:{}", c.0, c.1, c.2); } print_color(bg); /* print_color(bg); *impossible */ print_color2(&bg2); print_color2(&bg2); print_color2(&bg2); // it is possible to have multile function invocation due to it is called by reference // arrays println!("-------arrays"); let sample_array = [1, 3, 5, 7]; // either ways are valid let sample_array2: [i32; 4] = [6, 8, 15, 20]; println!("{}", sample_array[1]); for (i, el) in sample_array.iter().enumerate() { println!("{}-th element is {}", i, el); } for i in 0..sample_array2.len() { println!("{}", sample_array2[i]); } let array_of_2 = [2; 10]; // generating an array of 2's with length 10 for el in array_of_2.iter() { println!("{}", el); } // impl println!("-------impl"); struct Rectangle { width: u32, height: u32 } impl Rectangle { fn print_description(&self) { println!("Rectangle: {} x {}", self.width, self.height); } fn is_square(&self) -> bool{ return self.width == self.height; } fn area(&self) -> u32 { return self.width * self.height; } fn perimeter(&self) -> u32
} let rectangle: Rectangle = Rectangle {height: 30, width: 10, }; rectangle.print_description(); println!("The given rectangle is square? {}", rectangle.is_square()); println!("Area is {} and perimeter is {}", rectangle.area(), rectangle.perimeter()); // Strings println!("-------Strings"); let new_string = "Hello World"; // primitive string println!("{}", new_string); let mut my_string = String::from("How is it going today?"); println!("{}", my_string); println!("{}", my_string.len()); println!("{}", my_string.is_empty()); for token in my_string.split_whitespace() { // there is not in primitive string println!("{}-", token) } println!("Does contain {} 'today' in it? {}", my_string, my_string.contains("today")); my_string.push_str(new_string); println!("{}", my_string); /* println!("{}", my_string.push_str(new_string)) *impossible */ // Traits (like interface) println!("-------Traits"); struct Person { name: String, age: u32, } // impl Person { // fn to_string(&self) -> String { // return format!("My name is {} and my age is {}", self.name, self.age); // } // } impl ToString for Person { // trait "ToString" is implemented for "Person" fn to_string(&self) -> String { return format!("My name is {} and my age is {}", self.name, self.age); } } let hooman: Person = Person {age: 39, name: String::from("Hesamyan Hooman")}; println!("{}", hooman.to_string()); // Custom Traits (like interface) println!("-------Custom Traits"); trait HasVoiceBox { // speak fn speak(&self); // check if can speak fn can_speak(&self) -> bool; } impl HasVoiceBox for Person { fn speak(&self) { println!("Hello, my name is {} ", self.name); } fn can_speak(&self) -> bool { if self.age > 3 { return true; } return false; } } println!("I am {} and I can speak? {}", hooman.name, hooman.can_speak()); hooman.speak(); // Match Operator (like Switch) println!("-------Match Operator"); let number = 11; match number { 1 => println!("It is one!"), // case 1 2 => println!("it is two!"), // case 2 3 | 4 => println!("it is three or four!"), // case 3 | 4 5..=10 => println!("it is between 5 to 10"), // case 5 to 10 _ => println!("it is out of the range!"), // default } // read input from console println!("-------read input from console"); use std::io; let mut input = String::new(); println!("Hey mate! Say something:"); match io::stdin().read_line(&mut input) { Ok(_) => { println!("Success! You said: {}", input.to_ascii_uppercase()); }, Err(e) => println!("Oops! SOmething went wrong: {}", e) } // Hashmap println!("-------Hashmap"); use std::collections::HashMap; // define HashMap let mut marks = HashMap::new(); // add values marks.insert("Rust Programming", 96); marks.insert("Lua Programming", 100); marks.insert("C++ Programming", 90); marks.insert("Java Programming", 94); // prompt length of the HashMap println!("How many subjects are collected there? {}", marks.len()); // find a subject match marks.get("Rust Programming") { Some(mark) => println!("You have got {} for that.", mark), None => println!("You did not study this subject!"), } // remove an item marks.remove("Java Programming"); // loop through HashMap for (subject, mark) in &marks { println!("For {} you have got {}.", subject, mark); } // check for value println!("Did you study C#? {} ", marks.contains_key("C# Programming")); }
{ return (self.width + self.height) * 2; }
identifier_body
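The Person snippet in the row above implements ToString directly. An alternative sketch, not the file's own code: implementing std::fmt::Display instead makes the type printable with {} and provides to_string() automatically through the standard blanket impl of ToString for Display types.

use std::fmt;

struct Person { name: String, age: u32 }

// Implementing Display gives `to_string()` for free via the blanket
// `impl<T: fmt::Display> ToString for T` in the standard library.
impl fmt::Display for Person {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "My name is {} and my age is {}", self.name, self.age)
    }
}

fn main() {
    let p = Person { name: String::from("Hooman"), age: 39 };
    println!("{}", p);             // uses Display
    println!("{}", p.to_string()); // provided automatically
}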
main.rs
fn main() { // defining a variable println!("-------defining a variable"); println!("Hello, Hooman!"); let mut x = 45; // all variables initially are immutable otherwise it is mentioned println!("The value of x is {}", x); x = 10; println!("The value of x is {}", x); let y: i64; y = 734; println!("{}", y); // if statement println!("-------if statement"); if y < 10 { println!("The {} is less!", y); } else { println!("The {} is big!", y); } // loop println!("-------loop"); let mut n = 0; loop { n += 7; if n % 5 == 0 || n % 2 == 0 { continue; } println!("The value of n is {}", n); if n > 100 { break; } } // for loop println!("-------for loop"); for i in 1..10 { println!("The number is {}", i); } let range = 10..20; for i in range { println!("element in range {}", i); } let family_name = vec!["Amir", "Hooman", "Aref", "Shahnaz", "Vihan", "Shima"]; for name in family_name.iter() { println!("Family person is {}", name); } for (index, name) in family_name.iter().enumerate() { println!("Family people {} is {}", index+1, name); } for name in family_name { // in this way we cannot use family_name next time println!("name is {}", name); } // enum println!("-------enum"); enum Direction { Up, Down, Left, Right } let player_direction1:Direction = Direction::Up; let player_direction2:Direction = Direction::Down; let player_direction3:Direction = Direction::Left; let player_direction4:Direction = Direction::Right; match player_direction1 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } match player_direction2 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } match player_direction3 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } match player_direction4 { Direction::Up => println!("We are heading Up!"), Direction::Down => println!("We are heading Down!"), Direction::Left => println!("We are heading Left!"), Direction::Right => println!("We are heading Right!") } // constants println!("-------constants"); const MAXIMUM_NUMBER: u8 = 7; // must be uppercase for n in 1..MAXIMUM_NUMBER { println!("{}", n); } // tuples println!("-------tuples"); let tup1 = ("A", ("Hooman", "Hesamyan"), "C", 734, true); println!("{}", (tup1.1).1); // referencing a tuple inside the tuple println!("{}", tup1.0); println!("{}", tup1.2); println!("{}", tup1.3); println!("{}", tup1.4); let (x, y, z, u, v) = tup1; // destructuring the tuple println!("{}", x); println!("{}", y.0); // function println!("-------functions"); fn count_to(num: u32) { for i in 1..num { if is_even(i) { println!("{} is even", i); } else { println!("{} is odd", i); } } } count_to(7); fn is_even(num: u32) -> bool { return num % 2 == 0; } let number = 12; println!("is {} even? 
{}", number, is_even(number)); // reference println!("-------references"); let mut x = 7; println!("x is {}", x); { let x_ref_mut = &mut x; // mutable reference should enclosed inside a block *x_ref_mut += 7; println!("x reference is {}", x_ref_mut); } let x_ref = &x; println!("x is {}", x); println!("x reference is {}", x_ref); // structs println!("-------structs"); struct Color { red: u8, // u8: 0-255 green: u8, blue: u8 } let bg = Color {red: 255, green: 70, blue: 15}; println!("{}, {}, {}", bg.red, bg.green, bg.blue); struct Color2(u8, u8, u8); let mut bg2 = Color2(30, 70, 255); println!("{}, {}, {}", bg2.0, bg2.1, bg2.2); bg2.2 = 40; println!("{}, {}, {}", bg2.0, bg2.1, bg2.2); // pass by reference println!("-------pass by reference"); fn print_color(c: Color) { println!("Color - R:{} G:{} B:{}", c.red, c.green, c.blue); } fn print_color2(c: &Color2) { println!("Color - R:{} G:{} B:{}", c.0, c.1, c.2); } print_color(bg); /* print_color(bg); *impossible */ print_color2(&bg2); print_color2(&bg2); print_color2(&bg2); // it is possible to have multile function invocation due to it is called by reference // arrays println!("-------arrays"); let sample_array = [1, 3, 5, 7]; // either ways are valid let sample_array2: [i32; 4] = [6, 8, 15, 20]; println!("{}", sample_array[1]); for (i, el) in sample_array.iter().enumerate() { println!("{}-th element is {}", i, el); } for i in 0..sample_array2.len() { println!("{}", sample_array2[i]); } let array_of_2 = [2; 10]; // generating an array of 2's with length 10 for el in array_of_2.iter() { println!("{}", el); } // impl println!("-------impl"); struct Rectangle { width: u32, height: u32 } impl Rectangle { fn print_description(&self) { println!("Rectangle: {} x {}", self.width, self.height); } fn is_square(&self) -> bool{ return self.width == self.height; } fn area(&self) -> u32 { return self.width * self.height; } fn perimeter(&self) -> u32 { return (self.width + self.height) * 2; } } let rectangle: Rectangle = Rectangle {height: 30, width: 10, }; rectangle.print_description(); println!("The given rectangle is square? {}", rectangle.is_square()); println!("Area is {} and perimeter is {}", rectangle.area(), rectangle.perimeter()); // Strings println!("-------Strings"); let new_string = "Hello World"; // primitive string println!("{}", new_string); let mut my_string = String::from("How is it going today?"); println!("{}", my_string); println!("{}", my_string.len()); println!("{}", my_string.is_empty()); for token in my_string.split_whitespace() { // there is not in primitive string println!("{}-", token) } println!("Does contain {} 'today' in it? 
{}", my_string, my_string.contains("today")); my_string.push_str(new_string); println!("{}", my_string); /* println!("{}", my_string.push_str(new_string)) *impossible */ // Traits (like interface) println!("-------Traits"); struct Person { name: String, age: u32, } // impl Person { // fn to_string(&self) -> String { // return format!("My name is {} and my age is {}", self.name, self.age); // } // } impl ToString for Person { // trait "ToString" is implemented for "Person" fn to_string(&self) -> String { return format!("My name is {} and my age is {}", self.name, self.age); } } let hooman: Person = Person {age: 39, name: String::from("Hesamyan Hooman")}; println!("{}", hooman.to_string()); // Custom Traits (like interface) println!("-------Custom Traits"); trait HasVoiceBox { // speak fn speak(&self); // check if can speak fn can_speak(&self) -> bool; } impl HasVoiceBox for Person { fn speak(&self) { println!("Hello, my name is {} ", self.name); } fn can_speak(&self) -> bool { if self.age > 3 { return true; } return false; } } println!("I am {} and I can speak? {}", hooman.name, hooman.can_speak()); hooman.speak(); // Match Operator (like Switch) println!("-------Match Operator"); let number = 11; match number { 1 => println!("It is one!"), // case 1 2 => println!("it is two!"), // case 2 3 | 4 => println!("it is three or four!"), // case 3 | 4 5..=10 => println!("it is between 5 to 10"), // case 5 to 10 _ => println!("it is out of the range!"), // default } // read input from console println!("-------read input from console"); use std::io; let mut input = String::new(); println!("Hey mate! Say something:"); match io::stdin().read_line(&mut input) { Ok(_) => { println!("Success! You said: {}", input.to_ascii_uppercase()); }, Err(e) => println!("Oops! SOmething went wrong: {}", e) } // Hashmap println!("-------Hashmap"); use std::collections::HashMap; // define HashMap let mut marks = HashMap::new(); // add values marks.insert("Rust Programming", 96); marks.insert("Lua Programming", 100); marks.insert("C++ Programming", 90); marks.insert("Java Programming", 94); // prompt length of the HashMap println!("How many subjects are collected there? {}", marks.len()); // find a subject match marks.get("Rust Programming") { Some(mark) => println!("You have got {} for that.", mark), None => println!("You did not study this subject!"), } // remove an item marks.remove("Java Programming"); // loop through HashMap for (subject, mark) in &marks { println!("For {} you have got {}.", subject, mark); } // check for value println!("Did you study C#? {} ", marks.contains_key("C# Programming"));
}
random_line_split
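As a supplement to the insert/get/remove calls shown in the HashMap section above, the entry API performs a single lookup and lets the caller insert a default or update a value in place. A small sketch (the subjects here are illustrative, not taken from the file):

use std::collections::HashMap;

fn main() {
    let mut marks: HashMap<&str, u32> = HashMap::new();
    marks.insert("Rust Programming", 96);

    // `entry` looks the key up once; we can then update in place or fill a default.
    *marks.entry("Rust Programming").or_insert(0) += 2; // bump an existing mark
    marks.entry("Go Programming").or_insert(80);        // insert only if absent

    for (subject, mark) in &marks {
        println!("{} -> {}", subject, mark);
    }
}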
timer.rs
//! The nRF51822 timer system operates off of the high frequency clock //! (HFCLK) and provides three timers from the clock. Timer0 is tied //! to the radio through some hard-coded peripheral linkages (e.g., there //! are dedicated PPI connections between Timer0's compare events and //! radio tasks, its capture tasks and radio events). //! //! This implementation provides a full-fledged Timer interface to //! timers 0 and 2, and exposes Timer1 as an HIL Alarm, for a Tock //! timer system. It may be that the Tock timer system should be ultimately //! placed on top of the RTC (from the low frequency clock). It's currently //! implemented this way as a demonstration that it can be and because //! the full RTC/clock interface hasn't been finalized yet. //! //! This approach should be rewritten, such that the timer system uses //! the RTC from the low frequency clock (lower power) and the scheduler //! uses the high frequency clock. //! //! Author: Philip Levis <[email protected]> //! Date: August 18, 2016 use chip; use core::cell::Cell; use core::mem; use kernel::common::VolatileCell; use kernel::hil; use nvic; use peripheral_interrupts::NvicIdx; #[repr(C, packed)] struct Registers { pub task_start: VolatileCell<u32>, pub task_stop: VolatileCell<u32>, pub task_count: VolatileCell<u32>, pub task_clear: VolatileCell<u32>, pub task_shutdown: VolatileCell<u32>, _reserved0: [VolatileCell<u32>; 11], pub task_capture: [VolatileCell<u32>; 4], // 0x40 _reserved1: [VolatileCell<u32>; 60], // 0x140 pub event_compare: [VolatileCell<u32>; 4], _reserved2: [VolatileCell<u32>; 44], // 0x150 pub shorts: VolatileCell<u32>, // 0x200 _reserved3: [VolatileCell<u32>; 64], // 0x204 pub intenset: VolatileCell<u32>, // 0x304 pub intenclr: VolatileCell<u32>, // 0x308 _reserved4: [VolatileCell<u32>; 126], // 0x30C pub mode: VolatileCell<u32>, // 0x504 pub bitmode: VolatileCell<u32>, // 0x508 _reserved5: VolatileCell<u32>, pub prescaler: VolatileCell<u32>, // 0x510 _reserved6: [VolatileCell<u32>; 11], // 0x514 pub cc: [VolatileCell<u32>; 4], // 0x540 } const SIZE: usize = 0x1000; const TIMER_BASE: usize = 0x40008000; #[derive(Copy,Clone)] pub enum Location { TIMER0, TIMER1, TIMER2, } pub static mut TIMER0: Timer = Timer { which: Location::TIMER0, nvic: NvicIdx::TIMER0, client: Cell::new(None), }; pub static mut ALARM1: TimerAlarm = TimerAlarm { which: Location::TIMER1, nvic: NvicIdx::TIMER1, client: Cell::new(None), }; pub static mut TIMER2: Timer = Timer { which: Location::TIMER2, nvic: NvicIdx::TIMER2, client: Cell::new(None), }; #[allow(non_snake_case)] fn TIMER(location: Location) -> &'static Registers { let ptr = TIMER_BASE + (location as usize) * SIZE; unsafe { mem::transmute(ptr) } } pub trait CompareClient { /// Passes a bitmask of which of the 4 compares/captures fired (0x0-0xf). 
fn compare(&self, bitmask: u8); } pub struct Timer { which: Location, nvic: NvicIdx, client: Cell<Option<&'static CompareClient>>, } impl Timer { fn timer(&self) -> &'static Registers { TIMER(self.which) } pub const fn new(location: Location, nvic: NvicIdx) -> Timer { Timer { which: location, nvic: nvic, client: Cell::new(None), } } pub fn set_client(&self, client: &'static CompareClient) { self.client.set(Some(client)); } pub fn start(&self) { self.timer().task_start.set(1); } // Stops the timer and keeps the value pub fn stop(&self) { self.timer().task_stop.set(1); } // Stops the timer and clears the value pub fn shutdown(&self) { self.timer().task_shutdown.set(1); } // Clear the value pub fn clear(&self) { self.timer().task_clear.set(1); } /// Capture the current timer value into the CC register /// specified by which, and return the value. pub fn capture(&self, which: u8) -> u32 { match which { 0 => { self.timer().task_capture[0].set(1); self.timer().cc[0].get() } 1 => { self.timer().task_capture[1].set(1); self.timer().cc[1].get() } 2 => { self.timer().task_capture[2].set(1); self.timer().cc[2].get() } _ => { self.timer().task_capture[3].set(1); self.timer().cc[3].get() } } } /// Capture the current value to the CC register specified by /// which and do not return the value. pub fn
(&self, which: u8) { let _ = self.capture(which); } /// Shortcuts can automatically stop or clear the timer on a particular /// compare event; refer to section 18.3 of the nRF reference manual /// for details. Implementation currently provides shortcuts as the /// raw bitmask. pub fn get_shortcuts(&self) -> u32 { self.timer().shorts.get() } pub fn set_shortcuts(&self, shortcut: u32) { self.timer().shorts.set(shortcut); } pub fn get_cc0(&self) -> u32 { self.timer().cc[0].get() } pub fn set_cc0(&self, val: u32) { self.timer().cc[0].set(val); } pub fn get_cc1(&self) -> u32 { self.timer().cc[1].get() } pub fn set_cc1(&self, val: u32) { self.timer().cc[0].set(val); } pub fn get_cc2(&self) -> u32 { self.timer().cc[2].get() } pub fn set_cc2(&self, val: u32) { self.timer().cc[0].set(val); } pub fn get_cc3(&self) -> u32 { self.timer().cc[3].get() } pub fn set_cc3(&self, val: u32) { self.timer().cc[0].set(val); } pub fn enable_interrupts(&self, interrupts: u32) { self.timer().intenset.set(interrupts << 16); } pub fn disable_interrupts(&self, interrupts: u32) { self.timer().intenclr.set(interrupts << 16); } pub fn enable_nvic(&self) { nvic::enable(self.nvic); } pub fn disable_nvic(&self) { nvic::disable(self.nvic); } pub fn set_prescaler(&self, val: u8) { // Only bottom 4 bits are valid, so mask them // nRF51822 reference manual, page 102 self.timer().prescaler.set((val & 0xf) as u32); } pub fn get_prescaler(&self) -> u8 { self.timer().prescaler.get() as u8 } /// When an interrupt occurs, check if any of the 4 compares have /// created an event, and if so, add it to the bitmask of triggered /// events that is passed to the client. pub fn handle_interrupt(&self) { nvic::clear_pending(self.nvic); self.client.get().map(|client| { let mut val = 0; // For each of 4 possible compare events, if it's happened, // clear it and store its bit in val to pass in callback. for i in 0..4 { if self.timer().event_compare[i].get()!= 0 { val = val | 1 << i; self.timer().event_compare[i].set(0); self.disable_interrupts(1 << (i + 16)); } } client.compare(val as u8); }); } } pub struct TimerAlarm { which: Location, nvic: NvicIdx, client: Cell<Option<&'static hil::time::Client>>, } // CC0 is used for capture // CC1 is used for compare/interrupts const ALARM_CAPTURE: usize = 0; const ALARM_COMPARE: usize = 1; const ALARM_INTERRUPT_BIT: u32 = 1 << (16 + ALARM_COMPARE); impl TimerAlarm { fn timer(&self) -> &'static Registers { TIMER(self.which) } pub const fn new(location: Location, nvic: NvicIdx) -> TimerAlarm { TimerAlarm { which: location, nvic: nvic, client: Cell::new(None), } } pub fn clear(&self) { self.clear_alarm(); self.timer().task_clear.set(1); } pub fn clear_alarm(&self) { self.timer().event_compare[ALARM_COMPARE].set(0); self.disable_interrupts(); nvic::clear_pending(self.nvic); } pub fn set_client(&self, client: &'static hil::time::Client) { self.client.set(Some(client)); } pub fn start(&self) { // Make timer 32 bits wide self.timer().bitmode.set(3); // Clock is 16MHz, so scale down by 2^10 to 16KHz self.timer().prescaler.set(10); self.timer().task_start.set(1); } pub fn stop(&self) { self.timer().task_stop.set(1); } #[inline(never)] pub fn handle_interrupt(&self) { self.clear_alarm(); self.client.get().map(|client| { client.fired(); }); } // Enable and disable interrupts use the bottom 4 bits // for the 4 compare interrupts. These functions shift // those bits to the correct place in the register. 
pub fn enable_interrupts(&self) { self.timer().intenset.set(ALARM_INTERRUPT_BIT); } pub fn disable_interrupts(&self) { self.timer().intenclr.set(ALARM_INTERRUPT_BIT); } pub fn interrupts_enabled(&self) -> bool { self.timer().intenset.get() == (ALARM_INTERRUPT_BIT) } pub fn enable_nvic(&self) { nvic::enable(self.nvic); } pub fn disable_nvic(&self) { nvic::disable(self.nvic); } pub fn value(&self) -> u32 { self.timer().task_capture[ALARM_CAPTURE].set(1); self.timer().cc[ALARM_CAPTURE].get() } } impl hil::time::Time for TimerAlarm { fn disable(&self) { self.disable_interrupts(); } fn is_armed(&self) -> bool { self.interrupts_enabled() } } impl hil::time::Alarm for TimerAlarm { type Frequency = hil::time::Freq16KHz; fn now(&self) -> u32 { self.value() } fn set_alarm(&self, tics: u32) { self.disable_interrupts(); self.timer().cc[ALARM_COMPARE].set(tics); self.clear_alarm(); self.enable_interrupts(); } fn get_alarm(&self) -> u32 { self.timer().cc[ALARM_COMPARE].get() } } #[no_mangle] #[allow(non_snake_case)] pub unsafe extern "C" fn TIMER0_Handler() { use kernel::common::Queue; nvic::disable(NvicIdx::TIMER0); chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER0); } #[no_mangle] #[allow(non_snake_case)] pub unsafe extern "C" fn TIMER1_Handler() { use kernel::common::Queue; nvic::disable(NvicIdx::TIMER1); chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER1); } #[no_mangle] #[allow(non_snake_case)] pub unsafe extern "C" fn TIMER2_Handler() { use kernel::common::Queue; nvic::disable(NvicIdx::TIMER2); chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER2); }
capture_to
identifier_name
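handle_interrupt in the row above packs the four compare events into a u8 bitmask, with bit i meaning CC[i] fired, and hands it to CompareClient::compare. A stand-alone sketch of decoding that bitmask with a mock client follows; no chip registers are involved, so the types here are local stand-ins rather than the driver's own.

// Bit i set in `bitmask` means compare register CC[i] generated an event.
trait CompareClient {
    fn compare(&self, bitmask: u8);
}

struct LoggingClient;

impl CompareClient for LoggingClient {
    fn compare(&self, bitmask: u8) {
        for i in 0..4 {
            if bitmask & (1 << i) != 0 {
                println!("compare event on CC[{}]", i);
            }
        }
    }
}

fn main() {
    // Pretend CC[0] and CC[2] matched in the same interrupt.
    LoggingClient.compare(0b0101);
}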
timer.rs
//! The nRF51822 timer system operates off of the high frequency clock //! (HFCLK) and provides three timers from the clock. Timer0 is tied //! to the radio through some hard-coded peripheral linkages (e.g., there //! are dedicated PPI connections between Timer0's compare events and //! radio tasks, its capture tasks and radio events). //! //! This implementation provides a full-fledged Timer interface to //! timers 0 and 2, and exposes Timer1 as an HIL Alarm, for a Tock //! timer system. It may be that the Tock timer system should be ultimately //! placed on top of the RTC (from the low frequency clock). It's currently //! implemented this way as a demonstration that it can be and because //! the full RTC/clock interface hasn't been finalized yet. //! //! This approach should be rewritten, such that the timer system uses //! the RTC from the low frequency clock (lower power) and the scheduler //! uses the high frequency clock. //! //! Author: Philip Levis <[email protected]> //! Date: August 18, 2016 use chip; use core::cell::Cell; use core::mem; use kernel::common::VolatileCell; use kernel::hil; use nvic; use peripheral_interrupts::NvicIdx; #[repr(C, packed)] struct Registers { pub task_start: VolatileCell<u32>, pub task_stop: VolatileCell<u32>, pub task_count: VolatileCell<u32>, pub task_clear: VolatileCell<u32>, pub task_shutdown: VolatileCell<u32>, _reserved0: [VolatileCell<u32>; 11], pub task_capture: [VolatileCell<u32>; 4], // 0x40 _reserved1: [VolatileCell<u32>; 60], // 0x140 pub event_compare: [VolatileCell<u32>; 4], _reserved2: [VolatileCell<u32>; 44], // 0x150 pub shorts: VolatileCell<u32>, // 0x200 _reserved3: [VolatileCell<u32>; 64], // 0x204 pub intenset: VolatileCell<u32>, // 0x304 pub intenclr: VolatileCell<u32>, // 0x308 _reserved4: [VolatileCell<u32>; 126], // 0x30C pub mode: VolatileCell<u32>, // 0x504 pub bitmode: VolatileCell<u32>, // 0x508 _reserved5: VolatileCell<u32>, pub prescaler: VolatileCell<u32>, // 0x510 _reserved6: [VolatileCell<u32>; 11], // 0x514 pub cc: [VolatileCell<u32>; 4], // 0x540 } const SIZE: usize = 0x1000; const TIMER_BASE: usize = 0x40008000; #[derive(Copy,Clone)] pub enum Location { TIMER0, TIMER1, TIMER2, } pub static mut TIMER0: Timer = Timer { which: Location::TIMER0, nvic: NvicIdx::TIMER0, client: Cell::new(None), }; pub static mut ALARM1: TimerAlarm = TimerAlarm { which: Location::TIMER1, nvic: NvicIdx::TIMER1, client: Cell::new(None), }; pub static mut TIMER2: Timer = Timer { which: Location::TIMER2, nvic: NvicIdx::TIMER2, client: Cell::new(None), }; #[allow(non_snake_case)] fn TIMER(location: Location) -> &'static Registers { let ptr = TIMER_BASE + (location as usize) * SIZE; unsafe { mem::transmute(ptr) } } pub trait CompareClient { /// Passes a bitmask of which of the 4 compares/captures fired (0x0-0xf). 
fn compare(&self, bitmask: u8); } pub struct Timer { which: Location, nvic: NvicIdx, client: Cell<Option<&'static CompareClient>>, } impl Timer { fn timer(&self) -> &'static Registers { TIMER(self.which) } pub const fn new(location: Location, nvic: NvicIdx) -> Timer { Timer { which: location, nvic: nvic, client: Cell::new(None), } } pub fn set_client(&self, client: &'static CompareClient) { self.client.set(Some(client)); } pub fn start(&self) { self.timer().task_start.set(1); } // Stops the timer and keeps the value pub fn stop(&self) { self.timer().task_stop.set(1); } // Stops the timer and clears the value pub fn shutdown(&self) { self.timer().task_shutdown.set(1); } // Clear the value pub fn clear(&self) { self.timer().task_clear.set(1); } /// Capture the current timer value into the CC register /// specified by which, and return the value. pub fn capture(&self, which: u8) -> u32 { match which { 0 => { self.timer().task_capture[0].set(1); self.timer().cc[0].get() } 1 => { self.timer().task_capture[1].set(1); self.timer().cc[1].get() } 2 =>
_ => { self.timer().task_capture[3].set(1); self.timer().cc[3].get() } } } /// Capture the current value to the CC register specified by /// which and do not return the value. pub fn capture_to(&self, which: u8) { let _ = self.capture(which); } /// Shortcuts can automatically stop or clear the timer on a particular /// compare event; refer to section 18.3 of the nRF reference manual /// for details. Implementation currently provides shortcuts as the /// raw bitmask. pub fn get_shortcuts(&self) -> u32 { self.timer().shorts.get() } pub fn set_shortcuts(&self, shortcut: u32) { self.timer().shorts.set(shortcut); } pub fn get_cc0(&self) -> u32 { self.timer().cc[0].get() } pub fn set_cc0(&self, val: u32) { self.timer().cc[0].set(val); } pub fn get_cc1(&self) -> u32 { self.timer().cc[1].get() } pub fn set_cc1(&self, val: u32) { self.timer().cc[0].set(val); } pub fn get_cc2(&self) -> u32 { self.timer().cc[2].get() } pub fn set_cc2(&self, val: u32) { self.timer().cc[0].set(val); } pub fn get_cc3(&self) -> u32 { self.timer().cc[3].get() } pub fn set_cc3(&self, val: u32) { self.timer().cc[0].set(val); } pub fn enable_interrupts(&self, interrupts: u32) { self.timer().intenset.set(interrupts << 16); } pub fn disable_interrupts(&self, interrupts: u32) { self.timer().intenclr.set(interrupts << 16); } pub fn enable_nvic(&self) { nvic::enable(self.nvic); } pub fn disable_nvic(&self) { nvic::disable(self.nvic); } pub fn set_prescaler(&self, val: u8) { // Only bottom 4 bits are valid, so mask them // nRF51822 reference manual, page 102 self.timer().prescaler.set((val & 0xf) as u32); } pub fn get_prescaler(&self) -> u8 { self.timer().prescaler.get() as u8 } /// When an interrupt occurs, check if any of the 4 compares have /// created an event, and if so, add it to the bitmask of triggered /// events that is passed to the client. pub fn handle_interrupt(&self) { nvic::clear_pending(self.nvic); self.client.get().map(|client| { let mut val = 0; // For each of 4 possible compare events, if it's happened, // clear it and store its bit in val to pass in callback. 
for i in 0..4 { if self.timer().event_compare[i].get()!= 0 { val = val | 1 << i; self.timer().event_compare[i].set(0); self.disable_interrupts(1 << (i + 16)); } } client.compare(val as u8); }); } } pub struct TimerAlarm { which: Location, nvic: NvicIdx, client: Cell<Option<&'static hil::time::Client>>, } // CC0 is used for capture // CC1 is used for compare/interrupts const ALARM_CAPTURE: usize = 0; const ALARM_COMPARE: usize = 1; const ALARM_INTERRUPT_BIT: u32 = 1 << (16 + ALARM_COMPARE); impl TimerAlarm { fn timer(&self) -> &'static Registers { TIMER(self.which) } pub const fn new(location: Location, nvic: NvicIdx) -> TimerAlarm { TimerAlarm { which: location, nvic: nvic, client: Cell::new(None), } } pub fn clear(&self) { self.clear_alarm(); self.timer().task_clear.set(1); } pub fn clear_alarm(&self) { self.timer().event_compare[ALARM_COMPARE].set(0); self.disable_interrupts(); nvic::clear_pending(self.nvic); } pub fn set_client(&self, client: &'static hil::time::Client) { self.client.set(Some(client)); } pub fn start(&self) { // Make timer 32 bits wide self.timer().bitmode.set(3); // Clock is 16MHz, so scale down by 2^10 to 16KHz self.timer().prescaler.set(10); self.timer().task_start.set(1); } pub fn stop(&self) { self.timer().task_stop.set(1); } #[inline(never)] pub fn handle_interrupt(&self) { self.clear_alarm(); self.client.get().map(|client| { client.fired(); }); } // Enable and disable interrupts use the bottom 4 bits // for the 4 compare interrupts. These functions shift // those bits to the correct place in the register. pub fn enable_interrupts(&self) { self.timer().intenset.set(ALARM_INTERRUPT_BIT); } pub fn disable_interrupts(&self) { self.timer().intenclr.set(ALARM_INTERRUPT_BIT); } pub fn interrupts_enabled(&self) -> bool { self.timer().intenset.get() == (ALARM_INTERRUPT_BIT) } pub fn enable_nvic(&self) { nvic::enable(self.nvic); } pub fn disable_nvic(&self) { nvic::disable(self.nvic); } pub fn value(&self) -> u32 { self.timer().task_capture[ALARM_CAPTURE].set(1); self.timer().cc[ALARM_CAPTURE].get() } } impl hil::time::Time for TimerAlarm { fn disable(&self) { self.disable_interrupts(); } fn is_armed(&self) -> bool { self.interrupts_enabled() } } impl hil::time::Alarm for TimerAlarm { type Frequency = hil::time::Freq16KHz; fn now(&self) -> u32 { self.value() } fn set_alarm(&self, tics: u32) { self.disable_interrupts(); self.timer().cc[ALARM_COMPARE].set(tics); self.clear_alarm(); self.enable_interrupts(); } fn get_alarm(&self) -> u32 { self.timer().cc[ALARM_COMPARE].get() } } #[no_mangle] #[allow(non_snake_case)] pub unsafe extern "C" fn TIMER0_Handler() { use kernel::common::Queue; nvic::disable(NvicIdx::TIMER0); chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER0); } #[no_mangle] #[allow(non_snake_case)] pub unsafe extern "C" fn TIMER1_Handler() { use kernel::common::Queue; nvic::disable(NvicIdx::TIMER1); chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER1); } #[no_mangle] #[allow(non_snake_case)] pub unsafe extern "C" fn TIMER2_Handler() { use kernel::common::Queue; nvic::disable(NvicIdx::TIMER2); chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER2); }
{ self.timer().task_capture[2].set(1); self.timer().cc[2].get() }
conditional_block
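The alarm above sets the prescaler to 10, dividing the 16 MHz timer clock by 2^10 down to 15 625 Hz, which is what the hil::time::Freq16KHz frequency exposed to the Tock timer system approximates. A back-of-the-envelope sketch of the tick math; the rounding convention in ms_to_ticks is my own, not something the HIL defines.

const TIMER_HZ: u32 = 16_000_000;
const PRESCALER: u32 = 10;

// 16_000_000 / 2^10 = 15_625 ticks per second.
fn ticks_per_second() -> u32 {
    TIMER_HZ >> PRESCALER
}

// Convert milliseconds to timer ticks, rounded to the nearest tick.
fn ms_to_ticks(ms: u32) -> u32 {
    ((ms as u64 * ticks_per_second() as u64 + 500) / 1000) as u32
}

fn main() {
    println!("{} ticks/s", ticks_per_second());      // 15625
    println!("100 ms -> {} ticks", ms_to_ticks(100)); // 1563 (1562.5 rounded up)
}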
timer.rs
//! The nRF51822 timer system operates off of the high frequency clock //! (HFCLK) and provides three timers from the clock. Timer0 is tied //! to the radio through some hard-coded peripheral linkages (e.g., there //! are dedicated PPI connections between Timer0's compare events and //! radio tasks, its capture tasks and radio events). //! //! This implementation provides a full-fledged Timer interface to //! timers 0 and 2, and exposes Timer1 as an HIL Alarm, for a Tock //! timer system. It may be that the Tock timer system should be ultimately //! placed on top of the RTC (from the low frequency clock). It's currently //! implemented this way as a demonstration that it can be and because //! the full RTC/clock interface hasn't been finalized yet. //! //! This approach should be rewritten, such that the timer system uses //! the RTC from the low frequency clock (lower power) and the scheduler //! uses the high frequency clock. //! //! Author: Philip Levis <[email protected]> //! Date: August 18, 2016 use chip; use core::cell::Cell; use core::mem; use kernel::common::VolatileCell; use kernel::hil; use nvic; use peripheral_interrupts::NvicIdx; #[repr(C, packed)] struct Registers { pub task_start: VolatileCell<u32>, pub task_stop: VolatileCell<u32>, pub task_count: VolatileCell<u32>, pub task_clear: VolatileCell<u32>, pub task_shutdown: VolatileCell<u32>, _reserved0: [VolatileCell<u32>; 11], pub task_capture: [VolatileCell<u32>; 4], // 0x40 _reserved1: [VolatileCell<u32>; 60], // 0x140 pub event_compare: [VolatileCell<u32>; 4], _reserved2: [VolatileCell<u32>; 44], // 0x150 pub shorts: VolatileCell<u32>, // 0x200 _reserved3: [VolatileCell<u32>; 64], // 0x204 pub intenset: VolatileCell<u32>, // 0x304 pub intenclr: VolatileCell<u32>, // 0x308 _reserved4: [VolatileCell<u32>; 126], // 0x30C pub mode: VolatileCell<u32>, // 0x504 pub bitmode: VolatileCell<u32>, // 0x508 _reserved5: VolatileCell<u32>, pub prescaler: VolatileCell<u32>, // 0x510 _reserved6: [VolatileCell<u32>; 11], // 0x514 pub cc: [VolatileCell<u32>; 4], // 0x540 } const SIZE: usize = 0x1000; const TIMER_BASE: usize = 0x40008000; #[derive(Copy,Clone)] pub enum Location { TIMER0, TIMER1, TIMER2, } pub static mut TIMER0: Timer = Timer { which: Location::TIMER0, nvic: NvicIdx::TIMER0, client: Cell::new(None), }; pub static mut ALARM1: TimerAlarm = TimerAlarm { which: Location::TIMER1, nvic: NvicIdx::TIMER1, client: Cell::new(None), }; pub static mut TIMER2: Timer = Timer { which: Location::TIMER2, nvic: NvicIdx::TIMER2, client: Cell::new(None), }; #[allow(non_snake_case)] fn TIMER(location: Location) -> &'static Registers { let ptr = TIMER_BASE + (location as usize) * SIZE; unsafe { mem::transmute(ptr) } } pub trait CompareClient { /// Passes a bitmask of which of the 4 compares/captures fired (0x0-0xf). 
fn compare(&self, bitmask: u8); } pub struct Timer { which: Location, nvic: NvicIdx, client: Cell<Option<&'static CompareClient>>, } impl Timer { fn timer(&self) -> &'static Registers { TIMER(self.which) } pub const fn new(location: Location, nvic: NvicIdx) -> Timer { Timer { which: location, nvic: nvic, client: Cell::new(None), } } pub fn set_client(&self, client: &'static CompareClient) { self.client.set(Some(client)); } pub fn start(&self) { self.timer().task_start.set(1); } // Stops the timer and keeps the value pub fn stop(&self) { self.timer().task_stop.set(1); } // Stops the timer and clears the value pub fn shutdown(&self) { self.timer().task_shutdown.set(1); } // Clear the value pub fn clear(&self) { self.timer().task_clear.set(1); } /// Capture the current timer value into the CC register /// specified by which, and return the value. pub fn capture(&self, which: u8) -> u32 { match which { 0 => { self.timer().task_capture[0].set(1); self.timer().cc[0].get() } 1 => { self.timer().task_capture[1].set(1); self.timer().cc[1].get() } 2 => { self.timer().task_capture[2].set(1); self.timer().cc[2].get() } _ => { self.timer().task_capture[3].set(1); self.timer().cc[3].get() } } } /// Capture the current value to the CC register specified by /// which and do not return the value. pub fn capture_to(&self, which: u8) { let _ = self.capture(which); } /// Shortcuts can automatically stop or clear the timer on a particular /// compare event; refer to section 18.3 of the nRF reference manual /// for details. Implementation currently provides shortcuts as the /// raw bitmask. pub fn get_shortcuts(&self) -> u32
    pub fn set_shortcuts(&self, shortcut: u32) {
        self.timer().shorts.set(shortcut);
    }

    pub fn get_cc0(&self) -> u32 {
        self.timer().cc[0].get()
    }

    pub fn set_cc0(&self, val: u32) {
        self.timer().cc[0].set(val);
    }

    pub fn get_cc1(&self) -> u32 {
        self.timer().cc[1].get()
    }

    pub fn set_cc1(&self, val: u32) {
        self.timer().cc[1].set(val);
    }

    pub fn get_cc2(&self) -> u32 {
        self.timer().cc[2].get()
    }

    pub fn set_cc2(&self, val: u32) {
        self.timer().cc[2].set(val);
    }

    pub fn get_cc3(&self) -> u32 {
        self.timer().cc[3].get()
    }

    pub fn set_cc3(&self, val: u32) {
        self.timer().cc[3].set(val);
    }

    pub fn enable_interrupts(&self, interrupts: u32) {
        self.timer().intenset.set(interrupts << 16);
    }

    pub fn disable_interrupts(&self, interrupts: u32) {
        self.timer().intenclr.set(interrupts << 16);
    }

    pub fn enable_nvic(&self) {
        nvic::enable(self.nvic);
    }

    pub fn disable_nvic(&self) {
        nvic::disable(self.nvic);
    }

    pub fn set_prescaler(&self, val: u8) {
        // Only bottom 4 bits are valid, so mask them
        // nRF51822 reference manual, page 102
        self.timer().prescaler.set((val & 0xf) as u32);
    }

    pub fn get_prescaler(&self) -> u8 {
        self.timer().prescaler.get() as u8
    }

    /// When an interrupt occurs, check if any of the 4 compares have
    /// created an event, and if so, add it to the bitmask of triggered
    /// events that is passed to the client.
    pub fn handle_interrupt(&self) {
        nvic::clear_pending(self.nvic);
        self.client.get().map(|client| {
            let mut val = 0;
            // For each of 4 possible compare events, if it's happened,
            // clear it and store its bit in val to pass in callback.
            for i in 0..4 {
                if self.timer().event_compare[i].get() != 0 {
                    val = val | 1 << i;
                    self.timer().event_compare[i].set(0);
                    // disable_interrupts() shifts its argument into the
                    // COMPARE[n] interrupt bits, so pass the compare index
                    // bit rather than an already-shifted mask.
                    self.disable_interrupts(1 << i);
                }
            }
            client.compare(val as u8);
        });
    }
}

pub struct TimerAlarm {
    which: Location,
    nvic: NvicIdx,
    client: Cell<Option<&'static hil::time::Client>>,
}

// CC0 is used for capture
// CC1 is used for compare/interrupts
const ALARM_CAPTURE: usize = 0;
const ALARM_COMPARE: usize = 1;
const ALARM_INTERRUPT_BIT: u32 = 1 << (16 + ALARM_COMPARE);

impl TimerAlarm {
    fn timer(&self) -> &'static Registers {
        TIMER(self.which)
    }

    pub const fn new(location: Location, nvic: NvicIdx) -> TimerAlarm {
        TimerAlarm {
            which: location,
            nvic: nvic,
            client: Cell::new(None),
        }
    }

    pub fn clear(&self) {
        self.clear_alarm();
        self.timer().task_clear.set(1);
    }

    pub fn clear_alarm(&self) {
        self.timer().event_compare[ALARM_COMPARE].set(0);
        self.disable_interrupts();
        nvic::clear_pending(self.nvic);
    }

    pub fn set_client(&self, client: &'static hil::time::Client) {
        self.client.set(Some(client));
    }

    pub fn start(&self) {
        // Make timer 32 bits wide
        self.timer().bitmode.set(3);
        // Clock is 16MHz, so a prescaler of 10 divides by 2^10,
        // giving roughly 16KHz (15.625 kHz)
        self.timer().prescaler.set(10);
        self.timer().task_start.set(1);
    }

    pub fn stop(&self) {
        self.timer().task_stop.set(1);
    }

    #[inline(never)]
    pub fn handle_interrupt(&self) {
        self.clear_alarm();
        self.client.get().map(|client| { client.fired(); });
    }

    // Enabling and disabling interrupts uses the bottom 4 bits
    // for the 4 compare interrupts. These functions shift
    // those bits to the correct place in the register.
pub fn enable_interrupts(&self) { self.timer().intenset.set(ALARM_INTERRUPT_BIT); } pub fn disable_interrupts(&self) { self.timer().intenclr.set(ALARM_INTERRUPT_BIT); } pub fn interrupts_enabled(&self) -> bool { self.timer().intenset.get() == (ALARM_INTERRUPT_BIT) } pub fn enable_nvic(&self) { nvic::enable(self.nvic); } pub fn disable_nvic(&self) { nvic::disable(self.nvic); } pub fn value(&self) -> u32 { self.timer().task_capture[ALARM_CAPTURE].set(1); self.timer().cc[ALARM_CAPTURE].get() } } impl hil::time::Time for TimerAlarm { fn disable(&self) { self.disable_interrupts(); } fn is_armed(&self) -> bool { self.interrupts_enabled() } } impl hil::time::Alarm for TimerAlarm { type Frequency = hil::time::Freq16KHz; fn now(&self) -> u32 { self.value() } fn set_alarm(&self, tics: u32) { self.disable_interrupts(); self.timer().cc[ALARM_COMPARE].set(tics); self.clear_alarm(); self.enable_interrupts(); } fn get_alarm(&self) -> u32 { self.timer().cc[ALARM_COMPARE].get() } } #[no_mangle] #[allow(non_snake_case)] pub unsafe extern "C" fn TIMER0_Handler() { use kernel::common::Queue; nvic::disable(NvicIdx::TIMER0); chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER0); } #[no_mangle] #[allow(non_snake_case)] pub unsafe extern "C" fn TIMER1_Handler() { use kernel::common::Queue; nvic::disable(NvicIdx::TIMER1); chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER1); } #[no_mangle] #[allow(non_snake_case)] pub unsafe extern "C" fn TIMER2_Handler() { use kernel::common::Queue; nvic::disable(NvicIdx::TIMER2); chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER2); }
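// Illustrative usage sketch (not part of the original timer.rs): the Timer half
// of this driver is callback driven through `CompareClient`, so the following
// shows one plausible way to wire it up. `TickCounter`, `TICKS`, and
// `start_tick_counter` are invented names, and the sketch assumes it lives in
// the same crate as the driver so that `TIMER0`, `CompareClient`, and `Cell`
// are in scope.

/// Hypothetical client that counts COMPARE[0] events on TIMER0.
struct TickCounter {
    ticks: Cell<u32>,
}

impl CompareClient for TickCounter {
    fn compare(&self, bitmask: u8) {
        // Bit i of `bitmask` is set when COMPARE[i] fired.
        if bitmask & 0b0001 != 0 {
            self.ticks.set(self.ticks.get() + 1);
        }
    }
}

static mut TICKS: TickCounter = TickCounter { ticks: Cell::new(0) };

/// Hypothetical setup: arm COMPARE[0] roughly one second out and start TIMER0.
/// The counter stays in its default 16-bit BITMODE, so the compare value must
/// fit in 16 bits.
pub unsafe fn start_tick_counter() {
    TIMER0.set_client(&TICKS);
    TIMER0.set_prescaler(9); // 16 MHz / 2^9 = 31.25 kHz tick rate
    TIMER0.set_cc0(31_250); // ~1 s at 31.25 kHz, and below the 16-bit limit
    TIMER0.enable_interrupts(1); // bit 0 selects COMPARE[0]; shifted internally
    TIMER0.enable_nvic();
    TIMER0.clear();
    TIMER0.start();
}

// Because `handle_interrupt` clears and disables the compare interrupt that
// fired, the callback above is effectively one-shot until the interrupt is
// re-enabled with `enable_interrupts`.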
{ self.timer().shorts.get() }
identifier_body
timer.rs
//! The nRF51822 timer system operates off of the high frequency clock //! (HFCLK) and provides three timers from the clock. Timer0 is tied //! to the radio through some hard-coded peripheral linkages (e.g., there //! are dedicated PPI connections between Timer0's compare events and //! radio tasks, its capture tasks and radio events). //!
//! This implementation provides a full-fledged Timer interface to //! timers 0 and 2, and exposes Timer1 as an HIL Alarm, for a Tock //! timer system. It may be that the Tock timer system should be ultimately //! placed on top of the RTC (from the low frequency clock). It's currently //! implemented this way as a demonstration that it can be and because //! the full RTC/clock interface hasn't been finalized yet. //! //! This approach should be rewritten, such that the timer system uses //! the RTC from the low frequency clock (lower power) and the scheduler //! uses the high frequency clock. //! //! Author: Philip Levis <[email protected]> //! Date: August 18, 2016 use chip; use core::cell::Cell; use core::mem; use kernel::common::VolatileCell; use kernel::hil; use nvic; use peripheral_interrupts::NvicIdx; #[repr(C, packed)] struct Registers { pub task_start: VolatileCell<u32>, pub task_stop: VolatileCell<u32>, pub task_count: VolatileCell<u32>, pub task_clear: VolatileCell<u32>, pub task_shutdown: VolatileCell<u32>, _reserved0: [VolatileCell<u32>; 11], pub task_capture: [VolatileCell<u32>; 4], // 0x40 _reserved1: [VolatileCell<u32>; 60], // 0x140 pub event_compare: [VolatileCell<u32>; 4], _reserved2: [VolatileCell<u32>; 44], // 0x150 pub shorts: VolatileCell<u32>, // 0x200 _reserved3: [VolatileCell<u32>; 64], // 0x204 pub intenset: VolatileCell<u32>, // 0x304 pub intenclr: VolatileCell<u32>, // 0x308 _reserved4: [VolatileCell<u32>; 126], // 0x30C pub mode: VolatileCell<u32>, // 0x504 pub bitmode: VolatileCell<u32>, // 0x508 _reserved5: VolatileCell<u32>, pub prescaler: VolatileCell<u32>, // 0x510 _reserved6: [VolatileCell<u32>; 11], // 0x514 pub cc: [VolatileCell<u32>; 4], // 0x540 } const SIZE: usize = 0x1000; const TIMER_BASE: usize = 0x40008000; #[derive(Copy,Clone)] pub enum Location { TIMER0, TIMER1, TIMER2, } pub static mut TIMER0: Timer = Timer { which: Location::TIMER0, nvic: NvicIdx::TIMER0, client: Cell::new(None), }; pub static mut ALARM1: TimerAlarm = TimerAlarm { which: Location::TIMER1, nvic: NvicIdx::TIMER1, client: Cell::new(None), }; pub static mut TIMER2: Timer = Timer { which: Location::TIMER2, nvic: NvicIdx::TIMER2, client: Cell::new(None), }; #[allow(non_snake_case)] fn TIMER(location: Location) -> &'static Registers { let ptr = TIMER_BASE + (location as usize) * SIZE; unsafe { mem::transmute(ptr) } } pub trait CompareClient { /// Passes a bitmask of which of the 4 compares/captures fired (0x0-0xf). fn compare(&self, bitmask: u8); } pub struct Timer { which: Location, nvic: NvicIdx, client: Cell<Option<&'static CompareClient>>, } impl Timer { fn timer(&self) -> &'static Registers { TIMER(self.which) } pub const fn new(location: Location, nvic: NvicIdx) -> Timer { Timer { which: location, nvic: nvic, client: Cell::new(None), } } pub fn set_client(&self, client: &'static CompareClient) { self.client.set(Some(client)); } pub fn start(&self) { self.timer().task_start.set(1); } // Stops the timer and keeps the value pub fn stop(&self) { self.timer().task_stop.set(1); } // Stops the timer and clears the value pub fn shutdown(&self) { self.timer().task_shutdown.set(1); } // Clear the value pub fn clear(&self) { self.timer().task_clear.set(1); } /// Capture the current timer value into the CC register /// specified by which, and return the value. 
    pub fn capture(&self, which: u8) -> u32 {
        match which {
            0 => {
                self.timer().task_capture[0].set(1);
                self.timer().cc[0].get()
            }
            1 => {
                self.timer().task_capture[1].set(1);
                self.timer().cc[1].get()
            }
            2 => {
                self.timer().task_capture[2].set(1);
                self.timer().cc[2].get()
            }
            _ => {
                self.timer().task_capture[3].set(1);
                self.timer().cc[3].get()
            }
        }
    }

    /// Capture the current value to the CC register specified by
    /// which and do not return the value.
    pub fn capture_to(&self, which: u8) {
        let _ = self.capture(which);
    }

    /// Shortcuts can automatically stop or clear the timer on a particular
    /// compare event; refer to section 18.3 of the nRF reference manual
    /// for details. Implementation currently provides shortcuts as the
    /// raw bitmask.
    pub fn get_shortcuts(&self) -> u32 {
        self.timer().shorts.get()
    }

    pub fn set_shortcuts(&self, shortcut: u32) {
        self.timer().shorts.set(shortcut);
    }

    pub fn get_cc0(&self) -> u32 {
        self.timer().cc[0].get()
    }

    pub fn set_cc0(&self, val: u32) {
        self.timer().cc[0].set(val);
    }

    pub fn get_cc1(&self) -> u32 {
        self.timer().cc[1].get()
    }

    pub fn set_cc1(&self, val: u32) {
        self.timer().cc[1].set(val);
    }

    pub fn get_cc2(&self) -> u32 {
        self.timer().cc[2].get()
    }

    pub fn set_cc2(&self, val: u32) {
        self.timer().cc[2].set(val);
    }

    pub fn get_cc3(&self) -> u32 {
        self.timer().cc[3].get()
    }

    pub fn set_cc3(&self, val: u32) {
        self.timer().cc[3].set(val);
    }

    pub fn enable_interrupts(&self, interrupts: u32) {
        self.timer().intenset.set(interrupts << 16);
    }

    pub fn disable_interrupts(&self, interrupts: u32) {
        self.timer().intenclr.set(interrupts << 16);
    }

    pub fn enable_nvic(&self) {
        nvic::enable(self.nvic);
    }

    pub fn disable_nvic(&self) {
        nvic::disable(self.nvic);
    }

    pub fn set_prescaler(&self, val: u8) {
        // Only bottom 4 bits are valid, so mask them
        // nRF51822 reference manual, page 102
        self.timer().prescaler.set((val & 0xf) as u32);
    }

    pub fn get_prescaler(&self) -> u8 {
        self.timer().prescaler.get() as u8
    }

    /// When an interrupt occurs, check if any of the 4 compares have
    /// created an event, and if so, add it to the bitmask of triggered
    /// events that is passed to the client.
    pub fn handle_interrupt(&self) {
        nvic::clear_pending(self.nvic);
        self.client.get().map(|client| {
            let mut val = 0;
            // For each of 4 possible compare events, if it's happened,
            // clear it and store its bit in val to pass in callback.
            for i in 0..4 {
                if self.timer().event_compare[i].get() != 0 {
                    val = val | 1 << i;
                    self.timer().event_compare[i].set(0);
                    // disable_interrupts() shifts its argument into the
                    // COMPARE[n] interrupt bits, so pass the compare index
                    // bit rather than an already-shifted mask.
                    self.disable_interrupts(1 << i);
                }
            }
            client.compare(val as u8);
        });
    }
}

pub struct TimerAlarm {
    which: Location,
    nvic: NvicIdx,
    client: Cell<Option<&'static hil::time::Client>>,
}

// CC0 is used for capture
// CC1 is used for compare/interrupts
const ALARM_CAPTURE: usize = 0;
const ALARM_COMPARE: usize = 1;
const ALARM_INTERRUPT_BIT: u32 = 1 << (16 + ALARM_COMPARE);

impl TimerAlarm {
    fn timer(&self) -> &'static Registers {
        TIMER(self.which)
    }

    pub const fn new(location: Location, nvic: NvicIdx) -> TimerAlarm {
        TimerAlarm {
            which: location,
            nvic: nvic,
            client: Cell::new(None),
        }
    }

    pub fn clear(&self) {
        self.clear_alarm();
        self.timer().task_clear.set(1);
    }

    pub fn clear_alarm(&self) {
        self.timer().event_compare[ALARM_COMPARE].set(0);
        self.disable_interrupts();
        nvic::clear_pending(self.nvic);
    }

    pub fn set_client(&self, client: &'static hil::time::Client) {
        self.client.set(Some(client));
    }

    pub fn start(&self) {
        // Make timer 32 bits wide
        self.timer().bitmode.set(3);
        // Clock is 16MHz, so a prescaler of 10 divides by 2^10,
        // giving roughly 16KHz (15.625 kHz)
        self.timer().prescaler.set(10);
        self.timer().task_start.set(1);
    }

    pub fn stop(&self) {
        self.timer().task_stop.set(1);
    }

    #[inline(never)]
    pub fn handle_interrupt(&self) {
        self.clear_alarm();
        self.client.get().map(|client| { client.fired(); });
    }

    // Enabling and disabling interrupts uses the bottom 4 bits
    // for the 4 compare interrupts. These functions shift
    // those bits to the correct place in the register.
    pub fn enable_interrupts(&self) {
        self.timer().intenset.set(ALARM_INTERRUPT_BIT);
    }

    pub fn disable_interrupts(&self) {
        self.timer().intenclr.set(ALARM_INTERRUPT_BIT);
    }

    pub fn interrupts_enabled(&self) -> bool {
        self.timer().intenset.get() == (ALARM_INTERRUPT_BIT)
    }

    pub fn enable_nvic(&self) {
        nvic::enable(self.nvic);
    }

    pub fn disable_nvic(&self) {
        nvic::disable(self.nvic);
    }

    pub fn value(&self) -> u32 {
        self.timer().task_capture[ALARM_CAPTURE].set(1);
        self.timer().cc[ALARM_CAPTURE].get()
    }
}

impl hil::time::Time for TimerAlarm {
    fn disable(&self) {
        self.disable_interrupts();
    }

    fn is_armed(&self) -> bool {
        self.interrupts_enabled()
    }
}

impl hil::time::Alarm for TimerAlarm {
    type Frequency = hil::time::Freq16KHz;

    fn now(&self) -> u32 {
        self.value()
    }

    fn set_alarm(&self, tics: u32) {
        self.disable_interrupts();
        self.timer().cc[ALARM_COMPARE].set(tics);
        self.clear_alarm();
        self.enable_interrupts();
    }

    fn get_alarm(&self) -> u32 {
        self.timer().cc[ALARM_COMPARE].get()
    }
}

#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER0_Handler() {
    use kernel::common::Queue;
    nvic::disable(NvicIdx::TIMER0);
    chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER0);
}

#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER1_Handler() {
    use kernel::common::Queue;
    nvic::disable(NvicIdx::TIMER1);
    chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER1);
}

#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER2_Handler() {
    use kernel::common::Queue;
    nvic::disable(NvicIdx::TIMER2);
    chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER2);
}
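// Illustrative usage sketch (not part of the original timer.rs): the TimerAlarm
// half is consumed through the `hil::time::Alarm` and `hil::time::Client`
// traits. `HalfSecond`, `HALF_SECOND`, and `start_half_second_alarm` are
// invented names, the 8_000-tic interval assumes the nominal Freq16KHz rate,
// and the sketch assumes it sits in the same crate so that `ALARM1` is visible.

use kernel::hil::time::Alarm;

/// Hypothetical client that re-arms ALARM1 every ~0.5 s.
struct HalfSecond;

impl hil::time::Client for HalfSecond {
    fn fired(&self) {
        unsafe {
            // Re-arm half a second (8_000 tics at ~16 kHz) from now.
            ALARM1.set_alarm(ALARM1.now().wrapping_add(8_000));
        }
    }
}

static HALF_SECOND: HalfSecond = HalfSecond;

/// Hypothetical setup: start the alarm timer and schedule the first callback.
pub unsafe fn start_half_second_alarm() {
    ALARM1.set_client(&HALF_SECOND);
    ALARM1.enable_nvic();
    ALARM1.start(); // 32-bit mode, prescaler 10 (~16 kHz)
    ALARM1.set_alarm(ALARM1.now().wrapping_add(8_000));
}

// Each `set_alarm` call clears any pending compare event and re-enables the
// compare interrupt, so the client keeps firing for as long as it re-arms
// itself in `fired`.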
random_line_split
session_data.rs
use super::{ configuration::{self, CoreConfig, SessionConfig}, core_data::{CoreData, CoreHandle}, }; use crate::cmd::dap_server::{ debug_adapter::{ dap::adapter::DebugAdapter, dap::dap_types::Source, protocol::ProtocolAdapter, }, DebuggerError, }; use anyhow::{anyhow, Result}; use probe_rs::{ config::TargetSelector, debug::{debug_info::DebugInfo, SourceLocation}, CoreStatus, DebugProbeError, Permissions, Probe, ProbeCreationError, Session, }; use std::env::set_current_dir; use time::UtcOffset;
/// A breakpoint was requested using an instruction address, and is usually the result of a user requesting a
    /// breakpoint while in a 'disassembly' view.
    InstructionBreakpoint,
    /// A breakpoint that has a Source, and is usually the result of a user requesting a breakpoint while in a 'source' view.
    SourceBreakpoint {
        source: Source,
        location: SourceLocationScope,
    },
}

/// Breakpoint requests will either refer to a specific SourceLocation, or be unspecified, in which case they refer to
/// all breakpoints for the Source.
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SourceLocationScope {
    All,
    Specific(SourceLocation),
}

/// Provide the storage and methods to handle various [`BreakpointType`]
#[derive(Clone, Debug)]
pub(crate) struct ActiveBreakpoint {
    pub(crate) breakpoint_type: BreakpointType,
    pub(crate) address: u64,
}

/// SessionData is designed to be similar to [probe_rs::Session], in as much as it provides handles to the [CoreHandle] instances for each of the available [probe_rs::Core] involved in the debug session.
/// To get access to the [CoreHandle] for a specific [probe_rs::Core], use [SessionData::attach_core].
/// TODO: Adjust [SessionConfig] to allow multiple cores (and if appropriate, their binaries) to be specified.
pub(crate) struct SessionData {
    pub(crate) session: Session,
    /// [SessionData] will manage one [CoreData] per target core, that is also present in [SessionConfig::core_configs]
    pub(crate) core_data: Vec<CoreData>,
    /// Offset used for RTT timestamps
    ///
    /// Getting the offset can fail, so it's better to store it.
    timestamp_offset: UtcOffset,
}

impl SessionData {
    pub(crate) fn new(
        config: &mut configuration::SessionConfig,
        timestamp_offset: UtcOffset,
    ) -> Result<Self, DebuggerError> {
        // `SessionConfig` Probe/Session level configurations initialization.
        let mut target_probe = match config.probe_selector.clone() {
            Some(selector) => Probe::open(selector.clone()).map_err(|e| match e {
                DebugProbeError::ProbeCouldNotBeCreated(ProbeCreationError::NotFound) => {
                    DebuggerError::Other(anyhow!(
                        "Could not find the probe_selector specified as {:04x}:{:04x}:{:?}",
                        selector.vendor_id,
                        selector.product_id,
                        selector.serial_number
                    ))
                }
                other_error => DebuggerError::DebugProbe(other_error),
            }),
            None => {
                // Only automatically select a probe if there is only a single probe detected.
                let list = Probe::list_all();
                if list.len() > 1 {
                    return Err(DebuggerError::Other(anyhow!(
                        "Found multiple ({}) probes",
                        list.len()
                    )));
                }
                if let Some(info) = list.first() {
                    Probe::open(info).map_err(DebuggerError::DebugProbe)
                } else {
                    return Err(DebuggerError::Other(anyhow!(
                        "No probes found. Please check your USB connections."
                    )));
                }
            }
        }?;

        let target_selector = match &config.chip {
            Some(identifier) => identifier.into(),
            None => TargetSelector::Auto,
        };

        // Set the protocol, if the user explicitly selected a protocol. Otherwise, use the default protocol of the probe.
        if let Some(wire_protocol) = config.wire_protocol {
            target_probe.select_protocol(wire_protocol)?;
        }

        // Set the speed.
        if let Some(speed) = config.speed {
            let actual_speed = target_probe.set_speed(speed)?;
            if actual_speed != speed {
                tracing::warn!(
                    "Protocol speed {} kHz not supported, actual speed is {} kHz",
                    speed,
                    actual_speed
                );
            }
        }

        let mut permissions = Permissions::new();
        if config.allow_erase_all {
            permissions = permissions.allow_erase_all();
        }

        // Attach to the probe.
        let target_session = if config.connect_under_reset {
            target_probe.attach_under_reset(target_selector, permissions)?
} else { target_probe .attach(target_selector, permissions) .map_err(|err| anyhow!("Error attaching to the probe: {:?}.", err))? }; // Change the current working directory if `config.cwd` is `Some(T)`. if let Some(new_cwd) = config.cwd.clone() { set_current_dir(new_cwd.as_path()).map_err(|err| { anyhow!( "Failed to set current working directory to: {:?}, {:?}", new_cwd, err ) })?; }; // `FlashingConfig` probe level initialization. // `CoreConfig` probe level initialization. if config.core_configs.len()!= 1 { // TODO: For multi-core, allow > 1. return Err(DebuggerError::Other(anyhow!("probe-rs-debugger requires that one, and only one, core be configured for debugging."))); } // Filter `CoreConfig` entries based on those that match an actual core on the target probe. let valid_core_configs = config .core_configs .iter() .filter(|&core_config| { target_session .list_cores() .iter() .any(|(target_core_index, _)| *target_core_index == core_config.core_index) }) .cloned() .collect::<Vec<CoreConfig>>(); let mut core_data_vec = vec![]; for core_configuration in &valid_core_configs { core_data_vec.push(CoreData { core_index: core_configuration.core_index, last_known_status: CoreStatus::Unknown, target_name: format!( "{}-{}", core_configuration.core_index, target_session.target().name ), debug_info: debug_info_from_binary(core_configuration)?, core_peripherals: None, stack_frames: Vec::<probe_rs::debug::stack_frame::StackFrame>::new(), breakpoints: Vec::<ActiveBreakpoint>::new(), rtt_connection: None, }) } Ok(SessionData { session: target_session, core_data: core_data_vec, timestamp_offset, }) } /// Reload the a specific core's debug info from the binary file. pub(crate) fn load_debug_info_for_core( &mut self, core_configuration: &CoreConfig, ) -> Result<(), DebuggerError> { if let Some(core_data) = self .core_data .iter_mut() .find(|core_data| core_data.core_index == core_configuration.core_index) { core_data.debug_info = debug_info_from_binary(core_configuration)?; Ok(()) } else { Err(DebuggerError::UnableToOpenProbe(Some( "No core at the specified index.", ))) } } /// Do a 'light weight'(just get references to existing data structures) attach to the core and return relevant debug data. pub(crate) fn attach_core(&mut self, core_index: usize) -> Result<CoreHandle, DebuggerError> { if let (Ok(target_core), Some(core_data)) = ( self.session.core(core_index), self.core_data .iter_mut() .find(|core_data| core_data.core_index == core_index), ) { Ok(CoreHandle { core: target_core, core_data, }) } else { Err(DebuggerError::UnableToOpenProbe(Some( "No core at the specified index.", ))) } } /// The target has no way of notifying the debug adapter when things changes, so we have to constantly poll it to determine: /// - Whether the target cores are running, and what their actual status is. /// - Whether the target cores have data in their RTT buffers that we need to read and pass to the client. /// /// To optimize this polling process while also optimizing the reading of RTT data, we apply a couple of principles: /// 1. Sleep (nap for a short duration) between polling each target core, but: /// - Only sleep IF the core's status hasn't changed AND there was no RTT data in the last poll. /// - Otherwise move on without delay, to keep things flowing as fast as possible. /// - The justification is that any client side CPU used to keep polling is a small price to pay for maximum throughput of debug requests and RTT from the probe. /// 2. 
Check all target cores to ensure they have a configured and initialized RTT connections and if they do, process the RTT data. /// - To keep things efficient, the polling of RTT data is done only when we expect there to be data available. /// - We check for RTT only when the core has an RTT connection configured, and one of the following is true: /// - While the core is NOT halted, because core processing can generate new data at any time. /// - The first time we have entered halted status, to ensure the buffers are drained. After that, for as long as we remain in halted state, we don't need to check RTT again. /// /// Return a Vec of [`CoreStatus`] (one entry per core) after this process has completed, as well as a boolean indicating whether we should consider a short delay before the next poll. pub(crate) fn poll_cores<P: ProtocolAdapter>( &mut self, session_config: &SessionConfig, debug_adapter: &mut DebugAdapter<P>, ) -> Result<(Vec<CoreStatus>, bool), DebuggerError> { // By default, we will have a small delay between polls, and will disable it if we know the last poll returned data, on the assumption that there might be at least one more batch of data. let mut suggest_delay_required = true; let mut status_of_cores: Vec<CoreStatus> = vec![]; let target_memory_map = &self.session.target().memory_map.clone(); let timestamp_offset = self.timestamp_offset; // Always set `all_cores_halted` to true, until one core is found to be running. debug_adapter.all_cores_halted = true; for core_config in session_config.core_configs.iter() { let Ok(mut target_core) = self.attach_core(core_config.core_index) else { tracing::debug!( "Failed to attach to target core #{}. Cannot poll for RTT data.", core_config.core_index ); continue; }; // We need to poll the core to determine its status. let current_core_status = target_core.poll_core(debug_adapter).map_err(|error| { let error = DebuggerError::ProbeRs(error); let _ = debug_adapter.show_error_message(&error); error })?; // If appropriate, check for RTT data. if core_config.rtt_config.enabled { if let Some(core_rtt) = &mut target_core.core_data.rtt_connection { // We should poll the target for rtt data, and if any RTT data was processed, we clear the flag. if core_rtt.process_rtt_data(debug_adapter, &mut target_core.core) { suggest_delay_required = false; } } else if debug_adapter.configuration_is_done() { // We have not yet reached the point in the target application where the RTT buffers are initialized, // so, provided we have processed the MSDAP request for "configurationDone", we should check again. { #[allow(clippy::unwrap_used)] match target_core.attach_to_rtt( debug_adapter, target_memory_map, core_config.program_binary.as_ref().unwrap(), &core_config.rtt_config, timestamp_offset, ) { Ok(_) => { // Nothing else to do. } Err(error) => { debug_adapter .show_error_message(&DebuggerError::Other(error)) .ok(); } } } } } // If the core is running, we set the flag to indicate that at least one core is not halted. // By setting it here, we ensure that RTT will be checked at least once after the core has halted. if!current_core_status.is_halted() { debug_adapter.all_cores_halted = false; } status_of_cores.push(current_core_status); } Ok((status_of_cores, suggest_delay_required)) } } pub(crate) fn debug_info_from_binary( core_configuration: &CoreConfig, ) -> Result<DebugInfo, DebuggerError> { let debug_info = if let Some(binary_path) = &core_configuration.program_binary { DebugInfo::from_file(binary_path).map_err(|error| DebuggerError::Other(anyhow!(error)))? 
} else { return Err(anyhow!( "Please provide a valid `program_binary` for debug core: {:?}", core_configuration.core_index ) .into()); }; Ok(debug_info) }
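// Illustrative sketch (not part of the original session_data.rs): the polling
// contract documented on `poll_cores` is easiest to see from the caller's side.
// `run_polling_loop` is an invented name, the 1 ms nap is an arbitrary choice,
// and the sketch assumes it lives in the same crate so the `pub(crate)` items
// are visible. The real debugger also services DAP requests inside this loop;
// that part is omitted here.

use std::{thread, time::Duration};

fn run_polling_loop<P: ProtocolAdapter>(
    session_data: &mut SessionData,
    session_config: &SessionConfig,
    debug_adapter: &mut DebugAdapter<P>,
) -> Result<(), DebuggerError> {
    loop {
        let (core_statuses, suggest_delay_required) =
            session_data.poll_cores(session_config, debug_adapter)?;

        // A real implementation would react to `core_statuses` here, e.g. by
        // notifying the client when a core transitions between running and halted.
        let _ = core_statuses;

        // Nap only when the last poll produced no new RTT data and no status
        // change; otherwise poll again immediately to keep data flowing.
        if suggest_delay_required {
            thread::sleep(Duration::from_millis(1));
        }
    }
}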
/// The supported breakpoint types #[derive(Clone, Debug, PartialEq)] pub(crate) enum BreakpointType {
random_line_split