file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
rcu.rs | //! Reset and clock unit
use crate::pac::RCU;
use riscv::interrupt;
use crate::time::Hertz;
use core::cmp;
/// Extension trait that sets up the `RCU` peripheral
pub trait RcuExt {
/// Configure the clocks of the `RCU` peripheral
fn configure(self) -> UnconfiguredRcu;
}
impl RcuExt for RCU {
fn configure(self) -> UnconfiguredRcu {
UnconfiguredRcu::new(self)
}
}
/// Configured RCU peripheral
pub struct Rcu {
/// Frozen clock frequencies
pub clocks: Clocks,
pub(crate) regs: RCU,
}
pub struct UnconfiguredRcu {
hxtal: Option<u32>,
sysclk: Option<u32>,
regs: RCU,
}
impl UnconfiguredRcu {
fn new(rcu: RCU) -> Self {
Self {
hxtal: None,
sysclk: None,
regs: rcu,
}
}
/// Uses an external oscillator instead of IRC8M (internal RC oscillator) as the high-speed
/// clock source. Will result in a hang if an external oscillator is not connected or it fails
/// to start.
pub fn ext_hf_clock(mut self, freq: impl Into<Hertz>) -> Self {
let freq = freq.into().0;
assert!(4_000_000 <= freq && freq <= 32_000_000);
self.hxtal = Some(freq);
self
}
/// Sets the desired frequency for the SYSCLK clock
pub fn sysclk(mut self, freq: impl Into<Hertz>) -> Self {
let freq = freq.into().0;
assert!(freq <= 108_000_000);
self.sysclk = Some(freq);
self
}
/// Freezes clock configuration, making it effective
pub fn freeze(self) -> Rcu {
const IRC8M: u32 = 8_000_000;
let target_sysclk = self.sysclk.unwrap_or(IRC8M);
let (scs_bits, use_pll) = match (self.hxtal, target_sysclk) {
(Some(freq), sysclk) if freq == sysclk => (0b01, false),
(None, sysclk) if IRC8M == sysclk => (0b00, false),
_ => (0b10, true),
};
let pllsel_bit;
let predv0_bits;
let pllmf_bits;
if use_pll | }
}
None
};
let (d, m) = calculate_pll(hxtal_freq, target_sysclk).expect("invalid sysclk value");
predv0_bits = d - 1;
pllmf = m;
} else {
// IRC8M/2 is used as an input clock
pllsel_bit = false;
let pllsource = IRC8M / 2;
let m = target_sysclk / pllsource;
let m = cmp::max(2, cmp::min(m, 32));
assert_ne!(m, 15, "invalid sysclk value");
let actual_sysclk = pllsource * m;
assert_eq!(target_sysclk, actual_sysclk, "invalid sysclk value");
predv0_bits = 0;
pllmf = m as u8;
}
pllmf_bits = match pllmf {
2..=14 => pllmf - 2,
16..=32 => pllmf - 1,
_ => unreachable!("invalid pll multiplier"),
};
}
else {
pllsel_bit = false;
predv0_bits = 0;
pllmf_bits = 0;
}
// Switch to the internal clock
let rcu = unsafe { &*crate::pac::RCU::ptr() };
rcu.ctl.modify(|_, w| w.irc8men().set_bit()); // Enable IRC8M oscillator
while rcu.ctl.read().irc8mstb().bit_is_clear() {} // Wait for oscillator to stabilize
rcu.cfg0.modify(|_, w| unsafe { w.scs().bits(0b00) }); // Switch to the internal oscillator
rcu.ctl.modify(|_, w| w.pllen().clear_bit()); // Disable PLL
// Set bus prescalers
rcu.cfg0.modify(|_, w| unsafe { w.ahbpsc().bits(0b0000) }); // CK_SYS
rcu.cfg0.modify(|_, w| unsafe { w.apb1psc().bits(0b100) }); // CK_AHB / 2
rcu.cfg0.modify(|_, w| unsafe { w.apb2psc().bits(0b000) }); // CK_AHB
let apb1_psc = 2;
let apb2_psc = 1;
if self.hxtal.is_some() {
// Enable external oscillator
rcu.ctl.modify(|_, w| w.hxtalen().set_bit());
// Wait for oscillator to stabilize
while rcu.ctl.read().hxtalstb().bit_is_clear() {}
// Select HXTAL as prescaler input source clock
rcu.cfg1.modify(|_, w| w.predv0sel().clear_bit());
// Configure the prescaler
rcu.cfg1.modify(|_, w| unsafe { w.predv0().bits(predv0_bits) });
}
if use_pll {
// Configure PLL input selector
rcu.cfg0.modify(|_, w| w.pllsel().bit(pllsel_bit));
// Configure PLL multiplier
rcu.cfg0.modify(|_, w| unsafe { w
.pllmf_4().bit(pllmf_bits & 0x10!= 0)
.pllmf_3_0().bits(pllmf_bits & 0xf)
});
// Enable PLL
rcu.ctl.modify(|_, w| w.pllen().set_bit());
// Wait for PLL to stabilize
while rcu.ctl.read().pllstb().bit_is_clear() {}
} else {
// Disable PLL
rcu.ctl.modify(|_, w| w.pllen().clear_bit());
}
// Switch to the configured clock source
rcu.cfg0.modify(|_, w| unsafe { w.scs().bits(scs_bits) });
let usbclk_valid;
if use_pll {
let pllclk = target_sysclk;
let (valid, pr) = match pllclk {
48_000_000 => (true, 0b01), // pllclk / 1
72_000_000 => (true, 0b00), // pllclk / 1.5
96_000_000 => (true, 0b11), // pllclk / 2
_ => (false, 0),
};
usbclk_valid = valid;
// Configure USB prescaler
rcu.cfg0.modify(|_, w| unsafe { w.usbfspsc().bits(pr) });
} else {
usbclk_valid = false;
}
let clocks = Clocks {
sysclk: Hertz(target_sysclk),
apb1_psc,
apb2_psc,
usbclk_valid
};
Rcu {
clocks,
regs: self.regs
}
}
}
#[derive(Copy, Clone)]
pub struct Clocks {
sysclk: Hertz,
apb1_psc: u8,
apb2_psc: u8,
usbclk_valid: bool,
}
impl Clocks {
/// Returns the system (core) frequency
pub const fn sysclk(&self) -> Hertz {
self.sysclk
}
/// Returns the frequency of the AHB
pub const fn hclk(&self) -> Hertz {
self.sysclk
}
/// Returns the frequency of the APB1
pub const fn pclk1(&self) -> Hertz {
Hertz(self.sysclk.0 / self.apb1_psc as u32)
}
/// Returns the frequency of the APB2
pub const fn pclk2(&self) -> Hertz {
Hertz(self.sysclk.0 / self.apb2_psc as u32)
}
/// Returns the frequency of the SysTick timer
pub const fn systick(&self) -> Hertz {
Hertz(self.sysclk.0 / 4)
}
/// Returns the frequency of the TIMER0 base clock
pub fn timer0(&self) -> Hertz {
let pclk2 = self.pclk2();
if self.apb2_psc == 1 {
pclk2
} else {
Hertz(pclk2.0 * 2)
}
}
/// Returns the frequency of the TIMER1..6 base clock
pub fn timerx(&self) -> Hertz {
let pclk1 = self.pclk1();
if self.apb1_psc == 1 {
pclk1
} else {
Hertz(pclk1.0 * 2)
}
}
/// Returns whether the USBCLK clock frequency is valid for the USB peripheral
pub const fn usbclk_valid(&self) -> bool {
self.usbclk_valid
}
}
macro_rules! base_freq {
($($PER:ident => $func:ident,)+) => {
$(
impl BaseFrequency for crate::pac::$PER {
#[inline(always)]
fn base_frequency(rcu: &Rcu) -> Hertz {
rcu.clocks.$func()
}
}
)+
}
}
base_freq! {
ADC0 => pclk2,
ADC1 => pclk2,
I2C0 => pclk1,
I2C1 => pclk1,
SPI0 => pclk2,
SPI1 => pclk1,
SPI2 => pclk1,
TIMER0 => timer0,
TIMER1 => timerx,
TIMER2 => timerx,
TIMER3 => timerx,
TIMER4 => timerx,
TIMER5 => timerx,
TIMER6 => timerx,
UART3 => pclk1,
UART4 => pclk1,
USART0 => pclk2,
USART1 => pclk1,
USART2 => pclk1,
}
pub(crate) mod closed_traits {
use super::Rcu;
use crate::time::Hertz;
/// Enable/disable peripheral
pub trait Enable {
fn enable(rcu: &mut Rcu);
fn disable(rcu: &mut Rcu);
}
/// Reset peripheral
pub trait Reset {
fn reset(rcu: &mut Rcu);
}
pub trait BaseFrequency {
fn base_frequency(rcu: &Rcu) -> Hertz;
}
}
pub(crate) use closed_traits::*;
macro_rules! bus_enable {
($PER:ident => ($apben:ident, $peren:ident)) => {
impl Enable for crate::pac::$PER {
#[inline(always)]
fn enable(rcu: &mut Rcu) {
interrupt::free(|_| {
rcu.regs.$apben.modify(|_, w| w.$peren().set_bit());
});
}
#[inline(always)]
fn disable(rcu: &mut Rcu) {
interrupt::free(|_| {
rcu.regs.$apben.modify(|_, w| w.$peren().clear_bit());
});
}
}
}
}
macro_rules! bus {
($($PER:ident => ($apben:ident, $apbrst:ident, $peren:ident, $perrst:ident),)+) => {
$(
bus_enable!($PER => ($apben, $peren));
impl Reset for crate::pac::$PER {
#[inline(always)]
fn reset(rcu: &mut Rcu) {
interrupt::free(|_| {
rcu.regs.$apbrst.modify(|_, w| w.$perrst().set_bit());
rcu.regs.$apbrst.modify(|_, w| w.$perrst().clear_bit());
});
}
}
)+
}
}
bus! {
ADC0 => (apb2en, apb2rst, adc0en, adc0rst),
ADC1 => (apb2en, apb2rst, adc1en, adc1rst),
AFIO => (apb2en, apb2rst, afen, afrst),
BKP => (apb1en, apb1rst, bkpien, bkpirst),
CAN0 => (apb1en, apb1rst, can0en, can0rst),
CAN1 => (apb1en, apb1rst, can1en, can1rst),
DAC => (apb1en, apb1rst, dacen, dacrst),
GPIOA => (apb2en, apb2rst, paen, parst),
GPIOB => (apb2en, apb2rst, pben, pbrst),
GPIOC => (apb2en, apb2rst, pcen, pcrst),
GPIOD => (apb2en, apb2rst, pden, pdrst),
GPIOE => (apb2en, apb2rst, peen, perst),
I2C0 => (apb1en, apb1rst, i2c0en, i2c0rst),
I2C1 => (apb1en, apb1rst, i2c1en, i2c1rst),
PMU => (apb1en, apb1rst, pmuen, pmurst),
SPI0 => (apb2en, apb2rst, spi0en, spi0rst),
SPI1 => (apb1en, apb1rst, spi1en, spi1rst),
SPI2 => (apb1en, apb1rst, spi2en, spi2rst),
TIMER0 => (apb2en, apb2rst, timer0en, timer0rst),
TIMER1 => (apb1en, apb1rst, timer1en, timer1rst),
TIMER2 => (apb1en, apb1rst, timer2en, timer2rst),
TIMER3 => (apb1en, apb1rst, timer3en, timer3rst),
TIMER4 => (apb1en, apb1rst, timer4en, timer4rst),
TIMER5 => (apb1en, apb1rst, timer5en, timer5rst),
TIMER6 => (apb1en, apb1rst, timer6en, timer6rst),
UART3 => (apb1en, apb1rst, uart3en, uart3rst),
UART4 => (apb1en, apb1rst, uart4en, uart4rst),
USART0 => (apb2en, apb2rst, usart0en, usart0rst),
USART1 => (apb1en, apb1rst, usart1en, usart1rst),
USART2 => (apb1en, apb1rst, usart2en, usart2rst),
USBFS_GLOBAL => (ahben, ahbrst, usbfsen, usbfsrst),
WWDGT => (apb1en, apb1rst, wwdgten, wwdgtrst),
}
bus_enable!(CRC => (ahben, crcen));
bus_enable!(DMA0 => (ahben, dma0en));
bus_enable!(DMA1 => (ahben, dma1en));
bus_enable!(EXMC => (ahben, exmcen));
| {
let pllmf;
if let Some(hxtal_freq) = self.hxtal {
// Use external clock + divider
pllsel_bit = true;
let calculate_pll = |source: u32, target: u32| -> Option<(u8, u8)> {
const PLL_IN_MIN: u32 = 600_000;
let div_max = cmp::min(16, source / PLL_IN_MIN);
for d in 1..=div_max {
let pllsource = source / d;
let pllm = target / pllsource;
if pllm < 2 || pllm == 15 || pllm > 32{
continue;
}
let actual_freq = pllsource * pllm;
if actual_freq == target {
return Some((d as u8, pllm as u8)); | conditional_block |
lib.rs | #![doc(html_root_url = "https://docs.rs/broadword/0.2.2")]
//! Broadword operations treat a `u64` as a parallel vector of eight `u8`s or `i8`s.
//! This module also provides a population count function [`count_ones`](fn.count_ones.html) and a
//! select function [`select1`](fn.select1.html).
//!
//! The algorithms here are from [Sebastiano Vigna, “Broadword Implementation of
//! Rank/Select Queries,”](http://sux.di.unimi.it/paper.pdf) but with several changes from
//! that work:
//!
//! - Vigna uses a 17-digit (68-bit) constant “0x0F0F0F0F0F0F0F0F0.” I believe
//! the correct constant is these 64 bits: 0x0F0F_0F0F_0F0F_0F0F.
//!
//! - Arithmetic operations are assumed to wrap on overflow. If this
//! were not the case, Algorithm 1 ([count_ones](fn.count_ones.html))
//! would overflow its last line, when multiplying by L₈.
//!
//! - Line 2 of Algorithm 2 should read
//!
//! ```
//! # let mut s: u64 = 0;
//! s = (s & 0x3333_3333_3333_3333) + ((s >> 2) & 0x3333_3333_3333_3333);
//! ```
//!
//! In the paper, the shifted `s` appears as `x`.
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
/// Has the lowest bit of every byte set: `0x0101_0101_0101_0101`.
pub const L8: u64 = 0x0101_0101_0101_0101;
/// Has the highest bit of every byte set: `0x8080_8080_8080_8080`.
pub const H8: u64 = 0x8080_8080_8080_8080;
/// Counts the number of ones in a `u64`.
///
/// Branchless. Uses the broadword algorithm from Vigna.
///
/// # Examples
///
/// ```
/// use broadword::count_ones;
///
/// assert_eq!( count_ones(0x0000_0000_0000_0000), 0 );
/// assert_eq!( count_ones(0x0000_0001_0000_0000), 1 );
/// assert_eq!( count_ones(0x0000_0001_0400_0000), 2 );
/// assert_eq!( count_ones(0x0000_0001_0600_0000), 3 );
/// assert_eq!( count_ones(0x3333_0001_0600_0000), 11 );
/// ```
#[inline]
pub fn count_ones(mut x: u64) -> usize {
x = x - ((x & 0xAAAA_AAAA_AAAA_AAAA) >> 1);
x = (x & 0x3333_3333_3333_3333) + ((x >> 2) & 0x3333_3333_3333_3333);
x = (x + (x >> 4)) & 0x0F0F_0F0F_0F0F_0F0F;
(x.wrapping_mul(L8) >> 56) as usize
}
/// Finds the index of the `r`th one bit in `x`.
///
/// Uses the broadword algorithm from Vigna.
/// Note that bits are numbered from least-significant to most.
///
/// # Examples
///
/// ```
/// use broadword::select1;
///
/// assert_eq!( select1(0, 0x0000_0000_0000_0000), None );
/// assert_eq!( select1(0, 0x0000_0000_0000_0001), Some(0) );
/// assert_eq!( select1(0, 0x0000_0000_0000_0002), Some(1) );
/// assert_eq!( select1(0, 0x0000_0000_0000_0004), Some(2) );
/// assert_eq!( select1(2, 0x0000_0000_0000_0004), None );
/// assert_eq!( select1(2, 0x0000_1010_1010_0114), Some(8) );
/// assert_eq!( select1(3, 0x0000_1010_1010_0114), Some(20) );
/// assert_eq!( select1(4, 0x0000_1010_1010_0114), Some(28) );
/// ```
#[inline]
pub fn select1(r: usize, x: u64) -> Option<usize> {
let result = select1_raw(r, x);
if result == 72 {None} else {Some(result)}
}
/// Finds the index of the `r`th one bit in `x`, returning 72 when not found.
///
/// Branchless. Uses the broadword algorithm from Vigna.
/// Note that bits are numbered from least-significant to most.
#[inline]
#[allow(clippy::many_single_char_names)]
pub fn select1_raw(r: usize, x: u64) -> usize {
let r = r as u64;
let mut s = x - ((x & 0xAAAA_AAAA_AAAA_AAAA) >> 1);
s = (s & 0x3333_3333_3333_3333) + ((s >> 2) & 0x3333_3333_3333_3333);
s = ((s + (s >> 4)) & 0x0F0F_0F0F_0F0F_0F0F).wrapping_mul(L8);
let b = (i_le8(s, r.wrapping_mul(L8)) >> 7).wrapping_mul(L8)>> 53 &!7;
let l = r - ((s << 8).wrapping_shr(b as u32) & 0xFF);
s = (u_nz8((x.wrapping_shr(b as u32) & 0xFF)
.wrapping_mul(L8) & 0x8040_2010_0804_0201) >> 7)
.wrapping_mul(L8);
(b + ((i_le8(s, l.wrapping_mul(L8)) >> 7).wrapping_mul(L8) >> 56)) as usize
}
/// Parallel ≤, treating a `u64` as a vector of 8 `u8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::u_le8;
///
/// assert_eq!( u_le8(0x03_03_04_17_92_A0_A0_A1,
/// 0x04_03_03_92_17_A0_A0_A0),
/// 0x80_80_00_80_00_80_80_00 );
/// ```
#[inline]
pub fn u_le8(x: u64, y: u64) -> u64 {
((((y | H8) - (x &!H8)) | (x ^ y)) ^ (x &!y)) & H8
}
/// Parallel ≤, treating a `u64` as a vector of 8 `i8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::i_le8;
///
/// assert_eq!( i_le8(0x03_03_04_00_FF_A0_A0_A1,
/// 0x04_03_03_FF_00_A0_A0_A0),
/// 0x80_80_00_00_80_80_80_00 );
/// ```
#[inline]
pub fn i_le8(x: u64, y: u64) -> u64 {
(((y | H8) - (x &!H8)) ^ x ^ y) & H8
}
/// Parallel >0, treating a `u64` as a vector of 8 `u8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::u_nz8;
///
/// assert_eq!( u_nz8(0x00_01_A9_40_20_17_00_06),
/// 0x00_80_80_80_80_80_00_80 );
#[inline]
pub fn u_nz8(x: u64) -> u64 {
(((x | H8) - L8) | x) & H8
}
#[cfg(test)]
#[allow(clippy::many_single_char_names)]
mod test {
use std::hash::{Hash, Hasher};
use std::collections::hash_map::DefaultHasher;
use quickcheck::TestResult;
use super::*;
#[test]
fn count_ones_0() {
assert_eq!(0, count_ones(0));
}
#[test]
fn count_ones_1() {
assert_eq!(1, count_ones(1));
}
#[test]
fn count_ones_0000_0000_0000_0010() {
assert_eq!(1, count_ones(0x0000_0000_0000_0010));
}
#[test]
fn count_ones_1000_0000_0000_0000() {
assert_eq!(1, count_ones(0x1000_0000_0000_0000));
}
#[test]
fn count_ones_ffff_ffff_ffff_ffff() {
assert_eq!(64, count_ones(0xFFFF_FFFF_FFFF_FFFF));
}
fn count_ones_prop_base(word: u64) -> bool {
count_ones(word) == word.count_ones() as usize
}
quickcheck! {
fn count_ones_prop(word: u64) -> bool {
count_ones_prop_base(word)
}
fn count_ones_prop_hash(word: u64) -> bool {
count_ones_prop_base(hash(&word))
}
}
#[test]
fn select1_0_0() {
assert_eq!(None, select1(0, 0));
}
#[test]
fn select1_0_1() {
assert_eq!(Some(0), select1(0, 1));
}
#[test]
fn select1_0_2() {
assert_eq!(Some(1), select1(0, 2));
}
#[test]
fn select1_0_3() {
assert_eq!(Some(0), select1(0, 3));
}
#[test]
fn select1_1_2() {
assert_eq!(None, select1(1, 2));
}
#[test]
fn select1_1_3() {
assert_eq!(Some(1), select1(1, 3));
}
#[test]
fn select1_3_13() {
assert_eq!(None, select1(3, 0b1101));
}
fn select1_slow(r: usize, x: u64) -> Option<usize> {
let mut count = 0;
for index in 0.. 64 {
if (x >> index) & 1 == 1 {
count += 1;
}
if count == r + 1 {
return Some(index);
}
}
None
}
fn select1_prop_base(r: u8, x: u64) -> TestResult {
if r > 64 { return TestResult::discard(); }
TestResult::from_bool(
select1(r as usize, x) == select1_slow(r as usize, x))
}
quickcheck! {
fn select1_prop(r: u8, x: u64) -> TestResult {
select1_prop_base(r, x)
}
fn select1_prop_hash(r: u8, x: u64) -> TestResult {
select1_prop_base(r, hash(&x))
}
}
fn get_bits(x: u64, i: u8, n: u8) -> u64 {
let mask = if n == 64 {!0} else {(1 << n) - 1};
(x >> i) & mask | }
quickcheck! {
fn u_nz8_prop(argument: (u64, u64, u64, u64)) -> bool {
let n = hash(&argument);
let r = u_nz8(n);
for i in 0..8 {
let ni = get_bits(n, 8 * i, 8);
let ri = get_bits(r, 8 * i, 8);
if (ni!= 0)!= (ri == 0x80) {
return false;
}
}
true
}
}
#[test]
fn u_nz8_works() {
assert_eq!(b(0, 0, 0, 0, 0, 0, 0, 0),
u_nz8(u(0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 1, 1, 0, 1, 0, 1, 1, 1),
u_nz8(u(45, 12, 0, 129, 0, 3, 80, 1)));
assert_eq!(b(1, 1, 1, 1, 1, 1, 1, 1),
u_nz8(u(1, 2, 3, 4, 5, 6, 7, 8)));
assert_eq!(b( 1, 1, 1, 1, 0, 1, 1, 1),
u_nz8(0xFF_FF_FF_FF_00_FF_FF_FF));
}
fn u_le8_prop_base(n: u64, m: u64) -> bool {
let r = u_le8(n, m);
for i in 0..8 {
let ni = get_bits(n, 8 * i, 8);
let mi = get_bits(m, 8 * i, 8);
let ri = get_bits(r, 8 * i, 8);
if (ni <= mi)!= (ri == 0x80) {
return false;
}
}
true
}
quickcheck! {
fn u_le8_prop(n: u64, m: u64) -> bool {
u_le8_prop_base(n, m)
}
fn u_le8_prop_hashed(n: (u64, u64, u64, u64),
m: (u64, u64, u64, u64)) -> bool {
let n = hash(&n);
let m = hash(&m);
u_le8_prop_base(n, m)
}
}
#[test]
fn le8_works() {
assert_eq!(b( 1, 1, 1, 1, 0, 0, 0, 0),
i_le8(i(0, 0, 0, 0, 0, 0, 0, 0),
i( 3, 2, 1, 0, -1, -2, -3, -4)));
assert_eq!(b( 0, 0, 0, 1, 1, 1, 1, 1),
i_le8(i(3, 2, 1, 0, -1, -2, -3, -4),
i( 0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 0, 0, 1, 1, 1, 1, 1, 1),
i_le8(i(19, 18, 17, 16, 15, 0, -1, -2),
i(17, 17, 17, 17, 17, 17, 17, 17)));
assert_eq!(b( 1, 1, 0, 0, 0, 0, 0, 0),
i_le8(i(-9, -8, -7, 0, 1, 2, 3, 4),
i(-8, -8, -8, -8, -8, -8, -8, -8)));
assert_eq!(b( 0, 1, 0, 1, 1, 0, 1, 0),
i_le8(i(8, 3, 46, 0, 0, 0, -6, -1),
i( 7, 3, 24, 1, 0, -9, 5, -2)));
}
#[test]
fn u_le8_works() {
assert_eq!(b( 1, 1, 1, 1, 1, 1, 1, 1),
u_le8(u( 0, 0, 0, 0, 0, 0, 0, 0),
u( 7, 6, 5, 4, 3, 2, 1, 0)));
assert_eq!(b( 1, 0, 0, 0, 0, 0, 0, 0),
u_le8(u( 0, 1, 2, 3, 4, 5, 6, 7),
u( 0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 0, 0, 1, 1, 1, 1, 1, 1),
u_le8(u(19, 18, 17, 16, 15, 14, 13, 12),
u(17, 17, 17, 17, 17, 17, 17, 17)));
assert_eq!(b( 0, 1, 0, 1, 1, 0, 1, 0),
u_le8(u( 8, 3, 46, 0, 0, 9, 3, 2),
u( 7, 3, 24, 1, 0, 0, 5, 1)));
}
/// Helpers for creating u64s.
fn b(a: u64, b: u64, c: u64, d: u64,
e: u64, f: u64, g: u64, h: u64) -> u64 {
(a << 63) | (b << 55) | (c << 47) | (d << 39) |
(e << 31) | (f << 23) | (g << 15) | (h << 7)
}
fn u(a: u8, b: u8, c: u8, d: u8,
e: u8, f: u8, g: u8, h: u8) -> u64 {
((a as u64) << 56)
| ((b as u64) << 48)
| ((c as u64) << 40)
| ((d as u64) << 32)
| ((e as u64) << 24)
| ((f as u64) << 16)
| ((g as u64) << 8)
| (h as u64)
}
fn i(a: i8, b: i8, c: i8, d: i8,
e: i8, f: i8, g: i8, h: i8) -> u64 {
u(a as u8, b as u8, c as u8, d as u8,
e as u8, f as u8, g as u8, h as u8)
}
fn hash<T: Hash>(t: &T) -> u64 {
let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
} | random_line_split |
|
lib.rs | #![doc(html_root_url = "https://docs.rs/broadword/0.2.2")]
//! Broadword operations treat a `u64` as a parallel vector of eight `u8`s or `i8`s.
//! This module also provides a population count function [`count_ones`](fn.count_ones.html) and a
//! select function [`select1`](fn.select1.html).
//!
//! The algorithms here are from [Sebastiano Vigna, “Broadword Implementation of
//! Rank/Select Queries,”](http://sux.di.unimi.it/paper.pdf) but with several changes from
//! that work:
//!
//! - Vigna uses a 17-digit (68-bit) constant “0x0F0F0F0F0F0F0F0F0.” I believe
//! the correct constant is these 64 bits: 0x0F0F_0F0F_0F0F_0F0F.
//!
//! - Arithmetic operations are assumed to wrap on overflow. If this
//! were not the case, Algorithm 1 ([count_ones](fn.count_ones.html))
//! would overflow its last line, when multiplying by L₈.
//!
//! - Line 2 of Algorithm 2 should read
//!
//! ```
//! # let mut s: u64 = 0;
//! s = (s & 0x3333_3333_3333_3333) + ((s >> 2) & 0x3333_3333_3333_3333);
//! ```
//!
//! In the paper, the shifted `s` appears as `x`.
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
/// Has the lowest bit of every byte set: `0x0101_0101_0101_0101`.
pub const L8: u64 = 0x0101_0101_0101_0101;
/// Has the highest bit of every byte set: `0x8080_8080_8080_8080`.
pub const H8: u64 = 0x8080_8080_8080_8080;
/// Counts the number of ones in a `u64`.
///
/// Branchless. Uses the broadword algorithm from Vigna.
///
/// # Examples
///
/// ```
/// use broadword::count_ones;
///
/// assert_eq!( count_ones(0x0000_0000_0000_0000), 0 );
/// assert_eq!( count_ones(0x0000_0001_0000_0000), 1 );
/// assert_eq!( count_ones(0x0000_0001_0400_0000), 2 );
/// assert_eq!( count_ones(0x0000_0001_0600_0000), 3 );
/// assert_eq!( count_ones(0x3333_0001_0600_0000), 11 );
/// ```
#[inline]
pub fn count_ones(mut x: u64) -> usize {
x = x - ((x & 0xAAAA_AAAA_AAAA_AAAA) >> 1);
x = (x & 0x3333_3333_3333_3333) + ((x >> 2) & 0x3333_3333_3333_3333);
x = (x + (x >> 4)) & 0x0F0F_0F0F_0F0F_0F0F;
(x.wrapping_mul(L8) >> 56) as usize
}
/// Finds the index of the `r`th one bit in `x`.
///
/// Uses the broadword algorithm from Vigna.
/// Note that bits are numbered from least-significant to most.
///
/// # Examples
///
/// ```
/// use broadword::select1;
///
/// assert_eq!( select1(0, 0x0000_0000_0000_0000), None );
/// assert_eq!( select1(0, 0x0000_0000_0000_0001), Some(0) );
/// assert_eq!( select1(0, 0x0000_0000_0000_0002), Some(1) );
/// assert_eq!( select1(0, 0x0000_0000_0000_0004), Some(2) );
/// assert_eq!( select1(2, 0x0000_0000_0000_0004), None );
/// assert_eq!( select1(2, 0x0000_1010_1010_0114), Some(8) );
/// assert_eq!( select1(3, 0x0000_1010_1010_0114), Some(20) );
/// assert_eq!( select1(4, 0x0000_1010_1010_0114), Some(28) );
/// ```
#[inline]
pub fn select1(r: usize, x: u64) -> Option<usize> {
let result = select1_raw(r, x);
if result == 72 {None} else {Some(result)}
}
/// Finds the index of the `r`th one bit in `x`, returning 72 when not found.
///
/// Branchless. Uses the broadword algorithm from Vigna.
/// Note that bits are numbered from least-significant to most.
#[inline]
#[allow(clippy::many_single_char_names)]
pub fn select1_raw(r: usize, x: u64) -> usize {
let r = r as u64;
let mut s = x - ((x & 0xAAAA_AAAA_AAAA_AAAA) >> 1);
s = (s & 0x3333_3333_3333_3333) + ((s >> 2) & 0x3333_3333_3333_3333);
s = ((s + (s >> 4)) & 0x0F0F_0F0F_0F0F_0F0F).wrapping_mul(L8);
let b = (i_le8(s, r.wrapping_mul(L8)) >> 7).wrapping_mul(L8)>> 53 &!7;
let l = r - ((s << 8).wrapping_shr(b as u32) & 0xFF);
s = (u_nz8((x.wrapping_shr(b as u32) & 0xFF)
.wrapping_mul(L8) & 0x8040_2010_0804_0201) >> 7)
.wrapping_mul(L8);
(b + ((i_le8(s, l.wrapping_mul(L8)) >> 7).wrapping_mul(L8) >> 56)) as usize
}
/// Parallel ≤, treating a `u64` as a vector of 8 `u8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::u_le8;
///
/// assert_eq!( u_le8(0x03_03_04_17_92_A0_A0_A1,
/// 0x04_03_03_92_17_A0_A0_A0),
/// 0x80_80_00_80_00_80_80_00 );
/// ```
#[inline]
pub fn u_le8(x: u64, y: u64) -> u64 {
((((y | H8) - (x &!H8)) | (x ^ y)) ^ (x &!y)) & H8
}
/// Parallel ≤, treating a `u64` as a vector of 8 `i8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::i_le8;
///
/// assert_eq!( i_le8(0x03_03_04_00_FF_A0_A0_A1,
/// 0x04_03_03_FF_00_A0_A0_A0),
/// 0x80_80_00_00_80_80_80_00 );
/// ```
#[inline]
pub fn i_le8(x: u64, y: u64) -> u64 {
(((y | H8) - (x &!H8)) ^ x ^ y) & H8
}
/// Parallel >0, treating a `u64` as a vector of 8 `u8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::u_nz8;
///
/// assert_eq!( u_nz8(0x00_01_A9_40_20_17_00_06),
/// 0x00_80_80_80_80_80_00_80 );
#[inline]
pub fn u_nz8(x: u64) -> u64 {
(((x | H8) - L8) | x) & H8
}
#[cfg(test)]
#[allow(clippy::many_single_char_names)]
mod test {
use std::hash::{Hash, Hasher};
use std::collections::hash_map::DefaultHasher;
use quickcheck::TestResult;
use super::*;
#[test]
fn count_ones_0() {
assert_eq!(0, count_ones(0));
}
#[test]
fn count_ones_1() {
assert_eq!(1, count_ones(1));
}
#[test]
fn count_ones_0000_0000_0000_0010() {
assert_eq!(1, count_ones(0x0000_0000_0000_0010));
}
#[test]
fn count_ones_1000_0000_0000_0000() {
assert_eq!(1, count_ones(0x1000_0000_0000_0000));
}
#[test]
fn count_ones_ffff_ffff_ffff_ffff() {
assert_eq!(64, count_ones(0xFFFF_FFFF_FFFF_FFFF));
}
fn count_ones_prop_base(word: u64) -> bool {
count_ones(word) == word.count_ones() as usize
}
quickcheck! {
fn count_ones_prop(word: u64) -> bool {
count_ones_prop_base(word)
}
fn count_ones_prop_hash(word: u64) -> bool {
count_ones_prop_base(hash(&word))
}
}
#[test]
fn select1_0_0() {
assert_eq!(None, select1(0, 0));
}
#[test]
fn select1_0_1() {
assert_eq!(Some(0), select1(0, 1));
}
#[test]
fn select1_0_2() {
assert_eq!(Some(1), select1(0, 2));
}
#[test]
fn select1_0_3() {
assert_eq!(Some(0), select1(0, 3));
}
#[test]
fn select1_1_2() {
assert_eq!(None, select1(1, 2));
}
#[test]
fn select1_1_3() {
assert_eq!(Some(1), select1(1, 3));
}
#[test]
fn select1_3_13() {
assert_eq!(None, select1(3, 0b1101));
}
fn select1_slow(r: usize, x: u64) -> Option<usize> {
let mut count = 0;
for index in 0.. 64 {
if (x >> index) & 1 == 1 {
count += 1;
}
if count == r + 1 {
return Some(index);
}
}
None
}
fn select1_prop_base(r: u8, x: u64) -> TestResult {
if r > 64 { return TestResult::discard(); }
TestResult::from_bool(
select1(r as usize, x) == select1_slow(r as usize, x))
}
quickcheck! {
fn select1_prop(r: u8, x: u64) -> TestResult {
select1_prop_base(r, x)
}
fn select1_prop_hash(r: u8, x: u64) -> TestResult {
select1_prop_base(r, hash(&x))
}
}
fn get_bits(x: u64, i: u8, n: u8) -> u64 {
let mask = if n == 64 {!0} else {(1 << n) - 1};
(x >> i) & mask
}
quickcheck! {
fn u_nz8_prop(argument: (u64, u64, u64, u64)) -> bool {
let n = hash(&argument);
let r = u_nz8(n);
for i in 0..8 {
let ni = get_bits(n, 8 * i, 8);
let ri = get_bits(r, 8 * i, 8);
if (ni!= 0)!= (ri == 0x80) {
return false;
}
}
true
}
}
#[test]
fn u_nz8_works() {
assert_eq!(b(0, 0, 0, 0, 0, 0, 0, 0),
u_nz8(u(0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 1, 1, 0, 1, 0, 1, 1, 1),
u_nz8(u(45, 12, 0, 129, 0, 3, 80, 1)));
assert_eq!(b(1, 1, 1, 1, 1, 1, 1, 1),
u_nz8(u(1, 2, 3, 4, 5, 6, 7, 8)));
assert_eq!(b( 1, 1, 1, 1, 0, 1, 1, 1),
u_nz8(0xFF_FF_FF_FF_00_FF_FF_FF));
}
fn u_le8_prop_base(n: u64, m: u64) -> bool {
let r = u_le8(n, m);
for i in 0..8 {
let ni = get_bits(n, 8 * i, 8);
let mi = get_bits(m, 8 * i, 8);
let ri = get_bits(r, 8 * i, 8);
if (ni <= mi)!= (ri == 0x80) {
return false;
}
}
true
}
quickcheck! {
fn u_le8_prop(n: u64, m: u64) -> bool {
u_le8_prop_base(n, m)
}
fn u_le8_prop_hashed(n: (u64, u64, u64, u64),
m: (u64, u64, u64, u64)) -> bool {
let n = hash(&n);
let m = hash(&m);
u_le8_prop_base(n, m)
}
}
#[test]
fn le8_works() {
assert_eq!(b( 1, 1, 1, 1, 0, 0, 0, 0),
i_le8(i(0, 0, 0, 0, 0, 0, 0, 0),
i( 3, 2, 1, 0, -1, -2, -3, -4)));
assert_eq!(b( 0, 0, 0, 1, 1, 1, 1, 1),
i_le8(i(3, 2, 1, 0, -1, -2, -3, -4),
i( 0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 0, 0, 1, 1, 1, 1, 1, 1),
i_le8(i(19, 18, 17, 16, 15, 0, -1, -2),
i(17, 17, 17, 17, 17, 17, 17, 17)));
assert_eq!(b( 1, 1, 0, 0, 0, 0, 0, 0),
i_le8(i(-9, -8, -7, 0, 1, 2, 3, 4),
i(-8, -8, -8, -8, -8, -8, -8, -8)));
assert_eq!(b( 0, 1, 0, 1, 1, 0, 1, 0),
i_le8(i(8, 3, 46, 0, 0, 0, -6, -1),
i( 7, 3, 24, 1, 0, -9, 5, -2)));
}
#[test]
fn u_le8_works() {
assert_eq!(b( 1, 1, 1, 1, 1, 1, 1, 1),
u_le8(u( 0, 0, 0, 0, 0, 0, 0, 0),
u( 7, 6, 5, 4, 3, 2, 1, 0)));
assert_eq!(b( 1, 0, 0, 0, 0, 0, 0, 0),
u_le8(u( 0, 1, 2, 3, 4, 5, 6, 7),
u( 0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 0, 0, 1, 1, 1, 1, 1, 1),
u_le8(u(19, 18, 17, 16, 15, 14, 13, 12),
u(17, 17, 17, 17, 17, 17, 17, 17)));
assert_eq!(b( 0, 1, 0, 1, 1, 0, 1, 0),
u_le8(u( 8, 3, 46, 0, 0, 9, 3, 2),
u( 7, 3, 24, 1, 0, 0, 5, 1)));
}
/// Helpers for creating u64s.
fn b(a: u64, b: u64, c: u64, d: u64,
e: u64, f: u64, g: u64, h: u64) -> u64 {
(a << 63) | (b << 55) | (c << 47) | (d << 39) |
(e << 31) | (f << 23) | (g << 15) | (h << 7)
}
fn u(a: u8, b: u8, c: u8, d: u8,
e: u8, f: u8, g: u8, h: u8) -> u64 {
((a | i8, b: i8, c: i8, d: i8,
e: i8, f: i8, g: i8, h: i8) -> u64 {
u(a as u8, b as u8, c as u8, d as u8,
e as u8, f as u8, g as u8, h as u8)
}
fn hash<T: Hash>(t: &T) -> u64 {
let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
}
| as u64) << 56)
| ((b as u64) << 48)
| ((c as u64) << 40)
| ((d as u64) << 32)
| ((e as u64) << 24)
| ((f as u64) << 16)
| ((g as u64) << 8)
| (h as u64)
}
fn i(a: | identifier_body |
lib.rs | #![doc(html_root_url = "https://docs.rs/broadword/0.2.2")]
//! Broadword operations treat a `u64` as a parallel vector of eight `u8`s or `i8`s.
//! This module also provides a population count function [`count_ones`](fn.count_ones.html) and a
//! select function [`select1`](fn.select1.html).
//!
//! The algorithms here are from [Sebastiano Vigna, “Broadword Implementation of
//! Rank/Select Queries,”](http://sux.di.unimi.it/paper.pdf) but with several changes from
//! that work:
//!
//! - Vigna uses a 17-digit (68-bit) constant “0x0F0F0F0F0F0F0F0F0.” I believe
//! the correct constant is these 64 bits: 0x0F0F_0F0F_0F0F_0F0F.
//!
//! - Arithmetic operations are assumed to wrap on overflow. If this
//! were not the case, Algorithm 1 ([count_ones](fn.count_ones.html))
//! would overflow its last line, when multiplying by L₈.
//!
//! - Line 2 of Algorithm 2 should read
//!
//! ```
//! # let mut s: u64 = 0;
//! s = (s & 0x3333_3333_3333_3333) + ((s >> 2) & 0x3333_3333_3333_3333);
//! ```
//!
//! In the paper, the shifted `s` appears as `x`.
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
/// Has the lowest bit of every byte set: `0x0101_0101_0101_0101`.
pub const L8: u64 = 0x0101_0101_0101_0101;
/// Has the highest bit of every byte set: `0x8080_8080_8080_8080`.
pub const H8: u64 = 0x8080_8080_8080_8080;
/// Counts the number of ones in a `u64`.
///
/// Branchless. Uses the broadword algorithm from Vigna.
///
/// # Examples
///
/// ```
/// use broadword::count_ones;
///
/// assert_eq!( count_ones(0x0000_0000_0000_0000), 0 );
/// assert_eq!( count_ones(0x0000_0001_0000_0000), 1 );
/// assert_eq!( count_ones(0x0000_0001_0400_0000), 2 );
/// assert_eq!( count_ones(0x0000_0001_0600_0000), 3 );
/// assert_eq!( count_ones(0x3333_0001_0600_0000), 11 );
/// ```
#[inline]
pub fn count_ones(mut x: u64) -> usize {
x = x - ((x & 0xAAAA_AAAA_AAAA_AAAA) >> 1);
x = (x & 0x3333_3333_3333_3333) + ((x >> 2) & 0x3333_3333_3333_3333);
x = (x + (x >> 4)) & 0x0F0F_0F0F_0F0F_0F0F;
(x.wrapping_mul(L8) >> 56) as usize
}
/// Finds the index of the `r`th one bit in `x`.
///
/// Uses the broadword algorithm from Vigna.
/// Note that bits are numbered from least-significant to most.
///
/// # Examples
///
/// ```
/// use broadword::select1;
///
/// assert_eq!( select1(0, 0x0000_0000_0000_0000), None );
/// assert_eq!( select1(0, 0x0000_0000_0000_0001), Some(0) );
/// assert_eq!( select1(0, 0x0000_0000_0000_0002), Some(1) );
/// assert_eq!( select1(0, 0x0000_0000_0000_0004), Some(2) );
/// assert_eq!( select1(2, 0x0000_0000_0000_0004), None );
/// assert_eq!( select1(2, 0x0000_1010_1010_0114), Some(8) );
/// assert_eq!( select1(3, 0x0000_1010_1010_0114), Some(20) );
/// assert_eq!( select1(4, 0x0000_1010_1010_0114), Some(28) );
/// ```
#[inline]
pub fn select1(r: usize, x: u64) -> Option<usize> {
let result = select1_raw(r, x);
if result == 72 {None} else {Some(result)}
}
/// Finds the index of the `r`th one bit in `x`, returning 72 when not found.
///
/// Branchless. Uses the broadword algorithm from Vigna.
/// Note that bits are numbered from least-significant to most.
#[inline]
#[allow(clippy::many_single_char_names)]
pub fn select1_raw(r: usize, x: u64) -> usize {
let r = r as u64;
let mut s = x - ((x & 0xAAAA_AAAA_AAAA_AAAA) >> 1);
s = (s & 0x3333_3333_3333_3333) + ((s >> 2) & 0x3333_3333_3333_3333);
s = ((s + (s >> 4)) & 0x0F0F_0F0F_0F0F_0F0F).wrapping_mul(L8);
let b = (i_le8(s, r.wrapping_mul(L8)) >> 7).wrapping_mul(L8)>> 53 &!7;
let l = r - ((s << 8).wrapping_shr(b as u32) & 0xFF);
s = (u_nz8((x.wrapping_shr(b as u32) & 0xFF)
.wrapping_mul(L8) & 0x8040_2010_0804_0201) >> 7)
.wrapping_mul(L8);
(b + ((i_le8(s, l.wrapping_mul(L8)) >> 7).wrapping_mul(L8) >> 56)) as usize
}
/// Parallel ≤, treating a `u64` as a vector of 8 `u8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::u_le8;
///
/// assert_eq!( u_le8(0x03_03_04_17_92_A0_A0_A1,
/// 0x04_03_03_92_17_A0_A0_A0),
/// 0x80_80_00_80_00_80_80_00 );
/// ```
#[inline]
pub fn u_le8(x: u64, y: u64) -> u64 {
((((y | H8) - (x &!H8)) | (x ^ y)) ^ (x &!y)) & H8
}
/// Parallel ≤, treating a `u64` as a vector of 8 `i8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::i_le8;
///
/// assert_eq!( i_le8(0x03_03_04_00_FF_A0_A0_A1,
/// 0x04_03_03_FF_00_A0_A0_A0),
/// 0x80_80_00_00_80_80_80_00 );
/// ```
#[inline]
pub fn i_le8(x: u64, y: u64) -> u64 {
(((y | H8) - (x &!H8)) ^ x ^ y) & H8
}
/// Parallel >0, treating a `u64` as a vector of 8 `u8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::u_nz8;
///
/// assert_eq!( u_nz8(0x00_01_A9_40_20_17_00_06),
/// 0x00_80_80_80_80_80_00_80 );
#[inline]
pub fn u_nz8(x: u64) -> u64 {
(((x | H8) - L8) | x) & H8
}
#[cfg(test)]
#[allow(clippy::many_single_char_names)]
mod test {
use std::hash::{Hash, Hasher};
use std::collections::hash_map::DefaultHasher;
use quickcheck::TestResult;
use super::*;
#[test]
fn count_ones_0() {
assert_eq!(0, count_ones(0));
}
#[test]
fn count_ones_1() {
assert_eq!(1, count_ones(1));
}
#[test]
fn count_ones_0000_0000_0000_0010() {
assert_eq!(1, count_ones(0x0000_0000_0000_0010));
}
#[test]
fn count_ones_1000_0000_0000_0000() {
assert_eq!(1, count_ones(0x1000_0000_0000_0000));
}
#[test]
fn count_ones_fff | ssert_eq!(64, count_ones(0xFFFF_FFFF_FFFF_FFFF));
}
fn count_ones_prop_base(word: u64) -> bool {
count_ones(word) == word.count_ones() as usize
}
quickcheck! {
fn count_ones_prop(word: u64) -> bool {
count_ones_prop_base(word)
}
fn count_ones_prop_hash(word: u64) -> bool {
count_ones_prop_base(hash(&word))
}
}
#[test]
fn select1_0_0() {
assert_eq!(None, select1(0, 0));
}
#[test]
fn select1_0_1() {
assert_eq!(Some(0), select1(0, 1));
}
#[test]
fn select1_0_2() {
assert_eq!(Some(1), select1(0, 2));
}
#[test]
fn select1_0_3() {
assert_eq!(Some(0), select1(0, 3));
}
#[test]
fn select1_1_2() {
assert_eq!(None, select1(1, 2));
}
#[test]
fn select1_1_3() {
assert_eq!(Some(1), select1(1, 3));
}
#[test]
fn select1_3_13() {
assert_eq!(None, select1(3, 0b1101));
}
fn select1_slow(r: usize, x: u64) -> Option<usize> {
let mut count = 0;
for index in 0.. 64 {
if (x >> index) & 1 == 1 {
count += 1;
}
if count == r + 1 {
return Some(index);
}
}
None
}
fn select1_prop_base(r: u8, x: u64) -> TestResult {
if r > 64 { return TestResult::discard(); }
TestResult::from_bool(
select1(r as usize, x) == select1_slow(r as usize, x))
}
quickcheck! {
fn select1_prop(r: u8, x: u64) -> TestResult {
select1_prop_base(r, x)
}
fn select1_prop_hash(r: u8, x: u64) -> TestResult {
select1_prop_base(r, hash(&x))
}
}
fn get_bits(x: u64, i: u8, n: u8) -> u64 {
let mask = if n == 64 {!0} else {(1 << n) - 1};
(x >> i) & mask
}
quickcheck! {
fn u_nz8_prop(argument: (u64, u64, u64, u64)) -> bool {
let n = hash(&argument);
let r = u_nz8(n);
for i in 0..8 {
let ni = get_bits(n, 8 * i, 8);
let ri = get_bits(r, 8 * i, 8);
if (ni!= 0)!= (ri == 0x80) {
return false;
}
}
true
}
}
#[test]
fn u_nz8_works() {
assert_eq!(b(0, 0, 0, 0, 0, 0, 0, 0),
u_nz8(u(0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 1, 1, 0, 1, 0, 1, 1, 1),
u_nz8(u(45, 12, 0, 129, 0, 3, 80, 1)));
assert_eq!(b(1, 1, 1, 1, 1, 1, 1, 1),
u_nz8(u(1, 2, 3, 4, 5, 6, 7, 8)));
assert_eq!(b( 1, 1, 1, 1, 0, 1, 1, 1),
u_nz8(0xFF_FF_FF_FF_00_FF_FF_FF));
}
fn u_le8_prop_base(n: u64, m: u64) -> bool {
let r = u_le8(n, m);
for i in 0..8 {
let ni = get_bits(n, 8 * i, 8);
let mi = get_bits(m, 8 * i, 8);
let ri = get_bits(r, 8 * i, 8);
if (ni <= mi)!= (ri == 0x80) {
return false;
}
}
true
}
quickcheck! {
fn u_le8_prop(n: u64, m: u64) -> bool {
u_le8_prop_base(n, m)
}
fn u_le8_prop_hashed(n: (u64, u64, u64, u64),
m: (u64, u64, u64, u64)) -> bool {
let n = hash(&n);
let m = hash(&m);
u_le8_prop_base(n, m)
}
}
#[test]
fn le8_works() {
assert_eq!(b( 1, 1, 1, 1, 0, 0, 0, 0),
i_le8(i(0, 0, 0, 0, 0, 0, 0, 0),
i( 3, 2, 1, 0, -1, -2, -3, -4)));
assert_eq!(b( 0, 0, 0, 1, 1, 1, 1, 1),
i_le8(i(3, 2, 1, 0, -1, -2, -3, -4),
i( 0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 0, 0, 1, 1, 1, 1, 1, 1),
i_le8(i(19, 18, 17, 16, 15, 0, -1, -2),
i(17, 17, 17, 17, 17, 17, 17, 17)));
assert_eq!(b( 1, 1, 0, 0, 0, 0, 0, 0),
i_le8(i(-9, -8, -7, 0, 1, 2, 3, 4),
i(-8, -8, -8, -8, -8, -8, -8, -8)));
assert_eq!(b( 0, 1, 0, 1, 1, 0, 1, 0),
i_le8(i(8, 3, 46, 0, 0, 0, -6, -1),
i( 7, 3, 24, 1, 0, -9, 5, -2)));
}
#[test]
fn u_le8_works() {
assert_eq!(b( 1, 1, 1, 1, 1, 1, 1, 1),
u_le8(u( 0, 0, 0, 0, 0, 0, 0, 0),
u( 7, 6, 5, 4, 3, 2, 1, 0)));
assert_eq!(b( 1, 0, 0, 0, 0, 0, 0, 0),
u_le8(u( 0, 1, 2, 3, 4, 5, 6, 7),
u( 0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 0, 0, 1, 1, 1, 1, 1, 1),
u_le8(u(19, 18, 17, 16, 15, 14, 13, 12),
u(17, 17, 17, 17, 17, 17, 17, 17)));
assert_eq!(b( 0, 1, 0, 1, 1, 0, 1, 0),
u_le8(u( 8, 3, 46, 0, 0, 9, 3, 2),
u( 7, 3, 24, 1, 0, 0, 5, 1)));
}
/// Helpers for creating u64s.
fn b(a: u64, b: u64, c: u64, d: u64,
e: u64, f: u64, g: u64, h: u64) -> u64 {
(a << 63) | (b << 55) | (c << 47) | (d << 39) |
(e << 31) | (f << 23) | (g << 15) | (h << 7)
}
fn u(a: u8, b: u8, c: u8, d: u8,
e: u8, f: u8, g: u8, h: u8) -> u64 {
((a as u64) << 56)
| ((b as u64) << 48)
| ((c as u64) << 40)
| ((d as u64) << 32)
| ((e as u64) << 24)
| ((f as u64) << 16)
| ((g as u64) << 8)
| (h as u64)
}
fn i(a: i8, b: i8, c: i8, d: i8,
e: i8, f: i8, g: i8, h: i8) -> u64 {
u(a as u8, b as u8, c as u8, d as u8,
e as u8, f as u8, g as u8, h as u8)
}
fn hash<T: Hash>(t: &T) -> u64 {
let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
}
| f_ffff_ffff_ffff() {
a | identifier_name |
lib.rs | #![doc(html_root_url = "https://docs.rs/broadword/0.2.2")]
//! Broadword operations treat a `u64` as a parallel vector of eight `u8`s or `i8`s.
//! This module also provides a population count function [`count_ones`](fn.count_ones.html) and a
//! select function [`select1`](fn.select1.html).
//!
//! The algorithms here are from [Sebastiano Vigna, “Broadword Implementation of
//! Rank/Select Queries,”](http://sux.di.unimi.it/paper.pdf) but with several changes from
//! that work:
//!
//! - Vigna uses a 17-digit (68-bit) constant “0x0F0F0F0F0F0F0F0F0.” I believe
//! the correct constant is these 64 bits: 0x0F0F_0F0F_0F0F_0F0F.
//!
//! - Arithmetic operations are assumed to wrap on overflow. If this
//! were not the case, Algorithm 1 ([count_ones](fn.count_ones.html))
//! would overflow its last line, when multiplying by L₈.
//!
//! - Line 2 of Algorithm 2 should read
//!
//! ```
//! # let mut s: u64 = 0;
//! s = (s & 0x3333_3333_3333_3333) + ((s >> 2) & 0x3333_3333_3333_3333);
//! ```
//!
//! In the paper, the shifted `s` appears as `x`.
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
/// Has the lowest bit of every byte set: `0x0101_0101_0101_0101`.
pub const L8: u64 = 0x0101_0101_0101_0101;
/// Has the highest bit of every byte set: `0x8080_8080_8080_8080`.
pub const H8: u64 = 0x8080_8080_8080_8080;
/// Counts the number of ones in a `u64`.
///
/// Branchless. Uses the broadword algorithm from Vigna.
///
/// # Examples
///
/// ```
/// use broadword::count_ones;
///
/// assert_eq!( count_ones(0x0000_0000_0000_0000), 0 );
/// assert_eq!( count_ones(0x0000_0001_0000_0000), 1 );
/// assert_eq!( count_ones(0x0000_0001_0400_0000), 2 );
/// assert_eq!( count_ones(0x0000_0001_0600_0000), 3 );
/// assert_eq!( count_ones(0x3333_0001_0600_0000), 11 );
/// ```
#[inline]
pub fn count_ones(mut x: u64) -> usize {
x = x - ((x & 0xAAAA_AAAA_AAAA_AAAA) >> 1);
x = (x & 0x3333_3333_3333_3333) + ((x >> 2) & 0x3333_3333_3333_3333);
x = (x + (x >> 4)) & 0x0F0F_0F0F_0F0F_0F0F;
(x.wrapping_mul(L8) >> 56) as usize
}
/// Finds the index of the `r`th one bit in `x`.
///
/// Uses the broadword algorithm from Vigna.
/// Note that bits are numbered from least-significant to most.
///
/// # Examples
///
/// ```
/// use broadword::select1;
///
/// assert_eq!( select1(0, 0x0000_0000_0000_0000), None );
/// assert_eq!( select1(0, 0x0000_0000_0000_0001), Some(0) );
/// assert_eq!( select1(0, 0x0000_0000_0000_0002), Some(1) );
/// assert_eq!( select1(0, 0x0000_0000_0000_0004), Some(2) );
/// assert_eq!( select1(2, 0x0000_0000_0000_0004), None );
/// assert_eq!( select1(2, 0x0000_1010_1010_0114), Some(8) );
/// assert_eq!( select1(3, 0x0000_1010_1010_0114), Some(20) );
/// assert_eq!( select1(4, 0x0000_1010_1010_0114), Some(28) );
/// ```
#[inline]
pub fn select1(r: usize, x: u64) -> Option<usize> {
let result = select1_raw(r, x);
if result == 72 {None} else {Some(resu | nds the index of the `r`th one bit in `x`, returning 72 when not found.
///
/// Branchless. Uses the broadword algorithm from Vigna.
/// Note that bits are numbered from least-significant to most.
#[inline]
#[allow(clippy::many_single_char_names)]
pub fn select1_raw(r: usize, x: u64) -> usize {
let r = r as u64;
let mut s = x - ((x & 0xAAAA_AAAA_AAAA_AAAA) >> 1);
s = (s & 0x3333_3333_3333_3333) + ((s >> 2) & 0x3333_3333_3333_3333);
s = ((s + (s >> 4)) & 0x0F0F_0F0F_0F0F_0F0F).wrapping_mul(L8);
let b = (i_le8(s, r.wrapping_mul(L8)) >> 7).wrapping_mul(L8)>> 53 &!7;
let l = r - ((s << 8).wrapping_shr(b as u32) & 0xFF);
s = (u_nz8((x.wrapping_shr(b as u32) & 0xFF)
.wrapping_mul(L8) & 0x8040_2010_0804_0201) >> 7)
.wrapping_mul(L8);
(b + ((i_le8(s, l.wrapping_mul(L8)) >> 7).wrapping_mul(L8) >> 56)) as usize
}
/// Parallel ≤, treating a `u64` as a vector of 8 `u8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::u_le8;
///
/// assert_eq!( u_le8(0x03_03_04_17_92_A0_A0_A1,
/// 0x04_03_03_92_17_A0_A0_A0),
/// 0x80_80_00_80_00_80_80_00 );
/// ```
#[inline]
pub fn u_le8(x: u64, y: u64) -> u64 {
((((y | H8) - (x &!H8)) | (x ^ y)) ^ (x &!y)) & H8
}
/// Parallel ≤, treating a `u64` as a vector of 8 `i8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::i_le8;
///
/// assert_eq!( i_le8(0x03_03_04_00_FF_A0_A0_A1,
/// 0x04_03_03_FF_00_A0_A0_A0),
/// 0x80_80_00_00_80_80_80_00 );
/// ```
#[inline]
pub fn i_le8(x: u64, y: u64) -> u64 {
(((y | H8) - (x &!H8)) ^ x ^ y) & H8
}
/// Parallel >0, treating a `u64` as a vector of 8 `u8`s.
///
/// Branchless.
///
/// # Examples
///
/// ```
/// use broadword::u_nz8;
///
/// assert_eq!( u_nz8(0x00_01_A9_40_20_17_00_06),
/// 0x00_80_80_80_80_80_00_80 );
#[inline]
pub fn u_nz8(x: u64) -> u64 {
(((x | H8) - L8) | x) & H8
}
#[cfg(test)]
#[allow(clippy::many_single_char_names)]
mod test {
use std::hash::{Hash, Hasher};
use std::collections::hash_map::DefaultHasher;
use quickcheck::TestResult;
use super::*;
#[test]
fn count_ones_0() {
assert_eq!(0, count_ones(0));
}
#[test]
fn count_ones_1() {
assert_eq!(1, count_ones(1));
}
#[test]
fn count_ones_0000_0000_0000_0010() {
assert_eq!(1, count_ones(0x0000_0000_0000_0010));
}
#[test]
fn count_ones_1000_0000_0000_0000() {
assert_eq!(1, count_ones(0x1000_0000_0000_0000));
}
#[test]
fn count_ones_ffff_ffff_ffff_ffff() {
assert_eq!(64, count_ones(0xFFFF_FFFF_FFFF_FFFF));
}
fn count_ones_prop_base(word: u64) -> bool {
count_ones(word) == word.count_ones() as usize
}
quickcheck! {
fn count_ones_prop(word: u64) -> bool {
count_ones_prop_base(word)
}
fn count_ones_prop_hash(word: u64) -> bool {
count_ones_prop_base(hash(&word))
}
}
#[test]
fn select1_0_0() {
assert_eq!(None, select1(0, 0));
}
#[test]
fn select1_0_1() {
assert_eq!(Some(0), select1(0, 1));
}
#[test]
fn select1_0_2() {
assert_eq!(Some(1), select1(0, 2));
}
#[test]
fn select1_0_3() {
assert_eq!(Some(0), select1(0, 3));
}
#[test]
fn select1_1_2() {
assert_eq!(None, select1(1, 2));
}
#[test]
fn select1_1_3() {
assert_eq!(Some(1), select1(1, 3));
}
#[test]
fn select1_3_13() {
assert_eq!(None, select1(3, 0b1101));
}
fn select1_slow(r: usize, x: u64) -> Option<usize> {
let mut count = 0;
for index in 0.. 64 {
if (x >> index) & 1 == 1 {
count += 1;
}
if count == r + 1 {
return Some(index);
}
}
None
}
fn select1_prop_base(r: u8, x: u64) -> TestResult {
if r > 64 { return TestResult::discard(); }
TestResult::from_bool(
select1(r as usize, x) == select1_slow(r as usize, x))
}
quickcheck! {
fn select1_prop(r: u8, x: u64) -> TestResult {
select1_prop_base(r, x)
}
fn select1_prop_hash(r: u8, x: u64) -> TestResult {
select1_prop_base(r, hash(&x))
}
}
fn get_bits(x: u64, i: u8, n: u8) -> u64 {
let mask = if n == 64 {!0} else {(1 << n) - 1};
(x >> i) & mask
}
quickcheck! {
fn u_nz8_prop(argument: (u64, u64, u64, u64)) -> bool {
let n = hash(&argument);
let r = u_nz8(n);
for i in 0..8 {
let ni = get_bits(n, 8 * i, 8);
let ri = get_bits(r, 8 * i, 8);
if (ni!= 0)!= (ri == 0x80) {
return false;
}
}
true
}
}
#[test]
fn u_nz8_works() {
assert_eq!(b(0, 0, 0, 0, 0, 0, 0, 0),
u_nz8(u(0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 1, 1, 0, 1, 0, 1, 1, 1),
u_nz8(u(45, 12, 0, 129, 0, 3, 80, 1)));
assert_eq!(b(1, 1, 1, 1, 1, 1, 1, 1),
u_nz8(u(1, 2, 3, 4, 5, 6, 7, 8)));
assert_eq!(b( 1, 1, 1, 1, 0, 1, 1, 1),
u_nz8(0xFF_FF_FF_FF_00_FF_FF_FF));
}
fn u_le8_prop_base(n: u64, m: u64) -> bool {
let r = u_le8(n, m);
for i in 0..8 {
let ni = get_bits(n, 8 * i, 8);
let mi = get_bits(m, 8 * i, 8);
let ri = get_bits(r, 8 * i, 8);
if (ni <= mi)!= (ri == 0x80) {
return false;
}
}
true
}
quickcheck! {
fn u_le8_prop(n: u64, m: u64) -> bool {
u_le8_prop_base(n, m)
}
fn u_le8_prop_hashed(n: (u64, u64, u64, u64),
m: (u64, u64, u64, u64)) -> bool {
let n = hash(&n);
let m = hash(&m);
u_le8_prop_base(n, m)
}
}
#[test]
fn le8_works() {
assert_eq!(b( 1, 1, 1, 1, 0, 0, 0, 0),
i_le8(i(0, 0, 0, 0, 0, 0, 0, 0),
i( 3, 2, 1, 0, -1, -2, -3, -4)));
assert_eq!(b( 0, 0, 0, 1, 1, 1, 1, 1),
i_le8(i(3, 2, 1, 0, -1, -2, -3, -4),
i( 0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 0, 0, 1, 1, 1, 1, 1, 1),
i_le8(i(19, 18, 17, 16, 15, 0, -1, -2),
i(17, 17, 17, 17, 17, 17, 17, 17)));
assert_eq!(b( 1, 1, 0, 0, 0, 0, 0, 0),
i_le8(i(-9, -8, -7, 0, 1, 2, 3, 4),
i(-8, -8, -8, -8, -8, -8, -8, -8)));
assert_eq!(b( 0, 1, 0, 1, 1, 0, 1, 0),
i_le8(i(8, 3, 46, 0, 0, 0, -6, -1),
i( 7, 3, 24, 1, 0, -9, 5, -2)));
}
#[test]
fn u_le8_works() {
assert_eq!(b( 1, 1, 1, 1, 1, 1, 1, 1),
u_le8(u( 0, 0, 0, 0, 0, 0, 0, 0),
u( 7, 6, 5, 4, 3, 2, 1, 0)));
assert_eq!(b( 1, 0, 0, 0, 0, 0, 0, 0),
u_le8(u( 0, 1, 2, 3, 4, 5, 6, 7),
u( 0, 0, 0, 0, 0, 0, 0, 0)));
assert_eq!(b( 0, 0, 1, 1, 1, 1, 1, 1),
u_le8(u(19, 18, 17, 16, 15, 14, 13, 12),
u(17, 17, 17, 17, 17, 17, 17, 17)));
assert_eq!(b( 0, 1, 0, 1, 1, 0, 1, 0),
u_le8(u( 8, 3, 46, 0, 0, 9, 3, 2),
u( 7, 3, 24, 1, 0, 0, 5, 1)));
}
/// Helpers for creating u64s.
fn b(a: u64, b: u64, c: u64, d: u64,
e: u64, f: u64, g: u64, h: u64) -> u64 {
(a << 63) | (b << 55) | (c << 47) | (d << 39) |
(e << 31) | (f << 23) | (g << 15) | (h << 7)
}
fn u(a: u8, b: u8, c: u8, d: u8,
e: u8, f: u8, g: u8, h: u8) -> u64 {
((a as u64) << 56)
| ((b as u64) << 48)
| ((c as u64) << 40)
| ((d as u64) << 32)
| ((e as u64) << 24)
| ((f as u64) << 16)
| ((g as u64) << 8)
| (h as u64)
}
fn i(a: i8, b: i8, c: i8, d: i8,
e: i8, f: i8, g: i8, h: i8) -> u64 {
u(a as u8, b as u8, c as u8, d as u8,
e as u8, f as u8, g as u8, h as u8)
}
fn hash<T: Hash>(t: &T) -> u64 {
let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
}
| lt)}
}
/// Fi | conditional_block |
uart.rs | //! Universal asynchronous receiver/transmitter with EasyDMA (UARTE)
//!
//! The driver provides only tranmission functionlity
//!
//! Author
//! -------------------
//!
//! * Author: Niklas Adolfsson <[email protected]>
//! * Date: March 10 2018
use core::cell::Cell;
use kernel;
use kernel::common::regs::{ReadOnly, ReadWrite, WriteOnly};
use nrf5x::pinmux;
const UARTE_BASE: u32 = 0x40002000;
static mut BYTE: u8 = 0;
#[repr(C)]
struct UarteRegisters {
pub task_startrx: WriteOnly<u32, Task::Register>, // 0x000
pub task_stoprx: WriteOnly<u32, Task::Register>, // 0x004
pub task_starttx: WriteOnly<u32, Task::Register>, // 0x008
pub task_stoptx: WriteOnly<u32, Task::Register>, // 0x00c
_reserved1: [u32; 7], // 0x010-0x02c
pub task_flush_rx: WriteOnly<u32, Task::Register>, // 0x02c
_reserved2: [u32; 52], // 0x030-0x100
pub event_cts: ReadWrite<u32, Event::Register>, // 0x100-0x104
pub event_ncts: ReadWrite<u32, Event::Register>, // 0x104-0x108
_reserved3: [u32; 2], // 0x108-0x110
pub event_endrx: ReadWrite<u32, Event::Register>, // 0x110-0x114
_reserved4: [u32; 3], // 0x114-0x120
pub event_endtx: ReadWrite<u32, Event::Register>, // 0x120-0x124
pub event_error: ReadWrite<u32, Event::Register>, // 0x124-0x128
_reserved6: [u32; 7], // 0x128-0x144
pub event_rxto: ReadWrite<u32, Event::Register>, // 0x144-0x148
_reserved7: [u32; 1], // 0x148-0x14C
pub event_rxstarted: ReadWrite<u32, Event::Register>, // 0x14C-0x150
pub event_txstarted: ReadWrite<u32, Event::Register>, // 0x150-0x154
_reserved8: [u32; 1], // 0x154-0x158
pub event_txstopped: ReadWrite<u32, Event::Register>, // 0x158-0x15c
_reserved9: [u32; 41], // 0x15c-0x200
pub shorts: ReadWrite<u32, Shorts::Register>, // 0x200-0x204
_reserved10: [u32; 64], // 0x204-0x304
pub intenset: ReadWrite<u32, Interrupt::Register>, // 0x304-0x308
pub intenclr: ReadWrite<u32, Interrupt::Register>, // 0x308-0x30C
_reserved11: [u32; 93], // 0x30C-0x480
pub errorsrc: ReadWrite<u32, ErrorSrc::Register>, // 0x480-0x484
_reserved12: [u32; 31], // 0x484-0x500
pub enable: ReadWrite<u32, Uart::Register>, // 0x500-0x504
_reserved13: [u32; 1], // 0x504-0x508
pub pselrts: ReadWrite<u32, Psel::Register>, // 0x508-0x50c
pub pseltxd: ReadWrite<u32, Psel::Register>, // 0x50c-0x510
pub pselcts: ReadWrite<u32, Psel::Register>, // 0x510-0x514
pub pselrxd: ReadWrite<u32, Psel::Register>, // 0x514-0x518
_reserved14: [u32; 3], // 0x518-0x524
pub baudrate: ReadWrite<u32, Baudrate::Register>, // 0x524-0x528
_reserved15: [u32; 3], // 0x528-0x534
pub rxd_ptr: ReadWrite<u32, Pointer::Register>, // 0x534-0x538
pub rxd_maxcnt: ReadWrite<u32, Counter::Register>, // 0x538-0x53c
pub rxd_amount: ReadOnly<u32, Counter::Register>, // 0x53c-0x540
_reserved16: [u32; 1], // 0x540-0x544
pub txd_ptr: ReadWrite<u32, Pointer::Register>, // 0x544-0x548
pub txd_maxcnt: ReadWrite<u32, Counter::Register>, // 0x548-0x54c
pub txd_amount: ReadOnly<u32, Counter::Register>, // 0x54c-0x550
_reserved17: [u32; 7], // 0x550-0x56C
pub config: ReadWrite<u32, Config::Register>, // 0x56C-0x570
}
#[cfg_attr(rustfmt, rustfmt_skip)]
register_bitfields! [u32,
/// Start task
Task [
ENABLE OFFSET(0) NUMBITS(1)
],
/// Read event
Event [
READY OFFSET(0) NUMBITS(1)
],
/// Shortcuts
Shorts [
// Shortcut between ENDRX and STARTRX
ENDRX_STARTRX OFFSET(5) NUMBITS(1),
// Shortcut between ENDRX and STOPRX
ENDRX_STOPRX OFFSET(6) NUMBITS(1)
],
/// UART Interrupts
Interrupt [
CTS OFFSET(0) NUMBITS(1),
NCTS OFFSET(1) NUMBITS(1),
ENDRX OFFSET(4) NUMBITS(1),
ENDTX OFFSET(8) NUMBITS(1),
ERROR OFFSET(9) NUMBITS(1),
RXTO OFFSET(17) NUMBITS(1),
RXSTARTED OFFSET(19) NUMBITS(1),
TXSTARTED OFFSET(20) NUMBITS(1),
TXSTOPPED OFFSET(22) NUMBITS(1)
],
/// UART Errors
ErrorSrc [
OVERRUN OFFSET(0) NUMBITS(1),
PARITY OFFSET(1) NUMBITS(1),
FRAMING OFFSET(2) NUMBITS(1),
BREAK OFFSET(3) NUMBITS(1)
],
/// Enable UART
Uart [
ENABLE OFFSET(0) NUMBITS(4) [
ON = 8,
OFF = 0
]
],
/// Pin select
Psel [
// Pin number
PIN OFFSET(0) NUMBITS(5),
// Connect/Disconnect
CONNECT OFFSET(31) NUMBITS(1)
],
/// Baudrate
Baudrate [
BAUDRAUTE OFFSET(0) NUMBITS(32)
],
/// DMA pointer
Pointer [
POINTER OFFSET(0) NUMBITS(32)
],
/// Counter value
Counter [
COUNTER OFFSET(0) NUMBITS(8)
],
/// Configuration of parity and flow control
Config [
HWFC OFFSET(0) NUMBITS(1),
PARITY OFFSET(1) NUMBITS(3)
]
];
/// UARTE
// It should never be instanced outside this module but because a static mutable reference to it
// is exported outside this module it must be `pub`
pub struct | {
regs: *const UarteRegisters,
client: Cell<Option<&'static kernel::hil::uart::Client>>,
buffer: kernel::common::take_cell::TakeCell<'static, [u8]>,
remaining_bytes: Cell<usize>,
offset: Cell<usize>,
}
#[derive(Copy, Clone)]
pub struct UARTParams {
pub baud_rate: u32,
}
/// UARTE0 handle
// This should only be accessed by the reset_handler on startup
pub static mut UARTE0: Uarte = Uarte::new();
impl Uarte {
/// Constructor
pub const fn new() -> Uarte {
Uarte {
regs: UARTE_BASE as *const UarteRegisters,
client: Cell::new(None),
buffer: kernel::common::take_cell::TakeCell::empty(),
remaining_bytes: Cell::new(0),
offset: Cell::new(0),
}
}
/// Configure which pins the UART should use for txd, rxd, cts and rts
pub fn configure(
&self,
txd: pinmux::Pinmux,
rxd: pinmux::Pinmux,
cts: pinmux::Pinmux,
rts: pinmux::Pinmux,
) {
let regs = unsafe { &*self.regs };
regs.pseltxd.write(Psel::PIN.val(txd.into()));
regs.pselrxd.write(Psel::PIN.val(rxd.into()));
regs.pselcts.write(Psel::PIN.val(cts.into()));
regs.pselrts.write(Psel::PIN.val(rts.into()));
}
fn set_baud_rate(&self, baud_rate: u32) {
let regs = unsafe { &*self.regs };
match baud_rate {
1200 => regs.baudrate.set(0x0004F000),
2400 => regs.baudrate.set(0x0009D000),
4800 => regs.baudrate.set(0x0013B000),
9600 => regs.baudrate.set(0x00275000),
14400 => regs.baudrate.set(0x003AF000),
19200 => regs.baudrate.set(0x004EA000),
28800 => regs.baudrate.set(0x0075C000),
38400 => regs.baudrate.set(0x009D0000),
57600 => regs.baudrate.set(0x00EB0000),
76800 => regs.baudrate.set(0x013A9000),
115200 => regs.baudrate.set(0x01D60000),
230400 => regs.baudrate.set(0x03B00000),
250000 => regs.baudrate.set(0x04000000),
460800 => regs.baudrate.set(0x07400000),
921600 => regs.baudrate.set(0x0F000000),
1000000 => regs.baudrate.set(0x10000000),
_ => regs.baudrate.set(0x01D60000), //setting default to 115200
}
}
// Enable UART peripheral, this need to disabled for low power applications
fn enable_uart(&self) {
let regs = unsafe { &*self.regs };
regs.enable.write(Uart::ENABLE::ON);
}
#[allow(dead_code)]
fn disable_uart(&self) {
let regs = unsafe { &*self.regs };
regs.enable.write(Uart::ENABLE::OFF);
}
#[allow(dead_code)]
fn enable_rx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenset.write(Interrupt::ENDRX::SET);
}
fn enable_tx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenset.write(Interrupt::ENDTX::SET);
}
#[allow(dead_code)]
fn disable_rx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenclr.write(Interrupt::ENDRX::SET);
}
fn disable_tx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenclr.write(Interrupt::ENDTX::SET);
}
/// UART interrupt handler that only listens to `tx_end` events
#[inline(never)]
pub fn handle_interrupt(&mut self) {
// disable interrupts
self.disable_tx_interrupts();
let regs = unsafe { &*self.regs };
if self.tx_ready() {
regs.event_endtx.write(Event::READY::CLEAR);
let tx_bytes = regs.txd_amount.get() as usize;
let rem = self.remaining_bytes.get();
// More bytes transmitted than requested `return silently`
// Cause probably a hardware fault
// FIXME: Progate error to the capsule
if tx_bytes > rem {
debug!("error more bytes than requested\r\n");
return;
}
self.remaining_bytes.set(rem - tx_bytes);
self.offset.set(tx_bytes);
if self.remaining_bytes.get() == 0 {
// Signal client write done
self.client.get().map(|client| {
self.buffer.take().map(|buffer| {
client.transmit_complete(buffer, kernel::hil::uart::Error::CommandComplete);
});
});
}
// Not all bytes have been transmitted then update offset and continue transmitting
else {
self.set_dma_pointer_to_buffer();
regs.task_starttx.write(Task::ENABLE::SET);
self.enable_tx_interrupts();
}
}
}
/// Transmit one byte at the time and the client is resposible for polling
/// This is used by the panic handler
pub unsafe fn send_byte(&self, byte: u8) {
let regs = &*self.regs;
self.remaining_bytes.set(1);
regs.event_endtx.write(Event::READY::CLEAR);
// precaution: copy value into variable with static lifetime
BYTE = byte;
regs.txd_ptr.set((&BYTE as *const u8) as u32);
regs.txd_maxcnt.write(Counter::COUNTER.val(1));
regs.task_starttx.write(Task::ENABLE::SET);
}
/// Check if the UART tranmission is done
pub fn tx_ready(&self) -> bool {
let regs = unsafe { &*self.regs };
regs.event_endtx.is_set(Event::READY)
}
fn set_dma_pointer_to_buffer(&self) {
let regs = unsafe { &*self.regs };
self.buffer.map(|buffer| {
regs.txd_ptr
.set(buffer[self.offset.get()..].as_ptr() as u32);
});
}
}
impl kernel::hil::uart::UART for Uarte {
fn set_client(&self, client: &'static kernel::hil::uart::Client) {
self.client.set(Some(client));
}
fn init(&self, params: kernel::hil::uart::UARTParams) {
self.enable_uart();
self.set_baud_rate(params.baud_rate);
}
fn transmit(&self, tx_data: &'static mut [u8], tx_len: usize) {
let regs = unsafe { &*self.regs };
if tx_len == 0 {
return;
}
self.remaining_bytes.set(tx_len);
self.offset.set(0);
self.buffer.replace(tx_data);
self.set_dma_pointer_to_buffer();
regs.txd_maxcnt.write(Counter::COUNTER.val(tx_len as u32));
regs.task_stoptx.write(Task::ENABLE::SET);
regs.task_starttx.write(Task::ENABLE::SET);
self.enable_tx_interrupts();
}
#[allow(unused)]
fn receive(&self, rx_buffer: &'static mut [u8], rx_len: usize) {
unimplemented!()
}
}
| Uarte | identifier_name |
uart.rs | //! Universal asynchronous receiver/transmitter with EasyDMA (UARTE)
//!
//! The driver provides only tranmission functionlity
//!
//! Author
//! -------------------
//!
//! * Author: Niklas Adolfsson <[email protected]>
//! * Date: March 10 2018
use core::cell::Cell;
use kernel;
use kernel::common::regs::{ReadOnly, ReadWrite, WriteOnly};
use nrf5x::pinmux;
const UARTE_BASE: u32 = 0x40002000;
static mut BYTE: u8 = 0;
#[repr(C)]
struct UarteRegisters {
pub task_startrx: WriteOnly<u32, Task::Register>, // 0x000
pub task_stoprx: WriteOnly<u32, Task::Register>, // 0x004
pub task_starttx: WriteOnly<u32, Task::Register>, // 0x008
pub task_stoptx: WriteOnly<u32, Task::Register>, // 0x00c
_reserved1: [u32; 7], // 0x010-0x02c
pub task_flush_rx: WriteOnly<u32, Task::Register>, // 0x02c
_reserved2: [u32; 52], // 0x030-0x100
pub event_cts: ReadWrite<u32, Event::Register>, // 0x100-0x104
pub event_ncts: ReadWrite<u32, Event::Register>, // 0x104-0x108
_reserved3: [u32; 2], // 0x108-0x110
pub event_endrx: ReadWrite<u32, Event::Register>, // 0x110-0x114
_reserved4: [u32; 3], // 0x114-0x120
pub event_endtx: ReadWrite<u32, Event::Register>, // 0x120-0x124
pub event_error: ReadWrite<u32, Event::Register>, // 0x124-0x128
_reserved6: [u32; 7], // 0x128-0x144
pub event_rxto: ReadWrite<u32, Event::Register>, // 0x144-0x148
_reserved7: [u32; 1], // 0x148-0x14C
pub event_rxstarted: ReadWrite<u32, Event::Register>, // 0x14C-0x150
pub event_txstarted: ReadWrite<u32, Event::Register>, // 0x150-0x154
_reserved8: [u32; 1], // 0x154-0x158
pub event_txstopped: ReadWrite<u32, Event::Register>, // 0x158-0x15c
_reserved9: [u32; 41], // 0x15c-0x200
pub shorts: ReadWrite<u32, Shorts::Register>, // 0x200-0x204
_reserved10: [u32; 64], // 0x204-0x304
pub intenset: ReadWrite<u32, Interrupt::Register>, // 0x304-0x308
pub intenclr: ReadWrite<u32, Interrupt::Register>, // 0x308-0x30C
_reserved11: [u32; 93], // 0x30C-0x480 | pub pselrts: ReadWrite<u32, Psel::Register>, // 0x508-0x50c
pub pseltxd: ReadWrite<u32, Psel::Register>, // 0x50c-0x510
pub pselcts: ReadWrite<u32, Psel::Register>, // 0x510-0x514
pub pselrxd: ReadWrite<u32, Psel::Register>, // 0x514-0x518
_reserved14: [u32; 3], // 0x518-0x524
pub baudrate: ReadWrite<u32, Baudrate::Register>, // 0x524-0x528
_reserved15: [u32; 3], // 0x528-0x534
pub rxd_ptr: ReadWrite<u32, Pointer::Register>, // 0x534-0x538
pub rxd_maxcnt: ReadWrite<u32, Counter::Register>, // 0x538-0x53c
pub rxd_amount: ReadOnly<u32, Counter::Register>, // 0x53c-0x540
_reserved16: [u32; 1], // 0x540-0x544
pub txd_ptr: ReadWrite<u32, Pointer::Register>, // 0x544-0x548
pub txd_maxcnt: ReadWrite<u32, Counter::Register>, // 0x548-0x54c
pub txd_amount: ReadOnly<u32, Counter::Register>, // 0x54c-0x550
_reserved17: [u32; 7], // 0x550-0x56C
pub config: ReadWrite<u32, Config::Register>, // 0x56C-0x570
}
#[cfg_attr(rustfmt, rustfmt_skip)]
register_bitfields! [u32,
/// Start task
Task [
ENABLE OFFSET(0) NUMBITS(1)
],
/// Read event
Event [
READY OFFSET(0) NUMBITS(1)
],
/// Shortcuts
Shorts [
// Shortcut between ENDRX and STARTRX
ENDRX_STARTRX OFFSET(5) NUMBITS(1),
// Shortcut between ENDRX and STOPRX
ENDRX_STOPRX OFFSET(6) NUMBITS(1)
],
/// UART Interrupts
Interrupt [
CTS OFFSET(0) NUMBITS(1),
NCTS OFFSET(1) NUMBITS(1),
ENDRX OFFSET(4) NUMBITS(1),
ENDTX OFFSET(8) NUMBITS(1),
ERROR OFFSET(9) NUMBITS(1),
RXTO OFFSET(17) NUMBITS(1),
RXSTARTED OFFSET(19) NUMBITS(1),
TXSTARTED OFFSET(20) NUMBITS(1),
TXSTOPPED OFFSET(22) NUMBITS(1)
],
/// UART Errors
ErrorSrc [
OVERRUN OFFSET(0) NUMBITS(1),
PARITY OFFSET(1) NUMBITS(1),
FRAMING OFFSET(2) NUMBITS(1),
BREAK OFFSET(3) NUMBITS(1)
],
/// Enable UART
Uart [
ENABLE OFFSET(0) NUMBITS(4) [
ON = 8,
OFF = 0
]
],
/// Pin select
Psel [
// Pin number
PIN OFFSET(0) NUMBITS(5),
// Connect/Disconnect
CONNECT OFFSET(31) NUMBITS(1)
],
/// Baudrate
Baudrate [
BAUDRAUTE OFFSET(0) NUMBITS(32)
],
/// DMA pointer
Pointer [
POINTER OFFSET(0) NUMBITS(32)
],
/// Counter value
Counter [
COUNTER OFFSET(0) NUMBITS(8)
],
/// Configuration of parity and flow control
Config [
HWFC OFFSET(0) NUMBITS(1),
PARITY OFFSET(1) NUMBITS(3)
]
];
/// UARTE
// It should never be instanced outside this module but because a static mutable reference to it
// is exported outside this module it must be `pub`
pub struct Uarte {
regs: *const UarteRegisters,
client: Cell<Option<&'static kernel::hil::uart::Client>>,
buffer: kernel::common::take_cell::TakeCell<'static, [u8]>,
remaining_bytes: Cell<usize>,
offset: Cell<usize>,
}
#[derive(Copy, Clone)]
pub struct UARTParams {
pub baud_rate: u32,
}
/// UARTE0 handle
// This should only be accessed by the reset_handler on startup
pub static mut UARTE0: Uarte = Uarte::new();
impl Uarte {
/// Constructor
pub const fn new() -> Uarte {
Uarte {
regs: UARTE_BASE as *const UarteRegisters,
client: Cell::new(None),
buffer: kernel::common::take_cell::TakeCell::empty(),
remaining_bytes: Cell::new(0),
offset: Cell::new(0),
}
}
/// Configure which pins the UART should use for txd, rxd, cts and rts
pub fn configure(
&self,
txd: pinmux::Pinmux,
rxd: pinmux::Pinmux,
cts: pinmux::Pinmux,
rts: pinmux::Pinmux,
) {
let regs = unsafe { &*self.regs };
regs.pseltxd.write(Psel::PIN.val(txd.into()));
regs.pselrxd.write(Psel::PIN.val(rxd.into()));
regs.pselcts.write(Psel::PIN.val(cts.into()));
regs.pselrts.write(Psel::PIN.val(rts.into()));
}
fn set_baud_rate(&self, baud_rate: u32) {
let regs = unsafe { &*self.regs };
match baud_rate {
1200 => regs.baudrate.set(0x0004F000),
2400 => regs.baudrate.set(0x0009D000),
4800 => regs.baudrate.set(0x0013B000),
9600 => regs.baudrate.set(0x00275000),
14400 => regs.baudrate.set(0x003AF000),
19200 => regs.baudrate.set(0x004EA000),
28800 => regs.baudrate.set(0x0075C000),
38400 => regs.baudrate.set(0x009D0000),
57600 => regs.baudrate.set(0x00EB0000),
76800 => regs.baudrate.set(0x013A9000),
115200 => regs.baudrate.set(0x01D60000),
230400 => regs.baudrate.set(0x03B00000),
250000 => regs.baudrate.set(0x04000000),
460800 => regs.baudrate.set(0x07400000),
921600 => regs.baudrate.set(0x0F000000),
1000000 => regs.baudrate.set(0x10000000),
_ => regs.baudrate.set(0x01D60000), //setting default to 115200
}
}
// Enable UART peripheral, this need to disabled for low power applications
fn enable_uart(&self) {
let regs = unsafe { &*self.regs };
regs.enable.write(Uart::ENABLE::ON);
}
#[allow(dead_code)]
fn disable_uart(&self) {
let regs = unsafe { &*self.regs };
regs.enable.write(Uart::ENABLE::OFF);
}
#[allow(dead_code)]
fn enable_rx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenset.write(Interrupt::ENDRX::SET);
}
fn enable_tx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenset.write(Interrupt::ENDTX::SET);
}
#[allow(dead_code)]
fn disable_rx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenclr.write(Interrupt::ENDRX::SET);
}
fn disable_tx_interrupts(&self) {
let regs = unsafe { &*self.regs };
regs.intenclr.write(Interrupt::ENDTX::SET);
}
/// UART interrupt handler that only listens to `tx_end` events
#[inline(never)]
pub fn handle_interrupt(&mut self) {
// disable interrupts
self.disable_tx_interrupts();
let regs = unsafe { &*self.regs };
if self.tx_ready() {
regs.event_endtx.write(Event::READY::CLEAR);
let tx_bytes = regs.txd_amount.get() as usize;
let rem = self.remaining_bytes.get();
// More bytes transmitted than requested `return silently`
// Cause probably a hardware fault
// FIXME: Progate error to the capsule
if tx_bytes > rem {
debug!("error more bytes than requested\r\n");
return;
}
self.remaining_bytes.set(rem - tx_bytes);
self.offset.set(tx_bytes);
if self.remaining_bytes.get() == 0 {
// Signal client write done
self.client.get().map(|client| {
self.buffer.take().map(|buffer| {
client.transmit_complete(buffer, kernel::hil::uart::Error::CommandComplete);
});
});
}
// Not all bytes have been transmitted then update offset and continue transmitting
else {
self.set_dma_pointer_to_buffer();
regs.task_starttx.write(Task::ENABLE::SET);
self.enable_tx_interrupts();
}
}
}
/// Transmit one byte at the time and the client is resposible for polling
/// This is used by the panic handler
pub unsafe fn send_byte(&self, byte: u8) {
let regs = &*self.regs;
self.remaining_bytes.set(1);
regs.event_endtx.write(Event::READY::CLEAR);
// precaution: copy value into variable with static lifetime
BYTE = byte;
regs.txd_ptr.set((&BYTE as *const u8) as u32);
regs.txd_maxcnt.write(Counter::COUNTER.val(1));
regs.task_starttx.write(Task::ENABLE::SET);
}
/// Check if the UART tranmission is done
pub fn tx_ready(&self) -> bool {
let regs = unsafe { &*self.regs };
regs.event_endtx.is_set(Event::READY)
}
fn set_dma_pointer_to_buffer(&self) {
let regs = unsafe { &*self.regs };
self.buffer.map(|buffer| {
regs.txd_ptr
.set(buffer[self.offset.get()..].as_ptr() as u32);
});
}
}
impl kernel::hil::uart::UART for Uarte {
fn set_client(&self, client: &'static kernel::hil::uart::Client) {
self.client.set(Some(client));
}
fn init(&self, params: kernel::hil::uart::UARTParams) {
self.enable_uart();
self.set_baud_rate(params.baud_rate);
}
fn transmit(&self, tx_data: &'static mut [u8], tx_len: usize) {
let regs = unsafe { &*self.regs };
if tx_len == 0 {
return;
}
self.remaining_bytes.set(tx_len);
self.offset.set(0);
self.buffer.replace(tx_data);
self.set_dma_pointer_to_buffer();
regs.txd_maxcnt.write(Counter::COUNTER.val(tx_len as u32));
regs.task_stoptx.write(Task::ENABLE::SET);
regs.task_starttx.write(Task::ENABLE::SET);
self.enable_tx_interrupts();
}
#[allow(unused)]
fn receive(&self, rx_buffer: &'static mut [u8], rx_len: usize) {
unimplemented!()
}
} | pub errorsrc: ReadWrite<u32, ErrorSrc::Register>, // 0x480-0x484
_reserved12: [u32; 31], // 0x484-0x500
pub enable: ReadWrite<u32, Uart::Register>, // 0x500-0x504
_reserved13: [u32; 1], // 0x504-0x508 | random_line_split |
uart.rs | //! Universal asynchronous receiver/transmitter with EasyDMA (UARTE)
//!
//! The driver provides only tranmission functionlity
//!
//! Author
//! -------------------
//!
//! * Author: Niklas Adolfsson <[email protected]>
//! * Date: March 10 2018
use core::cell::Cell;
use kernel;
use kernel::common::regs::{ReadOnly, ReadWrite, WriteOnly};
use nrf5x::pinmux;
const UARTE_BASE: u32 = 0x40002000;
static mut BYTE: u8 = 0;
#[repr(C)]
struct UarteRegisters {
pub task_startrx: WriteOnly<u32, Task::Register>, // 0x000
pub task_stoprx: WriteOnly<u32, Task::Register>, // 0x004
pub task_starttx: WriteOnly<u32, Task::Register>, // 0x008
pub task_stoptx: WriteOnly<u32, Task::Register>, // 0x00c
_reserved1: [u32; 7], // 0x010-0x02c
pub task_flush_rx: WriteOnly<u32, Task::Register>, // 0x02c
_reserved2: [u32; 52], // 0x030-0x100
pub event_cts: ReadWrite<u32, Event::Register>, // 0x100-0x104
pub event_ncts: ReadWrite<u32, Event::Register>, // 0x104-0x108
_reserved3: [u32; 2], // 0x108-0x110
pub event_endrx: ReadWrite<u32, Event::Register>, // 0x110-0x114
_reserved4: [u32; 3], // 0x114-0x120
pub event_endtx: ReadWrite<u32, Event::Register>, // 0x120-0x124
pub event_error: ReadWrite<u32, Event::Register>, // 0x124-0x128
_reserved6: [u32; 7], // 0x128-0x144
pub event_rxto: ReadWrite<u32, Event::Register>, // 0x144-0x148
_reserved7: [u32; 1], // 0x148-0x14C
pub event_rxstarted: ReadWrite<u32, Event::Register>, // 0x14C-0x150
pub event_txstarted: ReadWrite<u32, Event::Register>, // 0x150-0x154
_reserved8: [u32; 1], // 0x154-0x158
pub event_txstopped: ReadWrite<u32, Event::Register>, // 0x158-0x15c
_reserved9: [u32; 41], // 0x15c-0x200
pub shorts: ReadWrite<u32, Shorts::Register>, // 0x200-0x204
_reserved10: [u32; 64], // 0x204-0x304
pub intenset: ReadWrite<u32, Interrupt::Register>, // 0x304-0x308
pub intenclr: ReadWrite<u32, Interrupt::Register>, // 0x308-0x30C
_reserved11: [u32; 93], // 0x30C-0x480
pub errorsrc: ReadWrite<u32, ErrorSrc::Register>, // 0x480-0x484
_reserved12: [u32; 31], // 0x484-0x500
pub enable: ReadWrite<u32, Uart::Register>, // 0x500-0x504
_reserved13: [u32; 1], // 0x504-0x508
pub pselrts: ReadWrite<u32, Psel::Register>, // 0x508-0x50c
pub pseltxd: ReadWrite<u32, Psel::Register>, // 0x50c-0x510
pub pselcts: ReadWrite<u32, Psel::Register>, // 0x510-0x514
pub pselrxd: ReadWrite<u32, Psel::Register>, // 0x514-0x518
_reserved14: [u32; 3], // 0x518-0x524
pub baudrate: ReadWrite<u32, Baudrate::Register>, // 0x524-0x528
_reserved15: [u32; 3], // 0x528-0x534
pub rxd_ptr: ReadWrite<u32, Pointer::Register>, // 0x534-0x538
pub rxd_maxcnt: ReadWrite<u32, Counter::Register>, // 0x538-0x53c
pub rxd_amount: ReadOnly<u32, Counter::Register>, // 0x53c-0x540
_reserved16: [u32; 1], // 0x540-0x544
pub txd_ptr: ReadWrite<u32, Pointer::Register>, // 0x544-0x548
pub txd_maxcnt: ReadWrite<u32, Counter::Register>, // 0x548-0x54c
pub txd_amount: ReadOnly<u32, Counter::Register>, // 0x54c-0x550
_reserved17: [u32; 7], // 0x550-0x56C
pub config: ReadWrite<u32, Config::Register>, // 0x56C-0x570
}
#[cfg_attr(rustfmt, rustfmt_skip)]
register_bitfields! [u32,
/// Start task
Task [
ENABLE OFFSET(0) NUMBITS(1)
],
/// Read event
Event [
READY OFFSET(0) NUMBITS(1)
],
/// Shortcuts
Shorts [
// Shortcut between ENDRX and STARTRX
ENDRX_STARTRX OFFSET(5) NUMBITS(1),
// Shortcut between ENDRX and STOPRX
ENDRX_STOPRX OFFSET(6) NUMBITS(1)
],
/// UART Interrupts
Interrupt [
CTS OFFSET(0) NUMBITS(1),
NCTS OFFSET(1) NUMBITS(1),
ENDRX OFFSET(4) NUMBITS(1),
ENDTX OFFSET(8) NUMBITS(1),
ERROR OFFSET(9) NUMBITS(1),
RXTO OFFSET(17) NUMBITS(1),
RXSTARTED OFFSET(19) NUMBITS(1),
TXSTARTED OFFSET(20) NUMBITS(1),
TXSTOPPED OFFSET(22) NUMBITS(1)
],
/// UART Errors
ErrorSrc [
OVERRUN OFFSET(0) NUMBITS(1),
PARITY OFFSET(1) NUMBITS(1),
FRAMING OFFSET(2) NUMBITS(1),
BREAK OFFSET(3) NUMBITS(1)
],
/// Enable UART
Uart [
ENABLE OFFSET(0) NUMBITS(4) [
ON = 8,
OFF = 0
]
],
/// Pin select
Psel [
// Pin number
PIN OFFSET(0) NUMBITS(5),
// Connect/Disconnect
CONNECT OFFSET(31) NUMBITS(1)
],
/// Baudrate
Baudrate [
BAUDRAUTE OFFSET(0) NUMBITS(32)
],
/// DMA pointer
Pointer [
POINTER OFFSET(0) NUMBITS(32)
],
/// Counter value
Counter [
COUNTER OFFSET(0) NUMBITS(8)
],
/// Configuration of parity and flow control
Config [
HWFC OFFSET(0) NUMBITS(1),
PARITY OFFSET(1) NUMBITS(3)
]
];
/// UARTE
// It should never be instanced outside this module but because a static mutable reference to it
// is exported outside this module it must be `pub`
pub struct Uarte {
    // MMIO base pointer to the UARTE register block.
    regs: *const UarteRegisters,
    // Client notified when a transmission completes.
    client: Cell<Option<&'static kernel::hil::uart::Client>>,
    // Buffer currently being transmitted via DMA; held for the transfer's duration.
    buffer: kernel::common::take_cell::TakeCell<'static, [u8]>,
    // Bytes of the current transfer not yet sent.
    remaining_bytes: Cell<usize>,
    // Offset into `buffer` of the next byte to DMA out.
    offset: Cell<usize>,
}
/// UART configuration parameters (currently baud rate only).
#[derive(Copy, Clone)]
pub struct UARTParams {
    pub baud_rate: u32,
}
/// UARTE0 handle
// This should only be accessed by the reset_handler on startup
// (it is a `static mut`, so any other concurrent access would alias).
pub static mut UARTE0: Uarte = Uarte::new();
impl Uarte {
    /// Constructor
    ///
    /// `const` so it can initialize the `UARTE0` static; all transfer state
    /// (client, buffer, counters) starts out empty/zero.
    pub const fn new() -> Uarte {
        Uarte {
            regs: UARTE_BASE as *const UarteRegisters,
            client: Cell::new(None),
            buffer: kernel::common::take_cell::TakeCell::empty(),
            remaining_bytes: Cell::new(0),
            offset: Cell::new(0),
        }
    }
    /// Configure which pins the UART should use for txd, rxd, cts and rts
    ///
    /// Each `Pinmux` value is written to the corresponding PSEL register.
    /// This only selects pins; it does not enable the peripheral.
    pub fn configure(
        &self,
        txd: pinmux::Pinmux,
        rxd: pinmux::Pinmux,
        cts: pinmux::Pinmux,
        rts: pinmux::Pinmux,
    ) {
        let regs = unsafe { &*self.regs };
        regs.pseltxd.write(Psel::PIN.val(txd.into()));
        regs.pselrxd.write(Psel::PIN.val(rxd.into()));
        regs.pselcts.write(Psel::PIN.val(cts.into()));
        regs.pselrts.write(Psel::PIN.val(rts.into()));
    }
fn set_baud_rate(&self, baud_rate: u32) {
let regs = unsafe { &*self.regs };
match baud_rate {
1200 => regs.baudrate.set(0x0004F000),
2400 => regs.baudrate.set(0x0009D000),
4800 => regs.baudrate.set(0x0013B000),
9600 => regs.baudrate.set(0x00275000),
14400 => regs.baudrate.set(0x003AF000),
19200 => regs.baudrate.set(0x004EA000),
28800 => regs.baudrate.set(0x0075C000),
38400 => regs.baudrate.set(0x009D0000),
57600 => regs.baudrate.set(0x00EB0000),
76800 => regs.baudrate.set(0x013A9000),
115200 => regs.baudrate.set(0x01D60000),
230400 => regs.baudrate.set(0x03B00000),
250000 => regs.baudrate.set(0x04000000),
460800 => regs.baudrate.set(0x07400000),
921600 => regs.baudrate.set(0x0F000000),
1000000 => regs.baudrate.set(0x10000000),
_ => regs.baudrate.set(0x01D60000), //setting default to 115200
}
}
    // Enable the UART peripheral. This needs to be disabled again (see
    // `disable_uart`) for low power applications.
    fn enable_uart(&self) {
        let regs = unsafe { &*self.regs };
        regs.enable.write(Uart::ENABLE::ON);
    }
    #[allow(dead_code)]
    // Disable the UART peripheral (counterpart to `enable_uart`).
    fn disable_uart(&self) {
        let regs = unsafe { &*self.regs };
        regs.enable.write(Uart::ENABLE::OFF);
    }
    #[allow(dead_code)]
    // Unmask the ENDRX interrupt via INTENSET.
    fn enable_rx_interrupts(&self) {
        let regs = unsafe { &*self.regs };
        regs.intenset.write(Interrupt::ENDRX::SET);
    }
    // Unmask the ENDTX interrupt via INTENSET.
    fn enable_tx_interrupts(&self) {
        let regs = unsafe { &*self.regs };
        regs.intenset.write(Interrupt::ENDTX::SET);
    }
    #[allow(dead_code)]
    // Mask the ENDRX interrupt. INTENCLR disables each interrupt whose bit is
    // written as 1, hence writing `SET` here.
    fn disable_rx_interrupts(&self) {
        let regs = unsafe { &*self.regs };
        regs.intenclr.write(Interrupt::ENDRX::SET);
    }
    // Mask the ENDTX interrupt (writing 1 to INTENCLR disables it).
    fn disable_tx_interrupts(&self) {
        let regs = unsafe { &*self.regs };
        regs.intenclr.write(Interrupt::ENDTX::SET);
    }
/// UART interrupt handler that only listens to `tx_end` events
#[inline(never)]
pub fn handle_interrupt(&mut self) {
// disable interrupts
self.disable_tx_interrupts();
let regs = unsafe { &*self.regs };
if self.tx_ready() | client.transmit_complete(buffer, kernel::hil::uart::Error::CommandComplete);
});
});
}
// Not all bytes have been transmitted then update offset and continue transmitting
else {
self.set_dma_pointer_to_buffer();
regs.task_starttx.write(Task::ENABLE::SET);
self.enable_tx_interrupts();
}
}
}
    /// Transmit one byte at a time; the client is responsible for polling
    /// `tx_ready` for completion. This is used by the panic handler.
    ///
    /// # Safety
    ///
    /// Writes the module-level `static mut BYTE`, so a concurrent call (or a
    /// call racing an in-progress DMA transfer) would clobber the byte being
    /// sent; the caller must ensure exclusive use of the UART.
    pub unsafe fn send_byte(&self, byte: u8) {
        let regs = &*self.regs;
        self.remaining_bytes.set(1);
        regs.event_endtx.write(Event::READY::CLEAR);
        // precaution: copy value into variable with static lifetime
        // (the DMA engine reads from this address after we return)
        BYTE = byte;
        regs.txd_ptr.set((&BYTE as *const u8) as u32);
        regs.txd_maxcnt.write(Counter::COUNTER.val(1));
        regs.task_starttx.write(Task::ENABLE::SET);
    }
    /// Check if the UART transmission is done, i.e. whether the ENDTX event
    /// has fired for the current transfer.
    pub fn tx_ready(&self) -> bool {
        let regs = unsafe { &*self.regs };
        regs.event_endtx.is_set(Event::READY)
    }
    // Point the TX DMA engine at the unsent tail of the current buffer,
    // i.e. at `buffer[offset..]`. No-op if no buffer is held.
    fn set_dma_pointer_to_buffer(&self) {
        let regs = unsafe { &*self.regs };
        self.buffer.map(|buffer| {
            regs.txd_ptr
                .set(buffer[self.offset.get()..].as_ptr() as u32);
        });
    }
}
impl kernel::hil::uart::UART for Uarte {
    // Register the client that will receive `transmit_complete` callbacks.
    fn set_client(&self, client: &'static kernel::hil::uart::Client) {
        self.client.set(Some(client));
    }
    // Power on the peripheral and apply the requested baud rate.
    fn init(&self, params: kernel::hil::uart::UARTParams) {
        self.enable_uart();
        self.set_baud_rate(params.baud_rate);
    }
    // Start a DMA-driven transmission of `tx_len` bytes from `tx_data`;
    // completion is reported to the client from the interrupt handler.
    fn transmit(&self, tx_data: &'static mut [u8], tx_len: usize) {
        let regs = unsafe { &*self.regs };
        if tx_len == 0 {
            return;
        }
        self.remaining_bytes.set(tx_len);
        self.offset.set(0);
        self.buffer.replace(tx_data);
        self.set_dma_pointer_to_buffer();
        // NOTE(review): Counter::COUNTER is declared 8 bits wide, so a
        // tx_len > 255 would presumably be truncated here — verify.
        regs.txd_maxcnt.write(Counter::COUNTER.val(tx_len as u32));
        // Stop any transfer in flight before starting the new one.
        regs.task_stoptx.write(Task::ENABLE::SET);
        regs.task_starttx.write(Task::ENABLE::SET);
        self.enable_tx_interrupts();
    }
    #[allow(unused)]
    // Reception is not supported by this driver.
    fn receive(&self, rx_buffer: &'static mut [u8], rx_len: usize) {
        unimplemented!()
    }
}
| {
regs.event_endtx.write(Event::READY::CLEAR);
let tx_bytes = regs.txd_amount.get() as usize;
let rem = self.remaining_bytes.get();
// More bytes transmitted than requested `return silently`
// Cause probably a hardware fault
// FIXME: Progate error to the capsule
if tx_bytes > rem {
debug!("error more bytes than requested\r\n");
return;
}
self.remaining_bytes.set(rem - tx_bytes);
self.offset.set(tx_bytes);
if self.remaining_bytes.get() == 0 {
// Signal client write done
self.client.get().map(|client| {
self.buffer.take().map(|buffer| { | conditional_block |
plonk_util.rs | use crate::partition::get_subgroup_shift;
use crate::witness::Witness;
use crate::{ifft_with_precomputation_power_of_2, msm_execute_parallel, AffinePoint, CircuitBuilder, Curve, FftPrecomputation, Field, HaloCurve, MsmPrecomputation, Polynomial, PolynomialCommitment, ProjectivePoint, Target, NUM_ROUTED_WIRES};
use rayon::prelude::*;
/// Evaluate `Z(X) = X^n - 1`, the polynomial vanishing on any multiplicative
/// subgroup of order `n`, at the point `x`.
pub(crate) fn eval_zero_poly<F: Field>(n: usize, x: F) -> F {
    let x_to_n = x.exp_usize(n);
    x_to_n - F::ONE
}
/// Evaluate the Lagrange basis polynomial `L_1` at `x`, where `L_1(1) = 1` and
/// `L_1` vanishes on every other element of an order-`n` multiplicative
/// subgroup.
pub(crate) fn eval_l_1<F: Field>(n: usize, x: F) -> F {
    if x.is_one() {
        // Both Z(x) and (x - 1) vanish at x = 1, so the closed form below
        // would be 0/0; the correct value there is 1.
        F::ONE
    } else {
        // L_1(x) = Z(x) / (n * (x - 1)) with Z(x) = x^n - 1.
        let denominator = F::from_canonical_usize(n) * (x - F::ONE);
        eval_zero_poly(n, x) / denominator
    }
}
/// Computes `sum_i terms[i] * alpha^i` via Horner evaluation in `alpha`.
pub fn reduce_with_powers<F: Field>(terms: &[F], alpha: F) -> F {
    terms
        .iter()
        .rev()
        .fold(F::ZERO, |acc, &term| acc * alpha + term)
}
/// In-circuit version of `reduce_with_powers`: emits gates computing
/// `sum_i terms[i] * alpha^i` by Horner's rule.
pub(crate) fn reduce_with_powers_recursive<C: HaloCurve>(
    builder: &mut CircuitBuilder<C>,
    terms: &[Target<C::ScalarField>],
    alpha: Target<C::ScalarField>,
) -> Target<C::ScalarField> {
    let zero = builder.zero_wire();
    terms
        .iter()
        .rev()
        .fold(zero, |acc, &term| builder.mul_add(acc, alpha, term))
}
/// Compute `n(x)` for a given `x`, where `n` is the injective function related to the Halo
/// endomorphism. The scalar bits are consumed two at a time, and the result is
/// `a * zeta + b` for the two accumulators built below.
pub fn halo_n<C: HaloCurve>(s_bits: &[bool]) -> C::ScalarField {
    // This is based on Algorithm 2 of the Halo paper, except that we start with (a, b) = (0, 0).
    debug_assert_eq!(s_bits.len() % 2, 0, "Number of scalar bits must be even");
    let zero = C::ScalarField::ZERO;
    let mut a = zero;
    let mut b = zero;
    for s_bits_chunk in s_bits.chunks(2) {
        let bit_lo = s_bits_chunk[0];
        let bit_hi = s_bits_chunk[1];
        // The low bit picks the sign of this step's contribution...
        let sign = if bit_lo {
            C::ScalarField::ONE
        } else {
            C::ScalarField::NEG_ONE
        };
        // ...and the high bit picks which accumulator (a or b) receives it.
        let (c, d) = if bit_hi { (sign, zero) } else { (zero, sign) };
        a = a.double() + c;
        b = b.double() + d;
    }
    a * C::ZETA_SCALAR + b
}
/// Compute `[n(s)].P` for a given `s`, where `n` is the injective function related to the Halo
/// endomorphism. Mirrors `halo_n`, but applies the same double-and-add steps directly to the
/// point `p`, using the curve endomorphism in place of multiplication by zeta.
pub fn halo_n_mul<C: HaloCurve>(s_bits: &[bool], p: AffinePoint<C>) -> AffinePoint<C> {
    // This is based on Algorithm 1 of the Halo paper, except that we start with Acc = O.
    debug_assert_eq!(s_bits.len() % 2, 0, "Number of scalar bits must be even");
    // Precompute the four possible step summands: +-P and +-endo(P).
    let p_p = p.to_projective();
    let p_n = -p_p;
    let endo_p_p = p.endomorphism().to_projective();
    let endo_p_n = -endo_p_p;
    let mut acc = ProjectivePoint::<C>::ZERO;
    for s_bits_chunk in s_bits.chunks(2) {
        let bit_lo = s_bits_chunk[0];
        let bit_hi = s_bits_chunk[1];
        // The high bit selects endomorphism vs. plain point; the low bit the sign.
        let s = if bit_hi {
            if bit_lo {
                endo_p_p
            } else {
                endo_p_n
            }
        } else if bit_lo {
            p_p
        } else {
            p_n
        };
        acc = acc.double() + s;
    }
    acc.to_affine()
}
pub fn | <F: Field>(coeffs: &[F], x: F) -> F {
let mut ans = F::ZERO;
let mut x_pow = F::ONE;
for &c in coeffs {
ans = ans + (c * x_pow);
x_pow = x_pow * x;
}
ans
}
/// Compute the first `n` powers of `x`: `[x^0, x^1, ..., x^(n - 1)]`.
pub fn powers<F: Field>(x: F, n: usize) -> Vec<F> {
    let mut result = Vec::with_capacity(n);
    let mut current = F::ONE;
    while result.len() < n {
        result.push(current);
        current = current * x;
    }
    result
}
/// In-circuit version of `powers`: returns targets for `[x^0, ..., x^(n - 1)]`,
/// emitting one multiplication gate per power after the first.
pub(crate) fn powers_recursive<C: HaloCurve>(
    builder: &mut CircuitBuilder<C>,
    x: Target<C::ScalarField>,
    n: usize,
) -> Vec<Target<C::ScalarField>> {
    let mut current = builder.one_wire();
    let mut result = Vec::with_capacity(n);
    while result.len() < n {
        if !result.is_empty() {
            current = builder.mul(current, x);
        }
        result.push(current);
    }
    result
}
/// Evaluate each polynomial in `polys` at the same point, given the
/// precomputed powers of that point.
pub(crate) fn eval_polys<F: Field>(polys: &[Polynomial<F>], powers: &[F]) -> Vec<F> {
    let mut evaluations = Vec::with_capacity(polys.len());
    for poly in polys {
        evaluations.push(poly.eval_from_power(powers));
    }
    evaluations
}
/// Zero-pad a list of `n` polynomial coefficients to a length of `8n`, which is the degree at
/// which we do most polynomial arithmetic.
pub(crate) fn pad_to_8n<F: Field>(coeffs: &[F]) -> Vec<F> {
    let n = coeffs.len();
    let mut result = coeffs.to_vec();
    // Reserve and zero-fill in one step instead of pushing element-by-element.
    result.resize(8 * n, F::ZERO);
    result
}
/// IFFT a batch of evaluation vectors into coefficient-form polynomials,
/// in parallel.
pub(crate) fn values_to_polynomials<F: Field>(
    values_vec: &[Vec<F>],
    fft_precomputation: &FftPrecomputation<F>,
) -> Vec<Polynomial<F>> {
    values_vec
        .par_iter()
        .map(|values| Polynomial::from_evaluations(values, fft_precomputation))
        .collect()
}
/// FFT a batch of polynomials onto a domain 8x their size (cf. `pad_to_8n`),
/// in parallel.
pub(crate) fn polynomials_to_values_padded<F: Field>(
    polys_vec: &[Polynomial<F>],
    fft_precomputation: &FftPrecomputation<F>,
) -> Vec<Vec<F>> {
    polys_vec
        .par_iter()
        .map(|poly| {
            // Zero-extend each polynomial to 8x its length so its evaluation
            // domain matches the degree used for most polynomial arithmetic.
            let padded_poly = poly.padded(poly.len() * 8);
            padded_poly.eval_domain(fft_precomputation)
        })
        .collect()
}
/// Like `pedersen_commit`, but with no blinding factor.
/// Computes the multi-scalar multiplication `sum_i xs[i] * G_i` over the
/// precomputed Pedersen generators.
pub fn pedersen_hash<C: Curve>(
    xs: &[C::ScalarField],
    pedersen_g_msm_precomputation: &MsmPrecomputation<C>,
) -> ProjectivePoint<C> {
    msm_execute_parallel(pedersen_g_msm_precomputation, xs)
}
#[allow(dead_code)]
/// Pedersen commitment to `xs`: the generator MSM plus `opening * h` as the
/// blinding term.
fn pedersen_commit<C: Curve>(
    xs: &[C::ScalarField],
    opening: C::ScalarField,
    h: AffinePoint<C>,
    pedersen_g_msm_precomputation: &MsmPrecomputation<C>,
) -> ProjectivePoint<C> {
    // TODO: Couldn't get this working with *.
    let h = h.to_projective();
    let mul_precomputation = h.mul_precompute();
    let blinding_term = h.mul_with_precomputation(opening, mul_precomputation);
    msm_execute_parallel(pedersen_g_msm_precomputation, xs) + blinding_term
}
/// Commit to each polynomial in `polynomials`, optionally blinding every
/// commitment with `blinding_point`.
pub fn commit_polynomials<C: Curve>(
    polynomials: &[Polynomial<C::ScalarField>],
    msm_precomputation: &MsmPrecomputation<C>,
    blinding_point: AffinePoint<C>,
    blinding: bool,
) -> Vec<PolynomialCommitment<C>> {
    // Borrow each polynomial's coefficient vector once, then commit in batch.
    let coeffs_vec: Vec<_> = polynomials.iter().map(|p| p.coeffs()).collect();
    PolynomialCommitment::coeffs_vec_to_commitments(
        coeffs_vec.as_slice(),
        msm_precomputation,
        blinding_point,
        blinding,
    )
}
// Generate Z, which is used in Plonk's permutation argument.
// Z(g^0) = 1, and each subsequent point multiplies in the ratio of the
// "identity" wiring term to the "sigma" wiring term for the previous row.
pub fn permutation_polynomial<F: Field>(
    degree: usize,
    subgroup: &[F],
    witness: &Witness<F>,
    sigma_values: &[Vec<F>],
    beta: F,
    gamma: F,
) -> Vec<F> {
    let mut plonk_z_points = vec![F::ONE];
    // k_i: coset shift distinguishing each routed wire's copy of the subgroup.
    let k_is = (0..NUM_ROUTED_WIRES)
        .map(get_subgroup_shift::<F>)
        .collect::<Vec<_>>()
        ;
    for i in 1..degree {
        let x = subgroup[i - 1];
        let mut numerator = F::ONE;
        let mut denominator = F::ONE;
        for j in 0..NUM_ROUTED_WIRES {
            let wire_value = witness.get_indices(i - 1, j);
            let k_i = k_is[j];
            let s_id = k_i * x;
            // Stride of 8 because sigma_values live on the 8n evaluation domain.
            let s_sigma = sigma_values[j][8 * (i - 1)];
            numerator = numerator * (wire_value + beta * s_id + gamma);
            denominator = denominator * (wire_value + beta * s_sigma + gamma);
        }
        let last = *plonk_z_points.last().unwrap();
        plonk_z_points.push(last * numerator / denominator);
    }
    plonk_z_points
}
/// Split the flat permutation `sigma` (one entry per routed-wire position)
/// into per-wire vectors of length `degree`, mapping each index to its field
/// representative `k_i * g^j` (coset shift times subgroup element).
pub fn sigma_polynomials<F: Field>(
    sigma: Vec<usize>,
    degree: usize,
    subgroup_generator: F,
) -> Vec<Vec<F>> {
    sigma
        .chunks(degree)
        .map(|chunk| {
            chunk
                .par_iter()
                .map(|&x| {
                    // x / degree selects the wire's coset shift; x % degree
                    // selects the power of the subgroup generator.
                    get_subgroup_shift::<F>(x / degree) * subgroup_generator.exp_usize(x % degree)
                })
                .collect::<Vec<_>>()
        })
        .collect()
}
/// Given polynomials `[p_0, ..., p_k]` of degree `degree` and `alpha \in F`,
/// returns `\sum_{i=0}^k alpha^i p_i`.
pub(crate) fn scale_polynomials<F: Field>(
    polynomials: Vec<Polynomial<F>>,
    alpha: F,
    degree: usize,
) -> Polynomial<F> {
    let alpha_powers = powers(alpha, polynomials.len());
    // Accumulate each weighted polynomial into a single coefficient vector.
    let mut combined = vec![F::ZERO; degree];
    for (poly, &weight) in polynomials.iter().zip(alpha_powers.iter()) {
        for i in 0..degree {
            combined[i] = combined[i] + poly[i] * weight;
        }
    }
    Polynomial::from(combined)
}
#[allow(dead_code)]
/// Interpolate `points` over the FFT domain and return the number of
/// coefficients up to the highest nonzero one, i.e. `deg + 1`
/// (0 for the zero polynomial).
pub(crate) fn polynomial_degree_plus_1<F: Field>(
    points: &[F],
    fft_precomputation: &FftPrecomputation<F>,
) -> usize {
    let coeffs = ifft_with_precomputation_power_of_2(points, fft_precomputation);
    coeffs.iter().rev().skip_while(|c| c.is_zero()).count()
}
// TODO: Maybe a streaming version using an `Iterator` would be faster and wouldn't require as much memory for large circuits.
// TODO: Optimize this.
/// Compute the `s` vector from the Halo opening argument: entry `i` is the
/// product over rounds `j` of `u_j` when bit `j` of `i` is set, and
/// `u_j^{-1}` otherwise (challenges consumed in reverse order).
pub fn halo_s<F: Field>(us: &[F]) -> Vec<F> {
    let n = 1 << us.len();
    let mut res = vec![F::ONE; n];
    // Invert all challenges in one batch rather than one at a time.
    let us_inv = F::batch_multiplicative_inverse(us);
    for (j, (&u, &u_inv)) in us.iter().rev().zip(us_inv.iter().rev()).enumerate() {
        for (i, x) in res.iter_mut().enumerate() {
            if i & (1 << j) == 0 {
                *x = *x * u_inv;
            } else {
                *x = *x * u;
            }
        }
    }
    res
}
/// Evaluate `g(X, {u_i})` as defined in the Halo paper: the product over the
/// challenges of `u_i * X^e + u_i^{-1}`, where the exponent `e` doubles as we
/// walk `us` from the last challenge to the first.
pub fn halo_g<F: Field>(x: F, us: &[F]) -> F {
    let mut product = F::ONE;
    let mut x_power = x;
    for &u_i in us.iter().rev() {
        let u_i_inv = u_i.multiplicative_inverse_assuming_nonzero();
        let term = u_i * x_power + u_i_inv;
        product = product * term;
        // Each earlier challenge pairs with the square of the previous power.
        x_power = x_power.square();
    }
    product
}
#[cfg(test)]
mod test {
use super::*;
use crate::{CircuitBuilder, Curve, Field, PartialWitness, Tweedledee};
#[test]
fn test_halo_n() {
type C = Tweedledee;
type SF = <Tweedledee as Curve>::ScalarField;
let p = C::convert(SF::rand()) * C::GENERATOR_PROJECTIVE;
let r = SF::rand();
let res = C::convert(halo_n::<C>(&r.to_canonical_bool_vec()[..128])) * p;
let p = p.to_affine();
assert_eq!(
res.to_affine(),
halo_n_mul::<C>(&r.to_canonical_bool_vec()[..128], p)
)
}
#[test]
fn test_permutation_polynomial() {
let mut builder = CircuitBuilder::<Tweedledee>::new(128);
let one = builder.one_wire();
let t = builder.add_virtual_target();
let t_sq = builder.square(t);
let quad = builder.add_many(&[one, t, t_sq]);
let seven =
builder.constant_wire(<Tweedledee as Curve>::ScalarField::from_canonical_usize(7));
let res = builder.sub(quad, seven);
builder.assert_zero(res);
let mut partial_witness = PartialWitness::new();
partial_witness.set_target(t, <Tweedledee as Curve>::ScalarField::TWO);
let circuit = builder.build();
let witness = circuit.generate_witness(partial_witness);
let beta = <Tweedledee as Curve>::ScalarField::rand();
let gamma = <Tweedledee as Curve>::ScalarField::rand();
let plonk_z_points_n = permutation_polynomial(
circuit.degree(),
&circuit.subgroup_n,
&witness,
&circuit.s_sigma_values_8n,
beta,
gamma,
);
// Verify that the permutation polynomial is well-formed.
let k_is = (0..NUM_ROUTED_WIRES)
.map(get_subgroup_shift::<<Tweedledee as Curve>::ScalarField>)
.collect::<Vec<_>>();
let wire_values = &witness.transpose();
for (i, &x) in circuit.subgroup_n.iter().enumerate() {
let (z_x, z_gz) = (
plonk_z_points_n[i],
plonk_z_points_n[(i + 1) % circuit.degree()],
);
let mut f_prime = <Tweedledee as Curve>::ScalarField::ONE;
let mut g_prime = <Tweedledee as Curve>::ScalarField::ONE;
for j in 0..NUM_ROUTED_WIRES {
let wire_value = wire_values[j][i];
let k_i = k_is[j];
let s_id = k_i * x;
let s_sigma = circuit.s_sigma_values_8n[j][8 * i];
f_prime = f_prime * (wire_value + beta * s_id + gamma);
g_prime = g_prime * (wire_value + beta * s_sigma + gamma);
}
let vanishing_v_shift_term = f_prime * z_x - g_prime * z_gz;
assert_eq!(
vanishing_v_shift_term,
<Tweedledee as Curve>::ScalarField::ZERO
);
}
}
#[test]
fn test_s_vector_g_function() {
type F = <Tweedledee as Curve>::ScalarField;
let us = (0..10).map(|_| F::rand()).collect::<Vec<_>>();
let x = F::rand();
assert_eq!(
F::inner_product(&halo_s(&us), &powers(x, 1 << 10)),
halo_g(x, &us)
);
}
}
| eval_poly | identifier_name |
plonk_util.rs | use crate::partition::get_subgroup_shift;
use crate::witness::Witness;
use crate::{ifft_with_precomputation_power_of_2, msm_execute_parallel, AffinePoint, CircuitBuilder, Curve, FftPrecomputation, Field, HaloCurve, MsmPrecomputation, Polynomial, PolynomialCommitment, ProjectivePoint, Target, NUM_ROUTED_WIRES};
use rayon::prelude::*;
/// Evaluate the polynomial which vanishes on any multiplicative subgroup of a given order `n`.
pub(crate) fn eval_zero_poly<F: Field>(n: usize, x: F) -> F {
// Z(x) = x^n - 1
x.exp_usize(n) - F::ONE
}
/// Evaluate the Lagrange basis `L_1` with `L_1(1) = 1`, and `L_1(x) = 0` for other members of an
/// order `n` multiplicative subgroup.
pub(crate) fn eval_l_1<F: Field>(n: usize, x: F) -> F {
if x.is_one() {
// The code below would divide by zero, since we have (x - 1) in both the numerator and
// denominator.
return F::ONE;
}
// L_1(x) = (x^n - 1) / (n * (x - 1))
// = Z(x) / (n * (x - 1))
eval_zero_poly(n, x) / (F::from_canonical_usize(n) * (x - F::ONE))
}
/// Computes a sum of terms weighted by powers of alpha.
pub fn reduce_with_powers<F: Field>(terms: &[F], alpha: F) -> F {
let mut sum = F::ZERO;
for &term in terms.iter().rev() {
sum = sum * alpha + term;
}
sum
}
/// Computes a sum of terms weighted by powers of alpha.
pub(crate) fn reduce_with_powers_recursive<C: HaloCurve>(
builder: &mut CircuitBuilder<C>,
terms: &[Target<C::ScalarField>],
alpha: Target<C::ScalarField>,
) -> Target<C::ScalarField> {
let mut sum = builder.zero_wire();
for &term in terms.iter().rev() {
sum = builder.mul_add(sum, alpha, term);
}
sum
}
/// Compute `n(x)` for a given `x`, where `n` is the injective function related to the Halo
/// endomorphism.
pub fn halo_n<C: HaloCurve>(s_bits: &[bool]) -> C::ScalarField {
// This is based on Algorithm 2 of the Halo paper, except that we start with (a, b) = (0, 0).
debug_assert_eq!(s_bits.len() % 2, 0, "Number of scalar bits must be even");
let zero = C::ScalarField::ZERO;
let mut a = zero;
let mut b = zero;
for s_bits_chunk in s_bits.chunks(2) {
let bit_lo = s_bits_chunk[0];
let bit_hi = s_bits_chunk[1];
let sign = if bit_lo {
C::ScalarField::ONE
} else {
C::ScalarField::NEG_ONE
};
let (c, d) = if bit_hi { (sign, zero) } else { (zero, sign) };
a = a.double() + c;
b = b.double() + d;
}
a * C::ZETA_SCALAR + b
}
/// Compute `[n(s)].P` for a given `s`, where `n` is the injective function related to the Halo
/// endomorphism.
pub fn halo_n_mul<C: HaloCurve>(s_bits: &[bool], p: AffinePoint<C>) -> AffinePoint<C> {
// This is based on Algorithm 1 of the Halo paper, except that we start with Acc = O.
debug_assert_eq!(s_bits.len() % 2, 0, "Number of scalar bits must be even");
let p_p = p.to_projective();
let p_n = -p_p;
let endo_p_p = p.endomorphism().to_projective();
let endo_p_n = -endo_p_p;
let mut acc = ProjectivePoint::<C>::ZERO;
for s_bits_chunk in s_bits.chunks(2) {
let bit_lo = s_bits_chunk[0];
let bit_hi = s_bits_chunk[1];
let s = if bit_hi {
if bit_lo {
endo_p_p
} else {
endo_p_n
}
} else if bit_lo {
p_p
} else {
p_n
};
acc = acc.double() + s;
}
acc.to_affine()
}
pub fn eval_poly<F: Field>(coeffs: &[F], x: F) -> F {
let mut ans = F::ZERO;
let mut x_pow = F::ONE;
for &c in coeffs {
ans = ans + (c * x_pow);
x_pow = x_pow * x;
}
ans
}
/// Compute `[x^0, x^1,..., x^(n - 1)]`.
pub fn powers<F: Field>(x: F, n: usize) -> Vec<F> {
let mut powers = Vec::new();
let mut current = F::ONE;
for i in 0..n {
if i!= 0 {
current = current * x;
}
powers.push(current);
}
powers
}
/// Compute `[x^0, x^1,..., x^(n - 1)]`.
pub(crate) fn powers_recursive<C: HaloCurve>(
builder: &mut CircuitBuilder<C>,
x: Target<C::ScalarField>,
n: usize,
) -> Vec<Target<C::ScalarField>> {
let mut powers = Vec::new();
let mut current = builder.one_wire();
for i in 0..n {
if i!= 0 {
current = builder.mul(current, x);
}
powers.push(current);
}
powers
}
/// Returns the evaluation of a list of polynomials at a point.
pub(crate) fn eval_polys<F: Field>(polys: &[Polynomial<F>], powers: &[F]) -> Vec<F> {
polys.iter().map(|p| p.eval_from_power(powers)).collect()
}
/// Zero-pad a list of `n` polynomial coefficients to a length of `8n`, which is the degree at
/// which we do most polynomial arithmetic.
pub(crate) fn pad_to_8n<F: Field>(coeffs: &[F]) -> Vec<F> {
let n = coeffs.len();
let mut result = coeffs.to_vec();
while result.len() < 8 * n {
result.push(F::ZERO);
}
result
}
pub(crate) fn values_to_polynomials<F: Field>(
values_vec: &[Vec<F>],
fft_precomputation: &FftPrecomputation<F>,
) -> Vec<Polynomial<F>> {
values_vec
.par_iter()
.map(|values| Polynomial::from_evaluations(values, fft_precomputation))
.collect()
}
pub(crate) fn polynomials_to_values_padded<F: Field>(
polys_vec: &[Polynomial<F>],
fft_precomputation: &FftPrecomputation<F>,
) -> Vec<Vec<F>> {
polys_vec
.par_iter()
.map(|poly| {
let padded_poly = poly.padded(poly.len() * 8);
padded_poly.eval_domain(fft_precomputation)
})
.collect()
}
/// Like `pedersen_commit`, but with no blinding factor.
pub fn pedersen_hash<C: Curve>(
xs: &[C::ScalarField],
pedersen_g_msm_precomputation: &MsmPrecomputation<C>,
) -> ProjectivePoint<C> {
msm_execute_parallel(pedersen_g_msm_precomputation, xs)
}
#[allow(dead_code)]
fn pedersen_commit<C: Curve>(
xs: &[C::ScalarField],
opening: C::ScalarField,
h: AffinePoint<C>,
pedersen_g_msm_precomputation: &MsmPrecomputation<C>,
) -> ProjectivePoint<C> {
// TODO: Couldn't get this working with *.
let h = h.to_projective();
let mul_precomputation = h.mul_precompute();
let blinding_term = h.mul_with_precomputation(opening, mul_precomputation);
msm_execute_parallel(pedersen_g_msm_precomputation, xs) + blinding_term
}
pub fn commit_polynomials<C: Curve>(
polynomials: &[Polynomial<C::ScalarField>],
msm_precomputation: &MsmPrecomputation<C>,
blinding_point: AffinePoint<C>,
blinding: bool,
) -> Vec<PolynomialCommitment<C>> {
PolynomialCommitment::coeffs_vec_to_commitments(
polynomials
.iter()
.map(|p| p.coeffs())
.collect::<Vec<_>>()
.as_slice(),
msm_precomputation,
blinding_point,
blinding,
)
}
// Generate Z, which is used in Plonk's permutation argument.
pub fn permutation_polynomial<F: Field>(
degree: usize,
subgroup: &[F],
witness: &Witness<F>,
sigma_values: &[Vec<F>],
beta: F,
gamma: F,
) -> Vec<F> {
let mut plonk_z_points = vec![F::ONE];
let k_is = (0..NUM_ROUTED_WIRES)
.map(get_subgroup_shift::<F>)
.collect::<Vec<_>>();
for i in 1..degree {
let x = subgroup[i - 1];
let mut numerator = F::ONE;
let mut denominator = F::ONE;
for j in 0..NUM_ROUTED_WIRES {
let wire_value = witness.get_indices(i - 1, j);
let k_i = k_is[j];
let s_id = k_i * x;
let s_sigma = sigma_values[j][8 * (i - 1)];
numerator = numerator * (wire_value + beta * s_id + gamma);
denominator = denominator * (wire_value + beta * s_sigma + gamma);
}
let last = *plonk_z_points.last().unwrap();
plonk_z_points.push(last * numerator / denominator);
}
plonk_z_points
}
pub fn sigma_polynomials<F: Field>(
sigma: Vec<usize>,
degree: usize,
subgroup_generator: F,
) -> Vec<Vec<F>> {
sigma
.chunks(degree)
.map(|chunk| {
chunk
.par_iter()
.map(|&x| {
get_subgroup_shift::<F>(x / degree) * subgroup_generator.exp_usize(x % degree)
})
.collect::<Vec<_>>()
})
.collect()
}
/// Given polynomials `[p_0,...,p_k]` of degree `degree` and `alpha \in F`, returns `\sum_{i=0}^k alpha^i p_i`.
pub(crate) fn scale_polynomials<F: Field>(
polynomials: Vec<Polynomial<F>>,
alpha: F,
degree: usize,
) -> Polynomial<F> {
let alpha_powers = powers(alpha, polynomials.len());
Polynomial::from(
(0..degree)
.map(|i| {
(0..polynomials.len())
.map(|j| polynomials[j][i] * alpha_powers[j])
.fold(F::ZERO, |acc, x| acc + x)
})
.collect::<Vec<_>>(),
)
}
#[allow(dead_code)]
pub(crate) fn polynomial_degree_plus_1<F: Field>(
points: &[F],
fft_precomputation: &FftPrecomputation<F>,
) -> usize {
let coeffs = ifft_with_precomputation_power_of_2(points, fft_precomputation);
coeffs.iter().rev().skip_while(|c| c.is_zero()).count()
}
// TODO: Maybe a streaming version using an `Iterator` would be faster and wouldn't require as much memory for large circuits.
// TODO: Optimize this.
pub fn halo_s<F: Field>(us: &[F]) -> Vec<F> {
let n = 1 << us.len();
let mut res = vec![F::ONE; n];
let us_inv = F::batch_multiplicative_inverse(us);
for (j, (&u, &u_inv)) in us.iter().rev().zip(us_inv.iter().rev()).enumerate() {
for (i, x) in res.iter_mut().enumerate() {
if i & (1 << j) == 0 {
*x = *x * u_inv;
} else {
*x = *x * u;
}
}
}
res
}
/// Evaluate `g(X, {u_i})` as defined in the Halo paper.
pub fn halo_g<F: Field>(x: F, us: &[F]) -> F {
let mut product = F::ONE;
let mut x_power = x;
for &u_i in us.iter().rev() {
let u_i_inv = u_i.multiplicative_inverse_assuming_nonzero();
let term = u_i * x_power + u_i_inv;
product = product * term;
x_power = x_power.square();
}
product
}
#[cfg(test)]
mod test {
use super::*;
use crate::{CircuitBuilder, Curve, Field, PartialWitness, Tweedledee};
#[test]
fn test_halo_n() {
type C = Tweedledee;
type SF = <Tweedledee as Curve>::ScalarField;
let p = C::convert(SF::rand()) * C::GENERATOR_PROJECTIVE;
let r = SF::rand();
let res = C::convert(halo_n::<C>(&r.to_canonical_bool_vec()[..128])) * p;
let p = p.to_affine();
assert_eq!(
res.to_affine(),
halo_n_mul::<C>(&r.to_canonical_bool_vec()[..128], p)
)
}
#[test]
fn test_permutation_polynomial() {
let mut builder = CircuitBuilder::<Tweedledee>::new(128);
let one = builder.one_wire();
let t = builder.add_virtual_target();
let t_sq = builder.square(t);
let quad = builder.add_many(&[one, t, t_sq]);
let seven =
builder.constant_wire(<Tweedledee as Curve>::ScalarField::from_canonical_usize(7));
let res = builder.sub(quad, seven);
builder.assert_zero(res);
let mut partial_witness = PartialWitness::new();
partial_witness.set_target(t, <Tweedledee as Curve>::ScalarField::TWO);
let circuit = builder.build();
let witness = circuit.generate_witness(partial_witness);
let beta = <Tweedledee as Curve>::ScalarField::rand();
let gamma = <Tweedledee as Curve>::ScalarField::rand();
let plonk_z_points_n = permutation_polynomial(
circuit.degree(),
&circuit.subgroup_n,
&witness,
&circuit.s_sigma_values_8n,
beta,
gamma,
);
// Verify that the permutation polynomial is well-formed.
let k_is = (0..NUM_ROUTED_WIRES)
.map(get_subgroup_shift::<<Tweedledee as Curve>::ScalarField>)
.collect::<Vec<_>>();
let wire_values = &witness.transpose();
for (i, &x) in circuit.subgroup_n.iter().enumerate() {
let (z_x, z_gz) = (
plonk_z_points_n[i],
plonk_z_points_n[(i + 1) % circuit.degree()],
);
let mut f_prime = <Tweedledee as Curve>::ScalarField::ONE;
let mut g_prime = <Tweedledee as Curve>::ScalarField::ONE;
for j in 0..NUM_ROUTED_WIRES {
let wire_value = wire_values[j][i];
let k_i = k_is[j];
let s_id = k_i * x;
let s_sigma = circuit.s_sigma_values_8n[j][8 * i];
f_prime = f_prime * (wire_value + beta * s_id + gamma);
g_prime = g_prime * (wire_value + beta * s_sigma + gamma);
}
let vanishing_v_shift_term = f_prime * z_x - g_prime * z_gz;
assert_eq!(
vanishing_v_shift_term,
<Tweedledee as Curve>::ScalarField::ZERO
);
}
}
#[test]
fn test_s_vector_g_function() |
}
| {
type F = <Tweedledee as Curve>::ScalarField;
let us = (0..10).map(|_| F::rand()).collect::<Vec<_>>();
let x = F::rand();
assert_eq!(
F::inner_product(&halo_s(&us), &powers(x, 1 << 10)),
halo_g(x, &us)
);
} | identifier_body |
plonk_util.rs | use crate::partition::get_subgroup_shift;
use crate::witness::Witness;
use crate::{ifft_with_precomputation_power_of_2, msm_execute_parallel, AffinePoint, CircuitBuilder, Curve, FftPrecomputation, Field, HaloCurve, MsmPrecomputation, Polynomial, PolynomialCommitment, ProjectivePoint, Target, NUM_ROUTED_WIRES};
use rayon::prelude::*;
/// Evaluate the polynomial which vanishes on any multiplicative subgroup of a given order `n`.
pub(crate) fn eval_zero_poly<F: Field>(n: usize, x: F) -> F {
// Z(x) = x^n - 1
x.exp_usize(n) - F::ONE
}
/// Evaluate the Lagrange basis `L_1` with `L_1(1) = 1`, and `L_1(x) = 0` for other members of an
/// order `n` multiplicative subgroup.
pub(crate) fn eval_l_1<F: Field>(n: usize, x: F) -> F {
if x.is_one() {
// The code below would divide by zero, since we have (x - 1) in both the numerator and
// denominator.
return F::ONE;
}
// L_1(x) = (x^n - 1) / (n * (x - 1))
// = Z(x) / (n * (x - 1))
eval_zero_poly(n, x) / (F::from_canonical_usize(n) * (x - F::ONE))
}
/// Computes a sum of terms weighted by powers of alpha.
pub fn reduce_with_powers<F: Field>(terms: &[F], alpha: F) -> F {
let mut sum = F::ZERO;
for &term in terms.iter().rev() {
sum = sum * alpha + term;
}
sum
}
/// Computes a sum of terms weighted by powers of alpha.
pub(crate) fn reduce_with_powers_recursive<C: HaloCurve>(
builder: &mut CircuitBuilder<C>,
terms: &[Target<C::ScalarField>],
alpha: Target<C::ScalarField>,
) -> Target<C::ScalarField> {
let mut sum = builder.zero_wire();
for &term in terms.iter().rev() {
sum = builder.mul_add(sum, alpha, term);
}
sum
}
/// Compute `n(x)` for a given `x`, where `n` is the injective function related to the Halo
/// endomorphism.
pub fn halo_n<C: HaloCurve>(s_bits: &[bool]) -> C::ScalarField {
// This is based on Algorithm 2 of the Halo paper, except that we start with (a, b) = (0, 0).
debug_assert_eq!(s_bits.len() % 2, 0, "Number of scalar bits must be even");
let zero = C::ScalarField::ZERO;
let mut a = zero;
let mut b = zero;
for s_bits_chunk in s_bits.chunks(2) {
let bit_lo = s_bits_chunk[0];
let bit_hi = s_bits_chunk[1];
let sign = if bit_lo {
C::ScalarField::ONE
} else {
C::ScalarField::NEG_ONE
};
let (c, d) = if bit_hi { (sign, zero) } else { (zero, sign) };
a = a.double() + c;
b = b.double() + d;
}
a * C::ZETA_SCALAR + b
}
/// Compute `[n(s)].P` for a given `s`, where `n` is the injective function related to the Halo
/// endomorphism.
pub fn halo_n_mul<C: HaloCurve>(s_bits: &[bool], p: AffinePoint<C>) -> AffinePoint<C> {
// This is based on Algorithm 1 of the Halo paper, except that we start with Acc = O.
debug_assert_eq!(s_bits.len() % 2, 0, "Number of scalar bits must be even");
let p_p = p.to_projective();
let p_n = -p_p;
let endo_p_p = p.endomorphism().to_projective();
let endo_p_n = -endo_p_p;
let mut acc = ProjectivePoint::<C>::ZERO;
for s_bits_chunk in s_bits.chunks(2) {
let bit_lo = s_bits_chunk[0];
let bit_hi = s_bits_chunk[1];
| if bit_lo {
endo_p_p
} else {
endo_p_n
}
} else if bit_lo {
p_p
} else {
p_n
};
acc = acc.double() + s;
}
acc.to_affine()
}
pub fn eval_poly<F: Field>(coeffs: &[F], x: F) -> F {
let mut ans = F::ZERO;
let mut x_pow = F::ONE;
for &c in coeffs {
ans = ans + (c * x_pow);
x_pow = x_pow * x;
}
ans
}
/// Compute `[x^0, x^1,..., x^(n - 1)]`.
pub fn powers<F: Field>(x: F, n: usize) -> Vec<F> {
let mut powers = Vec::new();
let mut current = F::ONE;
for i in 0..n {
if i!= 0 {
current = current * x;
}
powers.push(current);
}
powers
}
/// Compute `[x^0, x^1,..., x^(n - 1)]`.
pub(crate) fn powers_recursive<C: HaloCurve>(
builder: &mut CircuitBuilder<C>,
x: Target<C::ScalarField>,
n: usize,
) -> Vec<Target<C::ScalarField>> {
let mut powers = Vec::new();
let mut current = builder.one_wire();
for i in 0..n {
if i!= 0 {
current = builder.mul(current, x);
}
powers.push(current);
}
powers
}
/// Returns the evaluation of a list of polynomials at a point.
pub(crate) fn eval_polys<F: Field>(polys: &[Polynomial<F>], powers: &[F]) -> Vec<F> {
polys.iter().map(|p| p.eval_from_power(powers)).collect()
}
/// Zero-pad a list of `n` polynomial coefficients to a length of `8n`, which is the degree at
/// which we do most polynomial arithmetic.
pub(crate) fn pad_to_8n<F: Field>(coeffs: &[F]) -> Vec<F> {
let n = coeffs.len();
let mut result = coeffs.to_vec();
while result.len() < 8 * n {
result.push(F::ZERO);
}
result
}
pub(crate) fn values_to_polynomials<F: Field>(
values_vec: &[Vec<F>],
fft_precomputation: &FftPrecomputation<F>,
) -> Vec<Polynomial<F>> {
values_vec
.par_iter()
.map(|values| Polynomial::from_evaluations(values, fft_precomputation))
.collect()
}
pub(crate) fn polynomials_to_values_padded<F: Field>(
polys_vec: &[Polynomial<F>],
fft_precomputation: &FftPrecomputation<F>,
) -> Vec<Vec<F>> {
polys_vec
.par_iter()
.map(|poly| {
let padded_poly = poly.padded(poly.len() * 8);
padded_poly.eval_domain(fft_precomputation)
})
.collect()
}
/// Like `pedersen_commit`, but with no blinding factor.
pub fn pedersen_hash<C: Curve>(
xs: &[C::ScalarField],
pedersen_g_msm_precomputation: &MsmPrecomputation<C>,
) -> ProjectivePoint<C> {
msm_execute_parallel(pedersen_g_msm_precomputation, xs)
}
#[allow(dead_code)]
fn pedersen_commit<C: Curve>(
xs: &[C::ScalarField],
opening: C::ScalarField,
h: AffinePoint<C>,
pedersen_g_msm_precomputation: &MsmPrecomputation<C>,
) -> ProjectivePoint<C> {
// TODO: Couldn't get this working with *.
let h = h.to_projective();
let mul_precomputation = h.mul_precompute();
let blinding_term = h.mul_with_precomputation(opening, mul_precomputation);
msm_execute_parallel(pedersen_g_msm_precomputation, xs) + blinding_term
}
pub fn commit_polynomials<C: Curve>(
polynomials: &[Polynomial<C::ScalarField>],
msm_precomputation: &MsmPrecomputation<C>,
blinding_point: AffinePoint<C>,
blinding: bool,
) -> Vec<PolynomialCommitment<C>> {
PolynomialCommitment::coeffs_vec_to_commitments(
polynomials
.iter()
.map(|p| p.coeffs())
.collect::<Vec<_>>()
.as_slice(),
msm_precomputation,
blinding_point,
blinding,
)
}
// Generate Z, which is used in Plonk's permutation argument.
pub fn permutation_polynomial<F: Field>(
degree: usize,
subgroup: &[F],
witness: &Witness<F>,
sigma_values: &[Vec<F>],
beta: F,
gamma: F,
) -> Vec<F> {
let mut plonk_z_points = vec![F::ONE];
let k_is = (0..NUM_ROUTED_WIRES)
.map(get_subgroup_shift::<F>)
.collect::<Vec<_>>();
for i in 1..degree {
let x = subgroup[i - 1];
let mut numerator = F::ONE;
let mut denominator = F::ONE;
for j in 0..NUM_ROUTED_WIRES {
let wire_value = witness.get_indices(i - 1, j);
let k_i = k_is[j];
let s_id = k_i * x;
let s_sigma = sigma_values[j][8 * (i - 1)];
numerator = numerator * (wire_value + beta * s_id + gamma);
denominator = denominator * (wire_value + beta * s_sigma + gamma);
}
let last = *plonk_z_points.last().unwrap();
plonk_z_points.push(last * numerator / denominator);
}
plonk_z_points
}
pub fn sigma_polynomials<F: Field>(
sigma: Vec<usize>,
degree: usize,
subgroup_generator: F,
) -> Vec<Vec<F>> {
sigma
.chunks(degree)
.map(|chunk| {
chunk
.par_iter()
.map(|&x| {
get_subgroup_shift::<F>(x / degree) * subgroup_generator.exp_usize(x % degree)
})
.collect::<Vec<_>>()
})
.collect()
}
/// Given polynomials `[p_0,...,p_k]` of degree `degree` and `alpha \in F`, returns `\sum_{i=0}^k alpha^i p_i`.
pub(crate) fn scale_polynomials<F: Field>(
polynomials: Vec<Polynomial<F>>,
alpha: F,
degree: usize,
) -> Polynomial<F> {
let alpha_powers = powers(alpha, polynomials.len());
Polynomial::from(
(0..degree)
.map(|i| {
(0..polynomials.len())
.map(|j| polynomials[j][i] * alpha_powers[j])
.fold(F::ZERO, |acc, x| acc + x)
})
.collect::<Vec<_>>(),
)
}
#[allow(dead_code)]
pub(crate) fn polynomial_degree_plus_1<F: Field>(
points: &[F],
fft_precomputation: &FftPrecomputation<F>,
) -> usize {
let coeffs = ifft_with_precomputation_power_of_2(points, fft_precomputation);
coeffs.iter().rev().skip_while(|c| c.is_zero()).count()
}
// TODO: Maybe a streaming version using an `Iterator` would be faster and wouldn't require as much memory for large circuits.
// TODO: Optimize this.
pub fn halo_s<F: Field>(us: &[F]) -> Vec<F> {
let n = 1 << us.len();
let mut res = vec![F::ONE; n];
let us_inv = F::batch_multiplicative_inverse(us);
for (j, (&u, &u_inv)) in us.iter().rev().zip(us_inv.iter().rev()).enumerate() {
for (i, x) in res.iter_mut().enumerate() {
if i & (1 << j) == 0 {
*x = *x * u_inv;
} else {
*x = *x * u;
}
}
}
res
}
/// Evaluate `g(X, {u_i})` as defined in the Halo paper.
pub fn halo_g<F: Field>(x: F, us: &[F]) -> F {
let mut product = F::ONE;
let mut x_power = x;
for &u_i in us.iter().rev() {
let u_i_inv = u_i.multiplicative_inverse_assuming_nonzero();
let term = u_i * x_power + u_i_inv;
product = product * term;
x_power = x_power.square();
}
product
}
#[cfg(test)]
mod test {
use super::*;
use crate::{CircuitBuilder, Curve, Field, PartialWitness, Tweedledee};
#[test]
fn test_halo_n() {
type C = Tweedledee;
type SF = <Tweedledee as Curve>::ScalarField;
let p = C::convert(SF::rand()) * C::GENERATOR_PROJECTIVE;
let r = SF::rand();
let res = C::convert(halo_n::<C>(&r.to_canonical_bool_vec()[..128])) * p;
let p = p.to_affine();
assert_eq!(
res.to_affine(),
halo_n_mul::<C>(&r.to_canonical_bool_vec()[..128], p)
)
}
#[test]
fn test_permutation_polynomial() {
let mut builder = CircuitBuilder::<Tweedledee>::new(128);
let one = builder.one_wire();
let t = builder.add_virtual_target();
let t_sq = builder.square(t);
let quad = builder.add_many(&[one, t, t_sq]);
let seven =
builder.constant_wire(<Tweedledee as Curve>::ScalarField::from_canonical_usize(7));
let res = builder.sub(quad, seven);
builder.assert_zero(res);
let mut partial_witness = PartialWitness::new();
partial_witness.set_target(t, <Tweedledee as Curve>::ScalarField::TWO);
let circuit = builder.build();
let witness = circuit.generate_witness(partial_witness);
let beta = <Tweedledee as Curve>::ScalarField::rand();
let gamma = <Tweedledee as Curve>::ScalarField::rand();
let plonk_z_points_n = permutation_polynomial(
circuit.degree(),
&circuit.subgroup_n,
&witness,
&circuit.s_sigma_values_8n,
beta,
gamma,
);
// Verify that the permutation polynomial is well-formed.
let k_is = (0..NUM_ROUTED_WIRES)
.map(get_subgroup_shift::<<Tweedledee as Curve>::ScalarField>)
.collect::<Vec<_>>();
let wire_values = &witness.transpose();
for (i, &x) in circuit.subgroup_n.iter().enumerate() {
let (z_x, z_gz) = (
plonk_z_points_n[i],
plonk_z_points_n[(i + 1) % circuit.degree()],
);
let mut f_prime = <Tweedledee as Curve>::ScalarField::ONE;
let mut g_prime = <Tweedledee as Curve>::ScalarField::ONE;
for j in 0..NUM_ROUTED_WIRES {
let wire_value = wire_values[j][i];
let k_i = k_is[j];
let s_id = k_i * x;
let s_sigma = circuit.s_sigma_values_8n[j][8 * i];
f_prime = f_prime * (wire_value + beta * s_id + gamma);
g_prime = g_prime * (wire_value + beta * s_sigma + gamma);
}
let vanishing_v_shift_term = f_prime * z_x - g_prime * z_gz;
assert_eq!(
vanishing_v_shift_term,
<Tweedledee as Curve>::ScalarField::ZERO
);
}
}
#[test]
fn test_s_vector_g_function() {
type F = <Tweedledee as Curve>::ScalarField;
let us = (0..10).map(|_| F::rand()).collect::<Vec<_>>();
let x = F::rand();
assert_eq!(
F::inner_product(&halo_s(&us), &powers(x, 1 << 10)),
halo_g(x, &us)
);
}
} | let s = if bit_hi { | random_line_split |
plonk_util.rs | use crate::partition::get_subgroup_shift;
use crate::witness::Witness;
use crate::{ifft_with_precomputation_power_of_2, msm_execute_parallel, AffinePoint, CircuitBuilder, Curve, FftPrecomputation, Field, HaloCurve, MsmPrecomputation, Polynomial, PolynomialCommitment, ProjectivePoint, Target, NUM_ROUTED_WIRES};
use rayon::prelude::*;
/// Evaluate the polynomial which vanishes on any multiplicative subgroup of a given order `n`.
pub(crate) fn eval_zero_poly<F: Field>(n: usize, x: F) -> F {
// Z(x) = x^n - 1
x.exp_usize(n) - F::ONE
}
/// Evaluate the Lagrange basis `L_1` with `L_1(1) = 1`, and `L_1(x) = 0` for other members of an
/// order `n` multiplicative subgroup.
pub(crate) fn eval_l_1<F: Field>(n: usize, x: F) -> F {
if x.is_one() {
// The code below would divide by zero, since we have (x - 1) in both the numerator and
// denominator.
return F::ONE;
}
// L_1(x) = (x^n - 1) / (n * (x - 1))
// = Z(x) / (n * (x - 1))
eval_zero_poly(n, x) / (F::from_canonical_usize(n) * (x - F::ONE))
}
/// Computes a sum of terms weighted by powers of alpha.
pub fn reduce_with_powers<F: Field>(terms: &[F], alpha: F) -> F {
let mut sum = F::ZERO;
for &term in terms.iter().rev() {
sum = sum * alpha + term;
}
sum
}
/// Computes a sum of terms weighted by powers of alpha.
pub(crate) fn reduce_with_powers_recursive<C: HaloCurve>(
builder: &mut CircuitBuilder<C>,
terms: &[Target<C::ScalarField>],
alpha: Target<C::ScalarField>,
) -> Target<C::ScalarField> {
let mut sum = builder.zero_wire();
for &term in terms.iter().rev() {
sum = builder.mul_add(sum, alpha, term);
}
sum
}
/// Compute `n(x)` for a given `x`, where `n` is the injective function related to the Halo
/// endomorphism.
pub fn halo_n<C: HaloCurve>(s_bits: &[bool]) -> C::ScalarField {
// This is based on Algorithm 2 of the Halo paper, except that we start with (a, b) = (0, 0).
debug_assert_eq!(s_bits.len() % 2, 0, "Number of scalar bits must be even");
let zero = C::ScalarField::ZERO;
let mut a = zero;
let mut b = zero;
for s_bits_chunk in s_bits.chunks(2) {
let bit_lo = s_bits_chunk[0];
let bit_hi = s_bits_chunk[1];
let sign = if bit_lo {
C::ScalarField::ONE
} else | ;
let (c, d) = if bit_hi { (sign, zero) } else { (zero, sign) };
a = a.double() + c;
b = b.double() + d;
}
a * C::ZETA_SCALAR + b
}
/// Compute `[n(s)].P` for a given `s`, where `n` is the injective function related to the Halo
/// endomorphism.
pub fn halo_n_mul<C: HaloCurve>(s_bits: &[bool], p: AffinePoint<C>) -> AffinePoint<C> {
// This is based on Algorithm 1 of the Halo paper, except that we start with Acc = O.
debug_assert_eq!(s_bits.len() % 2, 0, "Number of scalar bits must be even");
let p_p = p.to_projective();
let p_n = -p_p;
let endo_p_p = p.endomorphism().to_projective();
let endo_p_n = -endo_p_p;
let mut acc = ProjectivePoint::<C>::ZERO;
for s_bits_chunk in s_bits.chunks(2) {
let bit_lo = s_bits_chunk[0];
let bit_hi = s_bits_chunk[1];
let s = if bit_hi {
if bit_lo {
endo_p_p
} else {
endo_p_n
}
} else if bit_lo {
p_p
} else {
p_n
};
acc = acc.double() + s;
}
acc.to_affine()
}
pub fn eval_poly<F: Field>(coeffs: &[F], x: F) -> F {
let mut ans = F::ZERO;
let mut x_pow = F::ONE;
for &c in coeffs {
ans = ans + (c * x_pow);
x_pow = x_pow * x;
}
ans
}
/// Compute `[x^0, x^1,..., x^(n - 1)]`.
pub fn powers<F: Field>(x: F, n: usize) -> Vec<F> {
let mut powers = Vec::new();
let mut current = F::ONE;
for i in 0..n {
if i!= 0 {
current = current * x;
}
powers.push(current);
}
powers
}
/// Compute `[x^0, x^1,..., x^(n - 1)]`.
pub(crate) fn powers_recursive<C: HaloCurve>(
builder: &mut CircuitBuilder<C>,
x: Target<C::ScalarField>,
n: usize,
) -> Vec<Target<C::ScalarField>> {
let mut powers = Vec::new();
let mut current = builder.one_wire();
for i in 0..n {
if i!= 0 {
current = builder.mul(current, x);
}
powers.push(current);
}
powers
}
/// Returns the evaluation of a list of polynomials at a point.
pub(crate) fn eval_polys<F: Field>(polys: &[Polynomial<F>], powers: &[F]) -> Vec<F> {
polys.iter().map(|p| p.eval_from_power(powers)).collect()
}
/// Zero-pad a list of `n` polynomial coefficients to a length of `8n`, which is the degree at
/// which we do most polynomial arithmetic.
pub(crate) fn pad_to_8n<F: Field>(coeffs: &[F]) -> Vec<F> {
let n = coeffs.len();
let mut result = coeffs.to_vec();
while result.len() < 8 * n {
result.push(F::ZERO);
}
result
}
pub(crate) fn values_to_polynomials<F: Field>(
values_vec: &[Vec<F>],
fft_precomputation: &FftPrecomputation<F>,
) -> Vec<Polynomial<F>> {
values_vec
.par_iter()
.map(|values| Polynomial::from_evaluations(values, fft_precomputation))
.collect()
}
pub(crate) fn polynomials_to_values_padded<F: Field>(
polys_vec: &[Polynomial<F>],
fft_precomputation: &FftPrecomputation<F>,
) -> Vec<Vec<F>> {
polys_vec
.par_iter()
.map(|poly| {
let padded_poly = poly.padded(poly.len() * 8);
padded_poly.eval_domain(fft_precomputation)
})
.collect()
}
/// Like `pedersen_commit`, but with no blinding factor.
pub fn pedersen_hash<C: Curve>(
xs: &[C::ScalarField],
pedersen_g_msm_precomputation: &MsmPrecomputation<C>,
) -> ProjectivePoint<C> {
msm_execute_parallel(pedersen_g_msm_precomputation, xs)
}
#[allow(dead_code)]
fn pedersen_commit<C: Curve>(
xs: &[C::ScalarField],
opening: C::ScalarField,
h: AffinePoint<C>,
pedersen_g_msm_precomputation: &MsmPrecomputation<C>,
) -> ProjectivePoint<C> {
// TODO: Couldn't get this working with *.
let h = h.to_projective();
let mul_precomputation = h.mul_precompute();
let blinding_term = h.mul_with_precomputation(opening, mul_precomputation);
msm_execute_parallel(pedersen_g_msm_precomputation, xs) + blinding_term
}
pub fn commit_polynomials<C: Curve>(
polynomials: &[Polynomial<C::ScalarField>],
msm_precomputation: &MsmPrecomputation<C>,
blinding_point: AffinePoint<C>,
blinding: bool,
) -> Vec<PolynomialCommitment<C>> {
PolynomialCommitment::coeffs_vec_to_commitments(
polynomials
.iter()
.map(|p| p.coeffs())
.collect::<Vec<_>>()
.as_slice(),
msm_precomputation,
blinding_point,
blinding,
)
}
// Generate Z, which is used in Plonk's permutation argument.
pub fn permutation_polynomial<F: Field>(
degree: usize,
subgroup: &[F],
witness: &Witness<F>,
sigma_values: &[Vec<F>],
beta: F,
gamma: F,
) -> Vec<F> {
let mut plonk_z_points = vec![F::ONE];
let k_is = (0..NUM_ROUTED_WIRES)
.map(get_subgroup_shift::<F>)
.collect::<Vec<_>>();
for i in 1..degree {
let x = subgroup[i - 1];
let mut numerator = F::ONE;
let mut denominator = F::ONE;
for j in 0..NUM_ROUTED_WIRES {
let wire_value = witness.get_indices(i - 1, j);
let k_i = k_is[j];
let s_id = k_i * x;
let s_sigma = sigma_values[j][8 * (i - 1)];
numerator = numerator * (wire_value + beta * s_id + gamma);
denominator = denominator * (wire_value + beta * s_sigma + gamma);
}
let last = *plonk_z_points.last().unwrap();
plonk_z_points.push(last * numerator / denominator);
}
plonk_z_points
}
pub fn sigma_polynomials<F: Field>(
sigma: Vec<usize>,
degree: usize,
subgroup_generator: F,
) -> Vec<Vec<F>> {
sigma
.chunks(degree)
.map(|chunk| {
chunk
.par_iter()
.map(|&x| {
get_subgroup_shift::<F>(x / degree) * subgroup_generator.exp_usize(x % degree)
})
.collect::<Vec<_>>()
})
.collect()
}
/// Given polynomials `[p_0,...,p_k]` of degree `degree` and `alpha \in F`, returns `\sum_{i=0}^k alpha^i p_i`.
pub(crate) fn scale_polynomials<F: Field>(
polynomials: Vec<Polynomial<F>>,
alpha: F,
degree: usize,
) -> Polynomial<F> {
let alpha_powers = powers(alpha, polynomials.len());
Polynomial::from(
(0..degree)
.map(|i| {
(0..polynomials.len())
.map(|j| polynomials[j][i] * alpha_powers[j])
.fold(F::ZERO, |acc, x| acc + x)
})
.collect::<Vec<_>>(),
)
}
#[allow(dead_code)]
pub(crate) fn polynomial_degree_plus_1<F: Field>(
points: &[F],
fft_precomputation: &FftPrecomputation<F>,
) -> usize {
let coeffs = ifft_with_precomputation_power_of_2(points, fft_precomputation);
coeffs.iter().rev().skip_while(|c| c.is_zero()).count()
}
// TODO: Maybe a streaming version using an `Iterator` would be faster and wouldn't require as much memory for large circuits.
// TODO: Optimize this.
pub fn halo_s<F: Field>(us: &[F]) -> Vec<F> {
let n = 1 << us.len();
let mut res = vec![F::ONE; n];
let us_inv = F::batch_multiplicative_inverse(us);
for (j, (&u, &u_inv)) in us.iter().rev().zip(us_inv.iter().rev()).enumerate() {
for (i, x) in res.iter_mut().enumerate() {
if i & (1 << j) == 0 {
*x = *x * u_inv;
} else {
*x = *x * u;
}
}
}
res
}
/// Evaluate `g(X, {u_i})` as defined in the Halo paper.
pub fn halo_g<F: Field>(x: F, us: &[F]) -> F {
let mut product = F::ONE;
let mut x_power = x;
for &u_i in us.iter().rev() {
let u_i_inv = u_i.multiplicative_inverse_assuming_nonzero();
let term = u_i * x_power + u_i_inv;
product = product * term;
x_power = x_power.square();
}
product
}
#[cfg(test)]
mod test {
use super::*;
use crate::{CircuitBuilder, Curve, Field, PartialWitness, Tweedledee};
#[test]
fn test_halo_n() {
type C = Tweedledee;
type SF = <Tweedledee as Curve>::ScalarField;
let p = C::convert(SF::rand()) * C::GENERATOR_PROJECTIVE;
let r = SF::rand();
let res = C::convert(halo_n::<C>(&r.to_canonical_bool_vec()[..128])) * p;
let p = p.to_affine();
assert_eq!(
res.to_affine(),
halo_n_mul::<C>(&r.to_canonical_bool_vec()[..128], p)
)
}
#[test]
fn test_permutation_polynomial() {
let mut builder = CircuitBuilder::<Tweedledee>::new(128);
let one = builder.one_wire();
let t = builder.add_virtual_target();
let t_sq = builder.square(t);
let quad = builder.add_many(&[one, t, t_sq]);
let seven =
builder.constant_wire(<Tweedledee as Curve>::ScalarField::from_canonical_usize(7));
let res = builder.sub(quad, seven);
builder.assert_zero(res);
let mut partial_witness = PartialWitness::new();
partial_witness.set_target(t, <Tweedledee as Curve>::ScalarField::TWO);
let circuit = builder.build();
let witness = circuit.generate_witness(partial_witness);
let beta = <Tweedledee as Curve>::ScalarField::rand();
let gamma = <Tweedledee as Curve>::ScalarField::rand();
let plonk_z_points_n = permutation_polynomial(
circuit.degree(),
&circuit.subgroup_n,
&witness,
&circuit.s_sigma_values_8n,
beta,
gamma,
);
// Verify that the permutation polynomial is well-formed.
let k_is = (0..NUM_ROUTED_WIRES)
.map(get_subgroup_shift::<<Tweedledee as Curve>::ScalarField>)
.collect::<Vec<_>>();
let wire_values = &witness.transpose();
for (i, &x) in circuit.subgroup_n.iter().enumerate() {
let (z_x, z_gz) = (
plonk_z_points_n[i],
plonk_z_points_n[(i + 1) % circuit.degree()],
);
let mut f_prime = <Tweedledee as Curve>::ScalarField::ONE;
let mut g_prime = <Tweedledee as Curve>::ScalarField::ONE;
for j in 0..NUM_ROUTED_WIRES {
let wire_value = wire_values[j][i];
let k_i = k_is[j];
let s_id = k_i * x;
let s_sigma = circuit.s_sigma_values_8n[j][8 * i];
f_prime = f_prime * (wire_value + beta * s_id + gamma);
g_prime = g_prime * (wire_value + beta * s_sigma + gamma);
}
let vanishing_v_shift_term = f_prime * z_x - g_prime * z_gz;
assert_eq!(
vanishing_v_shift_term,
<Tweedledee as Curve>::ScalarField::ZERO
);
}
}
#[test]
fn test_s_vector_g_function() {
type F = <Tweedledee as Curve>::ScalarField;
let us = (0..10).map(|_| F::rand()).collect::<Vec<_>>();
let x = F::rand();
assert_eq!(
F::inner_product(&halo_s(&us), &powers(x, 1 << 10)),
halo_g(x, &us)
);
}
}
| {
C::ScalarField::NEG_ONE
} | conditional_block |
main.rs | #![no_std]
#![no_main]
use core::cell::RefCell;
use cortex_m::{asm::wfi, interrupt::Mutex};
use cortex_m_rt::entry;
use embedded_hal::spi::MODE_1;
use stm32f4xx_hal as hal;
use hal::{
adc::config::{Align, Clock, Continuous, Resolution, Scan},
gpio::{gpioa, Output, PushPull, gpioc::{PC10, PC11, PC12}, Alternate},
pac,
pac::{ADC1, ADC_COMMON, interrupt, Interrupt, TIM1, TIM2},
prelude::*,
pwm,
signature::VDDA_CALIB,
time::KiloHertz,
timer::{Event, Timer},
};
use odrive_rs::spi::Spi;
extern crate drv8301;
use drv8301::drv8301::Drv8301;
use odrive_rs::as5048a::AS5048A;
use odrive_rs::motor::Motor;
use odrive_rs::rcc::{Enable, Reset};
use cortex_m_semihosting::{hprint, hprintln};
use panic_halt as _;
//type TypeLed = gpioa::PA2<Output<PushPull>>;
//static G_LED: Mutex<RefCell<Option<TypeLed>>> = Mutex::new(RefCell::new(None));
type TypeSpi3 = Spi<pac::SPI3, (PC10<Alternate<stm32f4xx_hal::gpio::AF6>>, PC11<Alternate<stm32f4xx_hal::gpio::AF6>>, PC12<Alternate<stm32f4xx_hal::gpio::AF6>>)>;
static G_SPI3: Mutex<RefCell<Option<TypeSpi3>>> = Mutex::new(RefCell::new(None));
type TypeEncoder<'a> = AS5048A<'a, TypeSpi3, gpioa::PA3<Output<PushPull>>>;
//static G_AS5048A: Mutex<RefCell<Option<TypeEncoder>>> = Mutex::new(RefCell::new(None));
type TypeMotor = Motor;
static G_MOTOR: Mutex<RefCell<Option<TypeMotor>>> = Mutex::new(RefCell::new(None));
static G_TIM: Mutex<RefCell<Option<Timer<TIM2>>>> = Mutex::new(RefCell::new(None));
static mut CURRENT_A: f32 = 0.0;
static mut CURRENT_B: f32 = 0.0;
static mut CURRENT_C: f32 = 0.0;
static mut MOT_ANGLE: u16 = 0;
static mut MOT_ANGLE_OLD: u16 = 0;
static mut MOT_VELOCITY: f32 = 0.0;
static mut MOT_VELOCITY_OLD: f32 = 0.0;
static mut ERR_VELOCITY: f32 = 0.0;
static mut ERR_VELOCITY_INT: f32 = 0.0;
static mut REF_CURR_D: f32 = 0.0;
static mut REF_CURR_Q: f32 = 0.0;
// System
const TIM2_FREQ_KHZ: u32 = 10;
// Motor
const MOT_POLE_PAIRS: u16 = 12;
// Encoder
const ENC_RESOLUTION: u16 = 16384;
#[interrupt]
fn ADC() {
// current sensing
unsafe {
let max_sample:u32 = (1 << 12) - 1;
let device = pac::Peripherals::steal();
device.ADC1.sr.modify(|_, w| w.jeoc().clear_bit());
let jdr1_data = device.ADC1.jdr1.read().jdata().bits();
let jdr1_offset = 48u32;
let so1 = ( ( (u32::from(jdr1_data) + jdr1_offset) * VDDA_CALIB ) / max_sample) as u16;
let jdr2_data = device.ADC1.jdr2.read().jdata().bits();
let jdr2_offset = 118u32;
let so2 = ( ( (u32::from(jdr2_data) + jdr2_offset) * VDDA_CALIB ) / max_sample) as u16;
CURRENT_B = (so1 as f32 - 1650.0) / 200.0;
CURRENT_C = (so2 as f32 - 1650.0) / 200.0;
CURRENT_A = - CURRENT_B - CURRENT_C;
}
/*
// LED Debug
cortex_m::interrupt::free(|cs| {
if let Some(ref mut led) = G_LED.borrow(cs).borrow_mut().as_mut() {
led.toggle().unwrap();
}
});
*/
}
#[interrupt]
fn TIM2() {
cortex_m::interrupt::free(|cs| {
if let Some(ref mut tim) = G_TIM.borrow(cs).borrow_mut().as_mut() |
});
static mut SPI3: Option<TypeSpi3> = None;
static mut MOTOR: Option<TypeMotor> = None;
unsafe{
let mut spi3 = SPI3.get_or_insert_with(|| {
cortex_m::interrupt::free(|cs| {
G_SPI3.borrow(cs).replace(None).unwrap()
})
});
let device = pac::Peripherals::steal();
let gpioa = device.GPIOA.split();
let ncs = gpioa.pa3.into_push_pull_output();
let mut as5048: TypeEncoder = AS5048A::new(&mut spi3, ncs);
// AS5048A
let measured_angle = as5048.angle().unwrap();
let angle_offset = 650u16;
MOT_ANGLE = (measured_angle - angle_offset) % ENC_RESOLUTION;
let electric_angle = MOT_ANGLE % (ENC_RESOLUTION/MOT_POLE_PAIRS);
let motor = MOTOR.get_or_insert_with(|| {
cortex_m::interrupt::free(|cs| {
G_MOTOR.borrow(cs).replace(None).unwrap()
})
});
// Velocity control
const REF_VELOCITY: f32 = - 100.0;
const VELLOCITY_PGAIN: f32 = 0.1;
const VELOCITY_IGAIN: f32 = 0.00001;
let res_velocity =
if (ENC_RESOLUTION-1000) < MOT_ANGLE_OLD && MOT_ANGLE < 1000 {
( MOT_ANGLE as f32 - MOT_ANGLE_OLD as f32 + ENC_RESOLUTION as f32 ) * TIM2_FREQ_KHZ as f32
} else if MOT_ANGLE_OLD < 1000 && MOT_ANGLE > (ENC_RESOLUTION-1000) {
( MOT_ANGLE as f32 - MOT_ANGLE_OLD as f32 - ENC_RESOLUTION as f32 ) * TIM2_FREQ_KHZ as f32
} else {
( MOT_ANGLE as f32 - MOT_ANGLE_OLD as f32 ) * TIM2_FREQ_KHZ as f32
};
let alpha = 0.1;
MOT_VELOCITY = alpha * res_velocity + (1.0 - alpha) * MOT_VELOCITY_OLD;
ERR_VELOCITY = MOT_VELOCITY - REF_VELOCITY;
ERR_VELOCITY_INT += ERR_VELOCITY;
REF_CURR_D = 0.0;
REF_CURR_Q = VELLOCITY_PGAIN * ERR_VELOCITY + VELOCITY_IGAIN * ERR_VELOCITY_INT;
REF_CURR_Q = -1.0 * REF_CURR_Q;
MOT_ANGLE_OLD = MOT_ANGLE;
MOT_VELOCITY_OLD = MOT_VELOCITY;
// select control mode
//motor.drive_profile().unwrap();
//motor.drive_sixstep().unwrap();
//motor.drive_anglebased_sixstep(electric_angle).unwrap();
motor.drive_foc(electric_angle, CURRENT_A, CURRENT_B, CURRENT_C, REF_CURR_D, REF_CURR_Q).unwrap();
}
/*
// LED Debug
cortex_m::interrupt::free(|cs| {
if let Some(ref mut led) = G_LED.borrow(cs).borrow_mut().as_mut() {
led.toggle().unwrap();
}
});
*/
}
#[entry]
fn main() ->! {
let dp = pac::Peripherals::take().unwrap();
let cp = cortex_m::peripheral::Peripherals::take().unwrap();
let rcc = dp.RCC.constrain();
let clocks = rcc
.cfgr
.use_hse(8.mhz())
.sysclk(168.mhz())
.hclk(168.mhz())
.pclk1(42.mhz())
.pclk2(84.mhz())
.require_pll48clk()
.freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpioa = dp.GPIOA.split();
let gpiob = dp.GPIOB.split();
let gpioc = dp.GPIOC.split();
// SPI3
let sck = gpioc.pc10.into_alternate_af6();
let miso = gpioc.pc11.into_alternate_af6();
let mosi = gpioc.pc12.into_alternate_af6();
let mut spi = Spi::spi3(
dp.SPI3,
(sck, miso, mosi),
MODE_1,
KiloHertz(2000).into(),
clocks,
);
// DRV8301
let ncs = gpioc.pc13.into_push_pull_output();
let en_gate = gpiob.pb12.into_push_pull_output();
let mut drv8301 = Drv8301::new(&mut spi, ncs, en_gate);
drv8301.init().unwrap();
// Move the pin into our global storage
cortex_m::interrupt::free(|cs| *G_SPI3.borrow(cs).borrow_mut() = Some(spi));
// PWM
let channels = (gpioa.pa8.into_alternate_af1(), gpioa.pa9.into_alternate_af1(), gpioa.pa10.into_alternate_af1(), gpioa.pa11.into_alternate_af1());
let pwm = pwm::tim1(dp.TIM1, channels, clocks, 16u32.khz());
let (ch1, ch2, ch3, ch4) = pwm;
let mut ch4 = ch4;
{
// Set complementary oututs mode as AF1
gpiob.pb13.into_alternate_af1();
gpiob.pb14.into_alternate_af1();
gpiob.pb15.into_alternate_af1();
unsafe {
let tim1_regb = &(*(TIM1::ptr()));
// Enable complementary outputs
tim1_regb.ccer.modify(|_, w| w.cc1ne().set_bit());
tim1_regb.ccer.modify(|_, w| w.cc2ne().set_bit());
tim1_regb.ccer.modify(|_, w| w.cc3ne().set_bit());
// Set dead time
tim1_regb.bdtr.modify(|_, w| w.dtg().bits(10));
// Center aligned
tim1_regb.cr1.modify(|_, w| w.cms().center_aligned1());
// OC4REF signal is used as trigger output
tim1_regb.cr2.modify(|_, w| w.mms().compare_oc4());
}
ch4.enable();
ch4.set_duty( (ch4.get_max_duty() as f32 * 0.99)as u16 );
}
delay.delay_ms(1u32);
// Motor
let mut motor = Motor::new(ch1, ch2, ch3, MOT_POLE_PAIRS, ENC_RESOLUTION);
motor.set_duty(0,0,0).unwrap();
motor.enable().unwrap();
delay.delay_ms(1u32);
/*
// for current sensing test
unsafe{
motor.set_hiz_c();
motor.set_duty((motor.max_duty as f32 * 0.6) as u16, (motor.max_duty as f32 * 0.4) as u16, 0u16).unwrap();
}
*/
cortex_m::interrupt::free(|cs| *G_MOTOR.borrow(cs).borrow_mut() = Some(motor));
// ADC1
gpioc.pc0.into_analog();
gpioc.pc1.into_analog();
unsafe {
// All ADCs share the same reset interface.
// NOTE(unsafe) this reference will only be used for atomic writes with no side effects.
let rcc = &(*pac::RCC::ptr());
// Enable the clock
pac::ADC1::enable(rcc);
pac::ADC1::reset(rcc);
let adcc_regb = &(*(ADC_COMMON::ptr()));
let adc1_regb = &(*(ADC1::ptr()));
// Probably unnecessary to disable the ADC in most cases but it shouldn't do any harm either
adc1_regb.cr2.modify(|_, w| w.adon().clear_bit());
// Config common
adcc_regb.ccr.modify(|_, w| w.adcpre().bits(Clock::Pclk2_div_2.into()));
// Config regular conversion
adc1_regb.cr1.modify(|_, w| w.res().bits(Resolution::Twelve.into()));
adc1_regb.cr1.modify(|_, w| w.scan().bit(Scan::Enabled.into()));
adc1_regb.cr2.modify(|_, w| w.align().bit(Align::Right.into()));
adc1_regb.cr2.modify(|_, w| w.cont().bit(Continuous::Single.into()));
// config injected conversion
adc1_regb.cr1.modify(|_, w | w.jeocie().enabled());
adc1_regb.cr2.modify(|_, w| w.jexten().rising_edge());
adc1_regb.cr2.modify(|_, w| w.jextsel().tim1cc4());
adc1_regb.jsqr.modify(|_, w| w.jl().bits(0b01));
adc1_regb.jsqr.modify(|_, w| w.jsq3().bits(10u8));
adc1_regb.jsqr.modify(|_, w| w.jsq4().bits(11u8));
adc1_regb.smpr1.modify(|_, w| w.smp10().cycles3());
adc1_regb.smpr1.modify(|_, w| w.smp11().cycles3());
// enable ADC
adc1_regb.cr2.modify(|_, w| w.adon().set_bit());
delay.delay_ms(1u32);
// enable interrupt
cortex_m::peripheral::NVIC::unmask(Interrupt::ADC);
}
/*
// Debug LED
let mut led = gpioa.pa2.into_push_pull_output();
let _ = led.set_high();
cortex_m::interrupt::free(|cs| *G_LED.borrow(cs).borrow_mut() = Some(led));
*/
// TIM2 Interrupt
let mut timer = Timer::tim2(dp.TIM2, TIM2_FREQ_KHZ.khz(), clocks);
timer.listen(Event::TimeOut);
cortex_m::interrupt::free(|cs| *G_TIM.borrow(cs).borrow_mut() = Some(timer));
//enable TIM2 interrupt
unsafe {
cortex_m::peripheral::NVIC::unmask(Interrupt::TIM2);
}
loop {
wfi();
/*
unsafe{
//hprintln!("CURRENT_A: {}A, CURRENT_B: {}A, CURRENT_C: {}A", CURRENT_A, CURRENT_B, CURRENT_C);
}
delay.delay_ms(1u32);
*/
}
}
| {
let _ = tim.wait();
} | conditional_block |
main.rs | #![no_std]
#![no_main]
use core::cell::RefCell;
use cortex_m::{asm::wfi, interrupt::Mutex};
use cortex_m_rt::entry;
use embedded_hal::spi::MODE_1;
use stm32f4xx_hal as hal;
use hal::{
adc::config::{Align, Clock, Continuous, Resolution, Scan},
gpio::{gpioa, Output, PushPull, gpioc::{PC10, PC11, PC12}, Alternate},
pac,
pac::{ADC1, ADC_COMMON, interrupt, Interrupt, TIM1, TIM2},
prelude::*,
pwm,
signature::VDDA_CALIB,
time::KiloHertz,
timer::{Event, Timer},
};
use odrive_rs::spi::Spi;
extern crate drv8301;
use drv8301::drv8301::Drv8301;
use odrive_rs::as5048a::AS5048A;
use odrive_rs::motor::Motor;
use odrive_rs::rcc::{Enable, Reset};
use cortex_m_semihosting::{hprint, hprintln};
use panic_halt as _;
//type TypeLed = gpioa::PA2<Output<PushPull>>;
//static G_LED: Mutex<RefCell<Option<TypeLed>>> = Mutex::new(RefCell::new(None));
type TypeSpi3 = Spi<pac::SPI3, (PC10<Alternate<stm32f4xx_hal::gpio::AF6>>, PC11<Alternate<stm32f4xx_hal::gpio::AF6>>, PC12<Alternate<stm32f4xx_hal::gpio::AF6>>)>;
static G_SPI3: Mutex<RefCell<Option<TypeSpi3>>> = Mutex::new(RefCell::new(None));
type TypeEncoder<'a> = AS5048A<'a, TypeSpi3, gpioa::PA3<Output<PushPull>>>;
//static G_AS5048A: Mutex<RefCell<Option<TypeEncoder>>> = Mutex::new(RefCell::new(None));
type TypeMotor = Motor;
static G_MOTOR: Mutex<RefCell<Option<TypeMotor>>> = Mutex::new(RefCell::new(None));
static G_TIM: Mutex<RefCell<Option<Timer<TIM2>>>> = Mutex::new(RefCell::new(None));
static mut CURRENT_A: f32 = 0.0;
static mut CURRENT_B: f32 = 0.0;
static mut CURRENT_C: f32 = 0.0;
static mut MOT_ANGLE: u16 = 0;
static mut MOT_ANGLE_OLD: u16 = 0;
static mut MOT_VELOCITY: f32 = 0.0;
static mut MOT_VELOCITY_OLD: f32 = 0.0;
static mut ERR_VELOCITY: f32 = 0.0;
static mut ERR_VELOCITY_INT: f32 = 0.0;
static mut REF_CURR_D: f32 = 0.0;
static mut REF_CURR_Q: f32 = 0.0;
// System
const TIM2_FREQ_KHZ: u32 = 10;
// Motor
const MOT_POLE_PAIRS: u16 = 12;
// Encoder
const ENC_RESOLUTION: u16 = 16384;
#[interrupt]
fn ADC() |
/*
// LED Debug
cortex_m::interrupt::free(|cs| {
if let Some(ref mut led) = G_LED.borrow(cs).borrow_mut().as_mut() {
led.toggle().unwrap();
}
});
*/
}
#[interrupt]
fn TIM2() {
cortex_m::interrupt::free(|cs| {
if let Some(ref mut tim) = G_TIM.borrow(cs).borrow_mut().as_mut() {
let _ = tim.wait();
}
});
static mut SPI3: Option<TypeSpi3> = None;
static mut MOTOR: Option<TypeMotor> = None;
unsafe{
let mut spi3 = SPI3.get_or_insert_with(|| {
cortex_m::interrupt::free(|cs| {
G_SPI3.borrow(cs).replace(None).unwrap()
})
});
let device = pac::Peripherals::steal();
let gpioa = device.GPIOA.split();
let ncs = gpioa.pa3.into_push_pull_output();
let mut as5048: TypeEncoder = AS5048A::new(&mut spi3, ncs);
// AS5048A
let measured_angle = as5048.angle().unwrap();
let angle_offset = 650u16;
MOT_ANGLE = (measured_angle - angle_offset) % ENC_RESOLUTION;
let electric_angle = MOT_ANGLE % (ENC_RESOLUTION/MOT_POLE_PAIRS);
let motor = MOTOR.get_or_insert_with(|| {
cortex_m::interrupt::free(|cs| {
G_MOTOR.borrow(cs).replace(None).unwrap()
})
});
// Velocity control
const REF_VELOCITY: f32 = - 100.0;
const VELLOCITY_PGAIN: f32 = 0.1;
const VELOCITY_IGAIN: f32 = 0.00001;
let res_velocity =
if (ENC_RESOLUTION-1000) < MOT_ANGLE_OLD && MOT_ANGLE < 1000 {
( MOT_ANGLE as f32 - MOT_ANGLE_OLD as f32 + ENC_RESOLUTION as f32 ) * TIM2_FREQ_KHZ as f32
} else if MOT_ANGLE_OLD < 1000 && MOT_ANGLE > (ENC_RESOLUTION-1000) {
( MOT_ANGLE as f32 - MOT_ANGLE_OLD as f32 - ENC_RESOLUTION as f32 ) * TIM2_FREQ_KHZ as f32
} else {
( MOT_ANGLE as f32 - MOT_ANGLE_OLD as f32 ) * TIM2_FREQ_KHZ as f32
};
let alpha = 0.1;
MOT_VELOCITY = alpha * res_velocity + (1.0 - alpha) * MOT_VELOCITY_OLD;
ERR_VELOCITY = MOT_VELOCITY - REF_VELOCITY;
ERR_VELOCITY_INT += ERR_VELOCITY;
REF_CURR_D = 0.0;
REF_CURR_Q = VELLOCITY_PGAIN * ERR_VELOCITY + VELOCITY_IGAIN * ERR_VELOCITY_INT;
REF_CURR_Q = -1.0 * REF_CURR_Q;
MOT_ANGLE_OLD = MOT_ANGLE;
MOT_VELOCITY_OLD = MOT_VELOCITY;
// select control mode
//motor.drive_profile().unwrap();
//motor.drive_sixstep().unwrap();
//motor.drive_anglebased_sixstep(electric_angle).unwrap();
motor.drive_foc(electric_angle, CURRENT_A, CURRENT_B, CURRENT_C, REF_CURR_D, REF_CURR_Q).unwrap();
}
/*
// LED Debug
cortex_m::interrupt::free(|cs| {
if let Some(ref mut led) = G_LED.borrow(cs).borrow_mut().as_mut() {
led.toggle().unwrap();
}
});
*/
}
#[entry]
fn main() ->! {
let dp = pac::Peripherals::take().unwrap();
let cp = cortex_m::peripheral::Peripherals::take().unwrap();
let rcc = dp.RCC.constrain();
let clocks = rcc
.cfgr
.use_hse(8.mhz())
.sysclk(168.mhz())
.hclk(168.mhz())
.pclk1(42.mhz())
.pclk2(84.mhz())
.require_pll48clk()
.freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpioa = dp.GPIOA.split();
let gpiob = dp.GPIOB.split();
let gpioc = dp.GPIOC.split();
// SPI3
let sck = gpioc.pc10.into_alternate_af6();
let miso = gpioc.pc11.into_alternate_af6();
let mosi = gpioc.pc12.into_alternate_af6();
let mut spi = Spi::spi3(
dp.SPI3,
(sck, miso, mosi),
MODE_1,
KiloHertz(2000).into(),
clocks,
);
// DRV8301
let ncs = gpioc.pc13.into_push_pull_output();
let en_gate = gpiob.pb12.into_push_pull_output();
let mut drv8301 = Drv8301::new(&mut spi, ncs, en_gate);
drv8301.init().unwrap();
// Move the pin into our global storage
cortex_m::interrupt::free(|cs| *G_SPI3.borrow(cs).borrow_mut() = Some(spi));
// PWM
let channels = (gpioa.pa8.into_alternate_af1(), gpioa.pa9.into_alternate_af1(), gpioa.pa10.into_alternate_af1(), gpioa.pa11.into_alternate_af1());
let pwm = pwm::tim1(dp.TIM1, channels, clocks, 16u32.khz());
let (ch1, ch2, ch3, ch4) = pwm;
let mut ch4 = ch4;
{
// Set complementary oututs mode as AF1
gpiob.pb13.into_alternate_af1();
gpiob.pb14.into_alternate_af1();
gpiob.pb15.into_alternate_af1();
unsafe {
let tim1_regb = &(*(TIM1::ptr()));
// Enable complementary outputs
tim1_regb.ccer.modify(|_, w| w.cc1ne().set_bit());
tim1_regb.ccer.modify(|_, w| w.cc2ne().set_bit());
tim1_regb.ccer.modify(|_, w| w.cc3ne().set_bit());
// Set dead time
tim1_regb.bdtr.modify(|_, w| w.dtg().bits(10));
// Center aligned
tim1_regb.cr1.modify(|_, w| w.cms().center_aligned1());
// OC4REF signal is used as trigger output
tim1_regb.cr2.modify(|_, w| w.mms().compare_oc4());
}
ch4.enable();
ch4.set_duty( (ch4.get_max_duty() as f32 * 0.99)as u16 );
}
delay.delay_ms(1u32);
// Motor
let mut motor = Motor::new(ch1, ch2, ch3, MOT_POLE_PAIRS, ENC_RESOLUTION);
motor.set_duty(0,0,0).unwrap();
motor.enable().unwrap();
delay.delay_ms(1u32);
/*
// for current sensing test
unsafe{
motor.set_hiz_c();
motor.set_duty((motor.max_duty as f32 * 0.6) as u16, (motor.max_duty as f32 * 0.4) as u16, 0u16).unwrap();
}
*/
cortex_m::interrupt::free(|cs| *G_MOTOR.borrow(cs).borrow_mut() = Some(motor));
// ADC1
gpioc.pc0.into_analog();
gpioc.pc1.into_analog();
unsafe {
// All ADCs share the same reset interface.
// NOTE(unsafe) this reference will only be used for atomic writes with no side effects.
let rcc = &(*pac::RCC::ptr());
// Enable the clock
pac::ADC1::enable(rcc);
pac::ADC1::reset(rcc);
let adcc_regb = &(*(ADC_COMMON::ptr()));
let adc1_regb = &(*(ADC1::ptr()));
// Probably unnecessary to disable the ADC in most cases but it shouldn't do any harm either
adc1_regb.cr2.modify(|_, w| w.adon().clear_bit());
// Config common
adcc_regb.ccr.modify(|_, w| w.adcpre().bits(Clock::Pclk2_div_2.into()));
// Config regular conversion
adc1_regb.cr1.modify(|_, w| w.res().bits(Resolution::Twelve.into()));
adc1_regb.cr1.modify(|_, w| w.scan().bit(Scan::Enabled.into()));
adc1_regb.cr2.modify(|_, w| w.align().bit(Align::Right.into()));
adc1_regb.cr2.modify(|_, w| w.cont().bit(Continuous::Single.into()));
// config injected conversion
adc1_regb.cr1.modify(|_, w | w.jeocie().enabled());
adc1_regb.cr2.modify(|_, w| w.jexten().rising_edge());
adc1_regb.cr2.modify(|_, w| w.jextsel().tim1cc4());
adc1_regb.jsqr.modify(|_, w| w.jl().bits(0b01));
adc1_regb.jsqr.modify(|_, w| w.jsq3().bits(10u8));
adc1_regb.jsqr.modify(|_, w| w.jsq4().bits(11u8));
adc1_regb.smpr1.modify(|_, w| w.smp10().cycles3());
adc1_regb.smpr1.modify(|_, w| w.smp11().cycles3());
// enable ADC
adc1_regb.cr2.modify(|_, w| w.adon().set_bit());
delay.delay_ms(1u32);
// enable interrupt
cortex_m::peripheral::NVIC::unmask(Interrupt::ADC);
}
/*
// Debug LED
let mut led = gpioa.pa2.into_push_pull_output();
let _ = led.set_high();
cortex_m::interrupt::free(|cs| *G_LED.borrow(cs).borrow_mut() = Some(led));
*/
// TIM2 Interrupt
let mut timer = Timer::tim2(dp.TIM2, TIM2_FREQ_KHZ.khz(), clocks);
timer.listen(Event::TimeOut);
cortex_m::interrupt::free(|cs| *G_TIM.borrow(cs).borrow_mut() = Some(timer));
//enable TIM2 interrupt
unsafe {
cortex_m::peripheral::NVIC::unmask(Interrupt::TIM2);
}
loop {
wfi();
/*
unsafe{
//hprintln!("CURRENT_A: {}A, CURRENT_B: {}A, CURRENT_C: {}A", CURRENT_A, CURRENT_B, CURRENT_C);
}
delay.delay_ms(1u32);
*/
}
}
| {
// current sensing
unsafe {
let max_sample:u32 = (1 << 12) - 1;
let device = pac::Peripherals::steal();
device.ADC1.sr.modify(|_, w| w.jeoc().clear_bit());
let jdr1_data = device.ADC1.jdr1.read().jdata().bits();
let jdr1_offset = 48u32;
let so1 = ( ( (u32::from(jdr1_data) + jdr1_offset) * VDDA_CALIB ) / max_sample) as u16;
let jdr2_data = device.ADC1.jdr2.read().jdata().bits();
let jdr2_offset = 118u32;
let so2 = ( ( (u32::from(jdr2_data) + jdr2_offset) * VDDA_CALIB ) / max_sample) as u16;
CURRENT_B = (so1 as f32 - 1650.0) / 200.0;
CURRENT_C = (so2 as f32 - 1650.0) / 200.0;
CURRENT_A = - CURRENT_B - CURRENT_C;
} | identifier_body |
main.rs | #![no_std]
#![no_main]
use core::cell::RefCell;
use cortex_m::{asm::wfi, interrupt::Mutex};
use cortex_m_rt::entry;
use embedded_hal::spi::MODE_1;
use stm32f4xx_hal as hal;
use hal::{
adc::config::{Align, Clock, Continuous, Resolution, Scan},
gpio::{gpioa, Output, PushPull, gpioc::{PC10, PC11, PC12}, Alternate},
pac,
pac::{ADC1, ADC_COMMON, interrupt, Interrupt, TIM1, TIM2},
prelude::*,
pwm,
signature::VDDA_CALIB,
time::KiloHertz,
timer::{Event, Timer},
};
use odrive_rs::spi::Spi;
extern crate drv8301;
use drv8301::drv8301::Drv8301;
use odrive_rs::as5048a::AS5048A;
use odrive_rs::motor::Motor;
use odrive_rs::rcc::{Enable, Reset};
use cortex_m_semihosting::{hprint, hprintln};
use panic_halt as _;
//type TypeLed = gpioa::PA2<Output<PushPull>>;
//static G_LED: Mutex<RefCell<Option<TypeLed>>> = Mutex::new(RefCell::new(None));
type TypeSpi3 = Spi<pac::SPI3, (PC10<Alternate<stm32f4xx_hal::gpio::AF6>>, PC11<Alternate<stm32f4xx_hal::gpio::AF6>>, PC12<Alternate<stm32f4xx_hal::gpio::AF6>>)>;
static G_SPI3: Mutex<RefCell<Option<TypeSpi3>>> = Mutex::new(RefCell::new(None));
type TypeEncoder<'a> = AS5048A<'a, TypeSpi3, gpioa::PA3<Output<PushPull>>>;
//static G_AS5048A: Mutex<RefCell<Option<TypeEncoder>>> = Mutex::new(RefCell::new(None));
type TypeMotor = Motor;
static G_MOTOR: Mutex<RefCell<Option<TypeMotor>>> = Mutex::new(RefCell::new(None));
static G_TIM: Mutex<RefCell<Option<Timer<TIM2>>>> = Mutex::new(RefCell::new(None));
static mut CURRENT_A: f32 = 0.0;
static mut CURRENT_B: f32 = 0.0;
static mut CURRENT_C: f32 = 0.0;
static mut MOT_ANGLE: u16 = 0;
static mut MOT_ANGLE_OLD: u16 = 0;
static mut MOT_VELOCITY: f32 = 0.0;
static mut MOT_VELOCITY_OLD: f32 = 0.0;
static mut ERR_VELOCITY: f32 = 0.0;
static mut ERR_VELOCITY_INT: f32 = 0.0;
static mut REF_CURR_D: f32 = 0.0;
static mut REF_CURR_Q: f32 = 0.0;
// System
const TIM2_FREQ_KHZ: u32 = 10;
// Motor
const MOT_POLE_PAIRS: u16 = 12;
// Encoder
const ENC_RESOLUTION: u16 = 16384;
#[interrupt]
fn | () {
// current sensing
unsafe {
let max_sample:u32 = (1 << 12) - 1;
let device = pac::Peripherals::steal();
device.ADC1.sr.modify(|_, w| w.jeoc().clear_bit());
let jdr1_data = device.ADC1.jdr1.read().jdata().bits();
let jdr1_offset = 48u32;
let so1 = ( ( (u32::from(jdr1_data) + jdr1_offset) * VDDA_CALIB ) / max_sample) as u16;
let jdr2_data = device.ADC1.jdr2.read().jdata().bits();
let jdr2_offset = 118u32;
let so2 = ( ( (u32::from(jdr2_data) + jdr2_offset) * VDDA_CALIB ) / max_sample) as u16;
CURRENT_B = (so1 as f32 - 1650.0) / 200.0;
CURRENT_C = (so2 as f32 - 1650.0) / 200.0;
CURRENT_A = - CURRENT_B - CURRENT_C;
}
/*
// LED Debug
cortex_m::interrupt::free(|cs| {
if let Some(ref mut led) = G_LED.borrow(cs).borrow_mut().as_mut() {
led.toggle().unwrap();
}
});
*/
}
#[interrupt]
fn TIM2() {
cortex_m::interrupt::free(|cs| {
if let Some(ref mut tim) = G_TIM.borrow(cs).borrow_mut().as_mut() {
let _ = tim.wait();
}
});
static mut SPI3: Option<TypeSpi3> = None;
static mut MOTOR: Option<TypeMotor> = None;
unsafe{
let mut spi3 = SPI3.get_or_insert_with(|| {
cortex_m::interrupt::free(|cs| {
G_SPI3.borrow(cs).replace(None).unwrap()
})
});
let device = pac::Peripherals::steal();
let gpioa = device.GPIOA.split();
let ncs = gpioa.pa3.into_push_pull_output();
let mut as5048: TypeEncoder = AS5048A::new(&mut spi3, ncs);
// AS5048A
let measured_angle = as5048.angle().unwrap();
let angle_offset = 650u16;
MOT_ANGLE = (measured_angle - angle_offset) % ENC_RESOLUTION;
let electric_angle = MOT_ANGLE % (ENC_RESOLUTION/MOT_POLE_PAIRS);
let motor = MOTOR.get_or_insert_with(|| {
cortex_m::interrupt::free(|cs| {
G_MOTOR.borrow(cs).replace(None).unwrap()
})
});
// Velocity control
const REF_VELOCITY: f32 = - 100.0;
const VELLOCITY_PGAIN: f32 = 0.1;
const VELOCITY_IGAIN: f32 = 0.00001;
let res_velocity =
if (ENC_RESOLUTION-1000) < MOT_ANGLE_OLD && MOT_ANGLE < 1000 {
( MOT_ANGLE as f32 - MOT_ANGLE_OLD as f32 + ENC_RESOLUTION as f32 ) * TIM2_FREQ_KHZ as f32
} else if MOT_ANGLE_OLD < 1000 && MOT_ANGLE > (ENC_RESOLUTION-1000) {
( MOT_ANGLE as f32 - MOT_ANGLE_OLD as f32 - ENC_RESOLUTION as f32 ) * TIM2_FREQ_KHZ as f32
} else {
( MOT_ANGLE as f32 - MOT_ANGLE_OLD as f32 ) * TIM2_FREQ_KHZ as f32
};
let alpha = 0.1;
MOT_VELOCITY = alpha * res_velocity + (1.0 - alpha) * MOT_VELOCITY_OLD;
ERR_VELOCITY = MOT_VELOCITY - REF_VELOCITY;
ERR_VELOCITY_INT += ERR_VELOCITY;
REF_CURR_D = 0.0;
REF_CURR_Q = VELLOCITY_PGAIN * ERR_VELOCITY + VELOCITY_IGAIN * ERR_VELOCITY_INT;
REF_CURR_Q = -1.0 * REF_CURR_Q;
MOT_ANGLE_OLD = MOT_ANGLE;
MOT_VELOCITY_OLD = MOT_VELOCITY;
// select control mode
//motor.drive_profile().unwrap();
//motor.drive_sixstep().unwrap();
//motor.drive_anglebased_sixstep(electric_angle).unwrap();
motor.drive_foc(electric_angle, CURRENT_A, CURRENT_B, CURRENT_C, REF_CURR_D, REF_CURR_Q).unwrap();
}
/*
// LED Debug
cortex_m::interrupt::free(|cs| {
if let Some(ref mut led) = G_LED.borrow(cs).borrow_mut().as_mut() {
led.toggle().unwrap();
}
});
*/
}
#[entry]
fn main() ->! {
let dp = pac::Peripherals::take().unwrap();
let cp = cortex_m::peripheral::Peripherals::take().unwrap();
let rcc = dp.RCC.constrain();
let clocks = rcc
.cfgr
.use_hse(8.mhz())
.sysclk(168.mhz())
.hclk(168.mhz())
.pclk1(42.mhz())
.pclk2(84.mhz())
.require_pll48clk()
.freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpioa = dp.GPIOA.split();
let gpiob = dp.GPIOB.split();
let gpioc = dp.GPIOC.split();
// SPI3
let sck = gpioc.pc10.into_alternate_af6();
let miso = gpioc.pc11.into_alternate_af6();
let mosi = gpioc.pc12.into_alternate_af6();
let mut spi = Spi::spi3(
dp.SPI3,
(sck, miso, mosi),
MODE_1,
KiloHertz(2000).into(),
clocks,
);
// DRV8301
let ncs = gpioc.pc13.into_push_pull_output();
let en_gate = gpiob.pb12.into_push_pull_output();
let mut drv8301 = Drv8301::new(&mut spi, ncs, en_gate);
drv8301.init().unwrap();
// Move the pin into our global storage
cortex_m::interrupt::free(|cs| *G_SPI3.borrow(cs).borrow_mut() = Some(spi));
// PWM
let channels = (gpioa.pa8.into_alternate_af1(), gpioa.pa9.into_alternate_af1(), gpioa.pa10.into_alternate_af1(), gpioa.pa11.into_alternate_af1());
let pwm = pwm::tim1(dp.TIM1, channels, clocks, 16u32.khz());
let (ch1, ch2, ch3, ch4) = pwm;
let mut ch4 = ch4;
{
// Set complementary oututs mode as AF1
gpiob.pb13.into_alternate_af1();
gpiob.pb14.into_alternate_af1();
gpiob.pb15.into_alternate_af1();
unsafe {
let tim1_regb = &(*(TIM1::ptr()));
// Enable complementary outputs
tim1_regb.ccer.modify(|_, w| w.cc1ne().set_bit());
tim1_regb.ccer.modify(|_, w| w.cc2ne().set_bit());
tim1_regb.ccer.modify(|_, w| w.cc3ne().set_bit());
// Set dead time
tim1_regb.bdtr.modify(|_, w| w.dtg().bits(10));
// Center aligned
tim1_regb.cr1.modify(|_, w| w.cms().center_aligned1());
// OC4REF signal is used as trigger output
tim1_regb.cr2.modify(|_, w| w.mms().compare_oc4());
}
ch4.enable();
ch4.set_duty( (ch4.get_max_duty() as f32 * 0.99)as u16 );
}
delay.delay_ms(1u32);
// Motor
let mut motor = Motor::new(ch1, ch2, ch3, MOT_POLE_PAIRS, ENC_RESOLUTION);
motor.set_duty(0,0,0).unwrap();
motor.enable().unwrap();
delay.delay_ms(1u32);
/*
// for current sensing test
unsafe{
motor.set_hiz_c();
motor.set_duty((motor.max_duty as f32 * 0.6) as u16, (motor.max_duty as f32 * 0.4) as u16, 0u16).unwrap();
}
*/
cortex_m::interrupt::free(|cs| *G_MOTOR.borrow(cs).borrow_mut() = Some(motor));
// ADC1
gpioc.pc0.into_analog();
gpioc.pc1.into_analog();
unsafe {
// All ADCs share the same reset interface.
// NOTE(unsafe) this reference will only be used for atomic writes with no side effects.
let rcc = &(*pac::RCC::ptr());
// Enable the clock
pac::ADC1::enable(rcc);
pac::ADC1::reset(rcc);
let adcc_regb = &(*(ADC_COMMON::ptr()));
let adc1_regb = &(*(ADC1::ptr()));
// Probably unnecessary to disable the ADC in most cases but it shouldn't do any harm either
adc1_regb.cr2.modify(|_, w| w.adon().clear_bit());
// Config common
adcc_regb.ccr.modify(|_, w| w.adcpre().bits(Clock::Pclk2_div_2.into()));
// Config regular conversion
adc1_regb.cr1.modify(|_, w| w.res().bits(Resolution::Twelve.into()));
adc1_regb.cr1.modify(|_, w| w.scan().bit(Scan::Enabled.into()));
adc1_regb.cr2.modify(|_, w| w.align().bit(Align::Right.into()));
adc1_regb.cr2.modify(|_, w| w.cont().bit(Continuous::Single.into()));
// config injected conversion
adc1_regb.cr1.modify(|_, w | w.jeocie().enabled());
adc1_regb.cr2.modify(|_, w| w.jexten().rising_edge());
adc1_regb.cr2.modify(|_, w| w.jextsel().tim1cc4());
adc1_regb.jsqr.modify(|_, w| w.jl().bits(0b01));
adc1_regb.jsqr.modify(|_, w| w.jsq3().bits(10u8));
adc1_regb.jsqr.modify(|_, w| w.jsq4().bits(11u8));
adc1_regb.smpr1.modify(|_, w| w.smp10().cycles3());
adc1_regb.smpr1.modify(|_, w| w.smp11().cycles3());
// enable ADC
adc1_regb.cr2.modify(|_, w| w.adon().set_bit());
delay.delay_ms(1u32);
// enable interrupt
cortex_m::peripheral::NVIC::unmask(Interrupt::ADC);
}
/*
// Debug LED
let mut led = gpioa.pa2.into_push_pull_output();
let _ = led.set_high();
cortex_m::interrupt::free(|cs| *G_LED.borrow(cs).borrow_mut() = Some(led));
*/
// TIM2 Interrupt
let mut timer = Timer::tim2(dp.TIM2, TIM2_FREQ_KHZ.khz(), clocks);
timer.listen(Event::TimeOut);
cortex_m::interrupt::free(|cs| *G_TIM.borrow(cs).borrow_mut() = Some(timer));
//enable TIM2 interrupt
unsafe {
cortex_m::peripheral::NVIC::unmask(Interrupt::TIM2);
}
loop {
wfi();
/*
unsafe{
//hprintln!("CURRENT_A: {}A, CURRENT_B: {}A, CURRENT_C: {}A", CURRENT_A, CURRENT_B, CURRENT_C);
}
delay.delay_ms(1u32);
*/
}
}
| ADC | identifier_name |
main.rs | #![no_std]
#![no_main]
use core::cell::RefCell;
use cortex_m::{asm::wfi, interrupt::Mutex};
use cortex_m_rt::entry;
use embedded_hal::spi::MODE_1;
use stm32f4xx_hal as hal;
use hal::{
adc::config::{Align, Clock, Continuous, Resolution, Scan},
gpio::{gpioa, Output, PushPull, gpioc::{PC10, PC11, PC12}, Alternate},
pac,
pac::{ADC1, ADC_COMMON, interrupt, Interrupt, TIM1, TIM2},
prelude::*,
pwm,
signature::VDDA_CALIB,
time::KiloHertz,
timer::{Event, Timer},
};
use odrive_rs::spi::Spi;
extern crate drv8301;
use drv8301::drv8301::Drv8301;
use odrive_rs::as5048a::AS5048A;
use odrive_rs::motor::Motor;
use odrive_rs::rcc::{Enable, Reset};
use cortex_m_semihosting::{hprint, hprintln};
use panic_halt as _;
//type TypeLed = gpioa::PA2<Output<PushPull>>;
//static G_LED: Mutex<RefCell<Option<TypeLed>>> = Mutex::new(RefCell::new(None));
type TypeSpi3 = Spi<pac::SPI3, (PC10<Alternate<stm32f4xx_hal::gpio::AF6>>, PC11<Alternate<stm32f4xx_hal::gpio::AF6>>, PC12<Alternate<stm32f4xx_hal::gpio::AF6>>)>;
static G_SPI3: Mutex<RefCell<Option<TypeSpi3>>> = Mutex::new(RefCell::new(None));
| static G_MOTOR: Mutex<RefCell<Option<TypeMotor>>> = Mutex::new(RefCell::new(None));
static G_TIM: Mutex<RefCell<Option<Timer<TIM2>>>> = Mutex::new(RefCell::new(None));
static mut CURRENT_A: f32 = 0.0;
static mut CURRENT_B: f32 = 0.0;
static mut CURRENT_C: f32 = 0.0;
static mut MOT_ANGLE: u16 = 0;
static mut MOT_ANGLE_OLD: u16 = 0;
static mut MOT_VELOCITY: f32 = 0.0;
static mut MOT_VELOCITY_OLD: f32 = 0.0;
static mut ERR_VELOCITY: f32 = 0.0;
static mut ERR_VELOCITY_INT: f32 = 0.0;
static mut REF_CURR_D: f32 = 0.0;
static mut REF_CURR_Q: f32 = 0.0;
// System
const TIM2_FREQ_KHZ: u32 = 10;
// Motor
const MOT_POLE_PAIRS: u16 = 12;
// Encoder
const ENC_RESOLUTION: u16 = 16384;
#[interrupt]
fn ADC() {
// current sensing
unsafe {
let max_sample:u32 = (1 << 12) - 1;
let device = pac::Peripherals::steal();
device.ADC1.sr.modify(|_, w| w.jeoc().clear_bit());
let jdr1_data = device.ADC1.jdr1.read().jdata().bits();
let jdr1_offset = 48u32;
let so1 = ( ( (u32::from(jdr1_data) + jdr1_offset) * VDDA_CALIB ) / max_sample) as u16;
let jdr2_data = device.ADC1.jdr2.read().jdata().bits();
let jdr2_offset = 118u32;
let so2 = ( ( (u32::from(jdr2_data) + jdr2_offset) * VDDA_CALIB ) / max_sample) as u16;
CURRENT_B = (so1 as f32 - 1650.0) / 200.0;
CURRENT_C = (so2 as f32 - 1650.0) / 200.0;
CURRENT_A = - CURRENT_B - CURRENT_C;
}
/*
// LED Debug
cortex_m::interrupt::free(|cs| {
if let Some(ref mut led) = G_LED.borrow(cs).borrow_mut().as_mut() {
led.toggle().unwrap();
}
});
*/
}
#[interrupt]
fn TIM2() {
cortex_m::interrupt::free(|cs| {
if let Some(ref mut tim) = G_TIM.borrow(cs).borrow_mut().as_mut() {
let _ = tim.wait();
}
});
static mut SPI3: Option<TypeSpi3> = None;
static mut MOTOR: Option<TypeMotor> = None;
unsafe{
let mut spi3 = SPI3.get_or_insert_with(|| {
cortex_m::interrupt::free(|cs| {
G_SPI3.borrow(cs).replace(None).unwrap()
})
});
let device = pac::Peripherals::steal();
let gpioa = device.GPIOA.split();
let ncs = gpioa.pa3.into_push_pull_output();
let mut as5048: TypeEncoder = AS5048A::new(&mut spi3, ncs);
// AS5048A
let measured_angle = as5048.angle().unwrap();
let angle_offset = 650u16;
MOT_ANGLE = (measured_angle - angle_offset) % ENC_RESOLUTION;
let electric_angle = MOT_ANGLE % (ENC_RESOLUTION/MOT_POLE_PAIRS);
let motor = MOTOR.get_or_insert_with(|| {
cortex_m::interrupt::free(|cs| {
G_MOTOR.borrow(cs).replace(None).unwrap()
})
});
// Velocity control
const REF_VELOCITY: f32 = - 100.0;
const VELLOCITY_PGAIN: f32 = 0.1;
const VELOCITY_IGAIN: f32 = 0.00001;
let res_velocity =
if (ENC_RESOLUTION-1000) < MOT_ANGLE_OLD && MOT_ANGLE < 1000 {
( MOT_ANGLE as f32 - MOT_ANGLE_OLD as f32 + ENC_RESOLUTION as f32 ) * TIM2_FREQ_KHZ as f32
} else if MOT_ANGLE_OLD < 1000 && MOT_ANGLE > (ENC_RESOLUTION-1000) {
( MOT_ANGLE as f32 - MOT_ANGLE_OLD as f32 - ENC_RESOLUTION as f32 ) * TIM2_FREQ_KHZ as f32
} else {
( MOT_ANGLE as f32 - MOT_ANGLE_OLD as f32 ) * TIM2_FREQ_KHZ as f32
};
let alpha = 0.1;
MOT_VELOCITY = alpha * res_velocity + (1.0 - alpha) * MOT_VELOCITY_OLD;
ERR_VELOCITY = MOT_VELOCITY - REF_VELOCITY;
ERR_VELOCITY_INT += ERR_VELOCITY;
REF_CURR_D = 0.0;
REF_CURR_Q = VELLOCITY_PGAIN * ERR_VELOCITY + VELOCITY_IGAIN * ERR_VELOCITY_INT;
REF_CURR_Q = -1.0 * REF_CURR_Q;
MOT_ANGLE_OLD = MOT_ANGLE;
MOT_VELOCITY_OLD = MOT_VELOCITY;
// select control mode
//motor.drive_profile().unwrap();
//motor.drive_sixstep().unwrap();
//motor.drive_anglebased_sixstep(electric_angle).unwrap();
motor.drive_foc(electric_angle, CURRENT_A, CURRENT_B, CURRENT_C, REF_CURR_D, REF_CURR_Q).unwrap();
}
/*
// LED Debug
cortex_m::interrupt::free(|cs| {
if let Some(ref mut led) = G_LED.borrow(cs).borrow_mut().as_mut() {
led.toggle().unwrap();
}
});
*/
}
#[entry]
fn main() ->! {
let dp = pac::Peripherals::take().unwrap();
let cp = cortex_m::peripheral::Peripherals::take().unwrap();
let rcc = dp.RCC.constrain();
let clocks = rcc
.cfgr
.use_hse(8.mhz())
.sysclk(168.mhz())
.hclk(168.mhz())
.pclk1(42.mhz())
.pclk2(84.mhz())
.require_pll48clk()
.freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpioa = dp.GPIOA.split();
let gpiob = dp.GPIOB.split();
let gpioc = dp.GPIOC.split();
// SPI3
let sck = gpioc.pc10.into_alternate_af6();
let miso = gpioc.pc11.into_alternate_af6();
let mosi = gpioc.pc12.into_alternate_af6();
let mut spi = Spi::spi3(
dp.SPI3,
(sck, miso, mosi),
MODE_1,
KiloHertz(2000).into(),
clocks,
);
// DRV8301
let ncs = gpioc.pc13.into_push_pull_output();
let en_gate = gpiob.pb12.into_push_pull_output();
let mut drv8301 = Drv8301::new(&mut spi, ncs, en_gate);
drv8301.init().unwrap();
// Move the pin into our global storage
cortex_m::interrupt::free(|cs| *G_SPI3.borrow(cs).borrow_mut() = Some(spi));
// PWM
let channels = (gpioa.pa8.into_alternate_af1(), gpioa.pa9.into_alternate_af1(), gpioa.pa10.into_alternate_af1(), gpioa.pa11.into_alternate_af1());
let pwm = pwm::tim1(dp.TIM1, channels, clocks, 16u32.khz());
let (ch1, ch2, ch3, ch4) = pwm;
let mut ch4 = ch4;
{
// Set complementary oututs mode as AF1
gpiob.pb13.into_alternate_af1();
gpiob.pb14.into_alternate_af1();
gpiob.pb15.into_alternate_af1();
unsafe {
let tim1_regb = &(*(TIM1::ptr()));
// Enable complementary outputs
tim1_regb.ccer.modify(|_, w| w.cc1ne().set_bit());
tim1_regb.ccer.modify(|_, w| w.cc2ne().set_bit());
tim1_regb.ccer.modify(|_, w| w.cc3ne().set_bit());
// Set dead time
tim1_regb.bdtr.modify(|_, w| w.dtg().bits(10));
// Center aligned
tim1_regb.cr1.modify(|_, w| w.cms().center_aligned1());
// OC4REF signal is used as trigger output
tim1_regb.cr2.modify(|_, w| w.mms().compare_oc4());
}
ch4.enable();
ch4.set_duty( (ch4.get_max_duty() as f32 * 0.99)as u16 );
}
delay.delay_ms(1u32);
// Motor
let mut motor = Motor::new(ch1, ch2, ch3, MOT_POLE_PAIRS, ENC_RESOLUTION);
motor.set_duty(0,0,0).unwrap();
motor.enable().unwrap();
delay.delay_ms(1u32);
/*
// for current sensing test
unsafe{
motor.set_hiz_c();
motor.set_duty((motor.max_duty as f32 * 0.6) as u16, (motor.max_duty as f32 * 0.4) as u16, 0u16).unwrap();
}
*/
cortex_m::interrupt::free(|cs| *G_MOTOR.borrow(cs).borrow_mut() = Some(motor));
// ADC1
gpioc.pc0.into_analog();
gpioc.pc1.into_analog();
unsafe {
// All ADCs share the same reset interface.
// NOTE(unsafe) this reference will only be used for atomic writes with no side effects.
let rcc = &(*pac::RCC::ptr());
// Enable the clock
pac::ADC1::enable(rcc);
pac::ADC1::reset(rcc);
let adcc_regb = &(*(ADC_COMMON::ptr()));
let adc1_regb = &(*(ADC1::ptr()));
// Probably unnecessary to disable the ADC in most cases but it shouldn't do any harm either
adc1_regb.cr2.modify(|_, w| w.adon().clear_bit());
// Config common
adcc_regb.ccr.modify(|_, w| w.adcpre().bits(Clock::Pclk2_div_2.into()));
// Config regular conversion
adc1_regb.cr1.modify(|_, w| w.res().bits(Resolution::Twelve.into()));
adc1_regb.cr1.modify(|_, w| w.scan().bit(Scan::Enabled.into()));
adc1_regb.cr2.modify(|_, w| w.align().bit(Align::Right.into()));
adc1_regb.cr2.modify(|_, w| w.cont().bit(Continuous::Single.into()));
// config injected conversion
adc1_regb.cr1.modify(|_, w | w.jeocie().enabled());
adc1_regb.cr2.modify(|_, w| w.jexten().rising_edge());
adc1_regb.cr2.modify(|_, w| w.jextsel().tim1cc4());
adc1_regb.jsqr.modify(|_, w| w.jl().bits(0b01));
adc1_regb.jsqr.modify(|_, w| w.jsq3().bits(10u8));
adc1_regb.jsqr.modify(|_, w| w.jsq4().bits(11u8));
adc1_regb.smpr1.modify(|_, w| w.smp10().cycles3());
adc1_regb.smpr1.modify(|_, w| w.smp11().cycles3());
// enable ADC
adc1_regb.cr2.modify(|_, w| w.adon().set_bit());
delay.delay_ms(1u32);
// enable interrupt
cortex_m::peripheral::NVIC::unmask(Interrupt::ADC);
}
/*
// Debug LED
let mut led = gpioa.pa2.into_push_pull_output();
let _ = led.set_high();
cortex_m::interrupt::free(|cs| *G_LED.borrow(cs).borrow_mut() = Some(led));
*/
// TIM2 Interrupt
let mut timer = Timer::tim2(dp.TIM2, TIM2_FREQ_KHZ.khz(), clocks);
timer.listen(Event::TimeOut);
cortex_m::interrupt::free(|cs| *G_TIM.borrow(cs).borrow_mut() = Some(timer));
//enable TIM2 interrupt
unsafe {
cortex_m::peripheral::NVIC::unmask(Interrupt::TIM2);
}
loop {
wfi();
/*
unsafe{
//hprintln!("CURRENT_A: {}A, CURRENT_B: {}A, CURRENT_C: {}A", CURRENT_A, CURRENT_B, CURRENT_C);
}
delay.delay_ms(1u32);
*/
}
} | type TypeEncoder<'a> = AS5048A<'a, TypeSpi3, gpioa::PA3<Output<PushPull>>>;
//static G_AS5048A: Mutex<RefCell<Option<TypeEncoder>>> = Mutex::new(RefCell::new(None));
type TypeMotor = Motor; | random_line_split |
wgl.rs | use crate::{conv, device::Device, native, Backend, GlContainer, PhysicalDevice, QueueFamily};
use std::{
ffi::{CString, OsStr},
iter,
mem,
os::{raw::c_void, windows::ffi::OsStrExt},
ptr,
};
use glow::Context as _;
use hal::{adapter::Adapter, format as f, image, window};
use arrayvec::ArrayVec;
use lazy_static::lazy_static;
use winapi::shared::minwindef::*;
use winapi::shared::windef::*;
use winapi::um::libloaderapi::*;
use winapi::um::wingdi::*;
use winapi::um::winuser::*;
pub mod wgl_sys {
include!(concat!(env!("OUT_DIR"), "/wgl_sys.rs"));
}
pub mod wgl_ext_sys {
include!(concat!(env!("OUT_DIR"), "/wgl_ext_sys.rs"));
}
#[link(name = "opengl32")]
extern "C" {}
#[cfg(feature = "winit")]
use winit;
pub(crate) struct Entry {
hwnd: HWND,
pub(crate) hdc: HDC,
pub(crate) wgl: wgl_ext_sys::Wgl,
lib: HMODULE,
}
unsafe impl Send for Entry {}
unsafe impl Sync for Entry {}
impl Entry {
pub fn new() -> Self {
unsafe {
let mut class: WNDCLASSEXW = mem::zeroed();
let instance = GetModuleHandleW(ptr::null());
let class_name = OsStr::new("gfx-rs wgl")
.encode_wide()
.chain(Some(0).into_iter())
.collect::<Vec<_>>();
class.cbSize = mem::size_of::<WNDCLASSEXW>() as UINT;
class.lpszClassName = class_name.as_ptr();
class.lpfnWndProc = Some(DefWindowProcW);
RegisterClassExW(&class);
let hwnd = CreateWindowExW(
0,
class_name.as_ptr(),
std::ptr::null(),
0,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
std::ptr::null_mut(),
std::ptr::null_mut(),
instance,
std::ptr::null_mut(),
);
let hdc = GetDC(hwnd);
let desc = PIXELFORMATDESCRIPTOR {
nSize: std::mem::size_of::<PIXELFORMATDESCRIPTOR>() as u16,
nVersion: 1,
dwFlags: PFD_SUPPORT_OPENGL,
iPixelType: PFD_TYPE_RGBA,
cColorBits: 8,
cRedBits: 0,
cRedShift: 0,
cGreenBits: 0,
cGreenShift: 0,
cBlueBits: 0,
cBlueShift: 0,
cAlphaBits: 8,
cAlphaShift: 0,
cAccumBits: 0,
cAccumRedBits: 0,
cAccumGreenBits: 0,
cAccumBlueBits: 0,
cAccumAlphaBits: 0,
cDepthBits: 0,
cStencilBits: 0,
cAuxBuffers: 0,
iLayerType: PFD_MAIN_PLANE,
bReserved: 0,
dwLayerMask: 0,
dwVisibleMask: 0,
dwDamageMask: 0,
};
let format_id = ChoosePixelFormat(hdc, &desc);
SetPixelFormat(hdc, format_id, &desc);
let hglrc = wglCreateContext(hdc);
println!("{:?}", (hwnd, hdc, format_id, hglrc));
wglMakeCurrent(hdc, hglrc);
let name = OsStr::new("opengl32.dll")
.encode_wide()
.chain(Some(0).into_iter())
.collect::<Vec<_>>();
let lib = LoadLibraryW(name.as_ptr());
let wgl = wgl_ext_sys::Wgl::load_with(|sym| {
let sym = CString::new(sym.as_bytes()).unwrap();
let addr = wgl_sys::GetProcAddress(sym.as_ptr()) as *const ();
if!addr.is_null() {
addr as *const _
} else {
GetProcAddress(lib, sym.as_ptr()) as *const _
}
});
Entry {
hwnd,
hdc: hdc as _,
wgl,
lib,
}
}
}
}
impl Drop for Entry {
fn drop(&mut self) {
unsafe {
DestroyWindow(self.hwnd);
}
}
}
lazy_static! {
// Entry function pointers
pub(crate) static ref WGL_ENTRY: Entry = Entry::new();
}
pub struct Instance {
pub(crate) ctxt: DeviceContext,
}
impl Instance {
pub fn create(_name: &str, version: u32) -> Result<Self, hal::UnsupportedBackend> {
unsafe {
let glrc = WGL_ENTRY.wgl.CreateContextAttribsARB(
WGL_ENTRY.hdc as *const _,
ptr::null(),
ptr::null(),
) as HGLRC;
wglMakeCurrent(WGL_ENTRY.hdc as *mut _, glrc);
Ok(Instance {
ctxt: DeviceContext {
ctxt: Context { glrc },
hdc: WGL_ENTRY.hdc,
},
})
}
}
#[cfg(windows)]
pub fn create_surface_from_hwnd(&self, hwnd: *mut c_void) -> Surface {
Surface {
hwnd: hwnd as *mut _,
swapchain: None,
renderbuffer: None,
}
}
#[cfg(feature = "winit")]
pub fn create_surface(&self, window: &winit::window::Window) -> Surface {
use winit::platform::windows::WindowExtWindows;
let hwnd = window.hwnd();
self.create_surface_from_hwnd(hwnd as *mut _)
}
}
impl hal::Instance for Instance {
type Backend = Backend;
fn enumerate_adapters(&self) -> Vec<Adapter<Backend>> {
let gl_container = GlContainer::from_fn_proc(|s| unsafe {
let sym = CString::new(s.as_bytes()).unwrap();
let addr = wgl_sys::GetProcAddress(sym.as_ptr()) as *const ();
if!addr.is_null() {
addr as *const _
} else {
GetProcAddress(WGL_ENTRY.lib, sym.as_ptr()) as *const _
}
});
let adapter = PhysicalDevice::new_adapter(self.ctxt, gl_container);
vec![adapter]
}
}
#[derive(Debug)]
pub struct Surface {
pub(crate) hwnd: HWND,
pub(crate) swapchain: Option<Swapchain>,
renderbuffer: Option<native::Renderbuffer>,
}
// TODO: high -msiglreith
unsafe impl Send for Surface {}
unsafe impl Sync for Surface {}
impl window::Surface<Backend> for Surface {
fn compatibility(
&self,
physical_device: &PhysicalDevice,
) -> (
window::SurfaceCapabilities,
Option<Vec<f::Format>>,
Vec<window::PresentMode>,
) {
let extent = unsafe {
let mut rect: RECT = mem::zeroed();
GetClientRect(self.hwnd, &mut rect);
window::Extent2D {
width: (rect.right - rect.left) as _,
height: (rect.bottom - rect.top) as _,
}
};
let caps = window::SurfaceCapabilities {
image_count: 2..= 2,
current_extent: Some(extent),
extents: extent..= extent,
max_image_layers: 1,
usage: image::Usage::COLOR_ATTACHMENT | image::Usage::TRANSFER_SRC,
composite_alpha: window::CompositeAlpha::OPAQUE, //TODO
};
let present_modes = vec![
window::PresentMode::Fifo, //TODO
];
(
caps,
Some(vec![f::Format::Rgba8Srgb, f::Format::Bgra8Srgb]),
present_modes,
)
}
fn supports_queue_family(&self, _queue_family: &QueueFamily) -> bool {
true
}
}
impl window::PresentationSurface<Backend> for Surface {
type SwapchainImage = native::ImageView;
unsafe fn configure_swapchain(
&mut self,
device: &Device,
config: window::SwapchainConfig,
) -> Result<(), window::CreationError> {
let gl = &device.share.context;
let context = match self.swapchain.take() {
Some(old) => {
for fbo in old.fbos {
gl.delete_framebuffer(fbo);
}
old.context
}
None => PresentContext::new(self, &device.share.instance_context),
};
context.make_current();
if self.renderbuffer.is_none() {
self.renderbuffer = Some(gl.create_renderbuffer().unwrap());
}
let desc = conv::describe_format(config.format).unwrap();
gl.bind_renderbuffer(glow::RENDERBUFFER, self.renderbuffer);
gl.renderbuffer_storage(
glow::RENDERBUFFER,
desc.tex_internal,
config.extent.width as i32,
config.extent.height as i32,
);
let fbo = gl.create_framebuffer().unwrap();
gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(fbo));
gl.framebuffer_renderbuffer(
glow::READ_FRAMEBUFFER,
glow::COLOR_ATTACHMENT0,
glow::RENDERBUFFER,
self.renderbuffer, | extent: config.extent,
fbos: iter::once(fbo).collect(),
});
Ok(())
}
unsafe fn unconfigure_swapchain(&mut self, device: &Device) {
let gl = &device.share.context;
if let Some(old) = self.swapchain.take() {
for fbo in old.fbos {
gl.delete_framebuffer(fbo);
}
}
if let Some(rbo) = self.renderbuffer.take() {
gl.delete_renderbuffer(rbo);
}
}
unsafe fn acquire_image(
&mut self,
_timeout_ns: u64,
) -> Result<(Self::SwapchainImage, Option<window::Suboptimal>), window::AcquireError> {
let image = native::ImageView::Renderbuffer(self.renderbuffer.unwrap());
Ok((image, None))
}
}
#[derive(Debug)]
pub struct Swapchain {
pub(crate) fbos: ArrayVec<[native::RawFrameBuffer; 3]>,
pub(crate) context: PresentContext,
pub(crate) extent: window::Extent2D,
}
impl Swapchain {
pub(crate) fn make_current(&self) {
self.context.make_current();
}
pub(crate) fn swap_buffers(&self) {
self.context.swap_buffers();
}
}
impl window::Swapchain<Backend> for Swapchain {
unsafe fn acquire_image(
&mut self,
_timeout_ns: u64,
_semaphore: Option<&native::Semaphore>,
_fence: Option<&native::Fence>,
) -> Result<(window::SwapImageIndex, Option<window::Suboptimal>), window::AcquireError> {
Ok((0, None)) // TODO
}
}
/// Basic abstraction for wgl context handles.
#[derive(Debug, Copy, Clone)]
struct Context {
glrc: HGLRC,
}
impl Context {
unsafe fn make_current(&self, hdc: HDC) {
wglMakeCurrent(hdc, self.glrc);
}
}
/// Owned context for devices and instances.
#[derive(Debug, Copy, Clone)]
pub(crate) struct DeviceContext {
/// Owned wgl context.
ctxt: Context,
/// Device context owned by the corresponding instance.
///
/// This refers to either a pbuffer or dummy window. Therefore not used for actual presentation.
hdc: HDC,
}
// TODO
unsafe impl Send for DeviceContext {}
unsafe impl Sync for DeviceContext {}
impl DeviceContext {
pub(crate) fn make_current(&self) {
unsafe {
self.ctxt.make_current(self.hdc);
}
}
}
/// Owned context for swapchains which soley is required for presentation.
#[derive(Debug)]
pub(crate) struct PresentContext {
/// Owned wgl context.
ctxt: Context,
/// Device context of the corresponding presentation surface.
hdc: HDC,
}
// TODO
unsafe impl Send for PresentContext {}
unsafe impl Sync for PresentContext {}
impl PresentContext {
pub(crate) fn new(surface: &Surface, device_ctxt: &DeviceContext) -> Self {
// TODO: configuration options
unsafe {
let hdc = GetDC(surface.hwnd);
let desc = PIXELFORMATDESCRIPTOR {
nSize: std::mem::size_of::<PIXELFORMATDESCRIPTOR>() as u16,
nVersion: 1,
dwFlags: PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER,
iPixelType: PFD_TYPE_RGBA,
cColorBits: 32,
cRedBits: 0,
cRedShift: 0,
cGreenBits: 0,
cGreenShift: 0,
cBlueBits: 0,
cBlueShift: 0,
cAlphaBits: 8,
cAlphaShift: 0,
cAccumBits: 0,
cAccumRedBits: 0,
cAccumGreenBits: 0,
cAccumBlueBits: 0,
cAccumAlphaBits: 0,
cDepthBits: 0,
cStencilBits: 0,
cAuxBuffers: 0,
iLayerType: PFD_MAIN_PLANE,
bReserved: 0,
dwLayerMask: 0,
dwVisibleMask: 0,
dwDamageMask: 0,
};
let format_id = ChoosePixelFormat(hdc, &desc);
SetPixelFormat(hdc, format_id, &desc);
let glrc = WGL_ENTRY.wgl.CreateContextAttribsARB(
hdc as *const _,
device_ctxt.ctxt.glrc as _,
ptr::null(),
) as HGLRC;
wglMakeCurrent(hdc, glrc);
PresentContext {
ctxt: Context { glrc },
hdc,
}
}
}
pub(crate) fn make_current(&self) {
unsafe {
self.ctxt.make_current(self.hdc);
}
}
fn swap_buffers(&self) {
unsafe {
SwapBuffers(self.hdc);
}
}
} | );
self.swapchain = Some(Swapchain {
context, | random_line_split |
wgl.rs | use crate::{conv, device::Device, native, Backend, GlContainer, PhysicalDevice, QueueFamily};
use std::{
ffi::{CString, OsStr},
iter,
mem,
os::{raw::c_void, windows::ffi::OsStrExt},
ptr,
};
use glow::Context as _;
use hal::{adapter::Adapter, format as f, image, window};
use arrayvec::ArrayVec;
use lazy_static::lazy_static;
use winapi::shared::minwindef::*;
use winapi::shared::windef::*;
use winapi::um::libloaderapi::*;
use winapi::um::wingdi::*;
use winapi::um::winuser::*;
pub mod wgl_sys {
include!(concat!(env!("OUT_DIR"), "/wgl_sys.rs"));
}
pub mod wgl_ext_sys {
include!(concat!(env!("OUT_DIR"), "/wgl_ext_sys.rs"));
}
#[link(name = "opengl32")]
extern "C" {}
#[cfg(feature = "winit")]
use winit;
pub(crate) struct Entry {
hwnd: HWND,
pub(crate) hdc: HDC,
pub(crate) wgl: wgl_ext_sys::Wgl,
lib: HMODULE,
}
unsafe impl Send for Entry {}
unsafe impl Sync for Entry {}
impl Entry {
pub fn new() -> Self {
unsafe {
let mut class: WNDCLASSEXW = mem::zeroed();
let instance = GetModuleHandleW(ptr::null());
let class_name = OsStr::new("gfx-rs wgl")
.encode_wide()
.chain(Some(0).into_iter())
.collect::<Vec<_>>();
class.cbSize = mem::size_of::<WNDCLASSEXW>() as UINT;
class.lpszClassName = class_name.as_ptr();
class.lpfnWndProc = Some(DefWindowProcW);
RegisterClassExW(&class);
let hwnd = CreateWindowExW(
0,
class_name.as_ptr(),
std::ptr::null(),
0,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
std::ptr::null_mut(),
std::ptr::null_mut(),
instance,
std::ptr::null_mut(),
);
let hdc = GetDC(hwnd);
let desc = PIXELFORMATDESCRIPTOR {
nSize: std::mem::size_of::<PIXELFORMATDESCRIPTOR>() as u16,
nVersion: 1,
dwFlags: PFD_SUPPORT_OPENGL,
iPixelType: PFD_TYPE_RGBA,
cColorBits: 8,
cRedBits: 0,
cRedShift: 0,
cGreenBits: 0,
cGreenShift: 0,
cBlueBits: 0,
cBlueShift: 0,
cAlphaBits: 8,
cAlphaShift: 0,
cAccumBits: 0,
cAccumRedBits: 0,
cAccumGreenBits: 0,
cAccumBlueBits: 0,
cAccumAlphaBits: 0,
cDepthBits: 0,
cStencilBits: 0,
cAuxBuffers: 0,
iLayerType: PFD_MAIN_PLANE,
bReserved: 0,
dwLayerMask: 0,
dwVisibleMask: 0,
dwDamageMask: 0,
};
let format_id = ChoosePixelFormat(hdc, &desc);
SetPixelFormat(hdc, format_id, &desc);
let hglrc = wglCreateContext(hdc);
println!("{:?}", (hwnd, hdc, format_id, hglrc));
wglMakeCurrent(hdc, hglrc);
let name = OsStr::new("opengl32.dll")
.encode_wide()
.chain(Some(0).into_iter())
.collect::<Vec<_>>();
let lib = LoadLibraryW(name.as_ptr());
let wgl = wgl_ext_sys::Wgl::load_with(|sym| {
let sym = CString::new(sym.as_bytes()).unwrap();
let addr = wgl_sys::GetProcAddress(sym.as_ptr()) as *const ();
if!addr.is_null() | else {
GetProcAddress(lib, sym.as_ptr()) as *const _
}
});
Entry {
hwnd,
hdc: hdc as _,
wgl,
lib,
}
}
}
}
impl Drop for Entry {
fn drop(&mut self) {
unsafe {
DestroyWindow(self.hwnd);
}
}
}
lazy_static! {
// Entry function pointers
pub(crate) static ref WGL_ENTRY: Entry = Entry::new();
}
pub struct Instance {
pub(crate) ctxt: DeviceContext,
}
impl Instance {
pub fn create(_name: &str, version: u32) -> Result<Self, hal::UnsupportedBackend> {
unsafe {
let glrc = WGL_ENTRY.wgl.CreateContextAttribsARB(
WGL_ENTRY.hdc as *const _,
ptr::null(),
ptr::null(),
) as HGLRC;
wglMakeCurrent(WGL_ENTRY.hdc as *mut _, glrc);
Ok(Instance {
ctxt: DeviceContext {
ctxt: Context { glrc },
hdc: WGL_ENTRY.hdc,
},
})
}
}
#[cfg(windows)]
pub fn create_surface_from_hwnd(&self, hwnd: *mut c_void) -> Surface {
Surface {
hwnd: hwnd as *mut _,
swapchain: None,
renderbuffer: None,
}
}
#[cfg(feature = "winit")]
pub fn create_surface(&self, window: &winit::window::Window) -> Surface {
use winit::platform::windows::WindowExtWindows;
let hwnd = window.hwnd();
self.create_surface_from_hwnd(hwnd as *mut _)
}
}
impl hal::Instance for Instance {
type Backend = Backend;
fn enumerate_adapters(&self) -> Vec<Adapter<Backend>> {
let gl_container = GlContainer::from_fn_proc(|s| unsafe {
let sym = CString::new(s.as_bytes()).unwrap();
let addr = wgl_sys::GetProcAddress(sym.as_ptr()) as *const ();
if!addr.is_null() {
addr as *const _
} else {
GetProcAddress(WGL_ENTRY.lib, sym.as_ptr()) as *const _
}
});
let adapter = PhysicalDevice::new_adapter(self.ctxt, gl_container);
vec![adapter]
}
}
#[derive(Debug)]
pub struct Surface {
pub(crate) hwnd: HWND,
pub(crate) swapchain: Option<Swapchain>,
renderbuffer: Option<native::Renderbuffer>,
}
// TODO: high -msiglreith
unsafe impl Send for Surface {}
unsafe impl Sync for Surface {}
impl window::Surface<Backend> for Surface {
fn compatibility(
&self,
physical_device: &PhysicalDevice,
) -> (
window::SurfaceCapabilities,
Option<Vec<f::Format>>,
Vec<window::PresentMode>,
) {
let extent = unsafe {
let mut rect: RECT = mem::zeroed();
GetClientRect(self.hwnd, &mut rect);
window::Extent2D {
width: (rect.right - rect.left) as _,
height: (rect.bottom - rect.top) as _,
}
};
let caps = window::SurfaceCapabilities {
image_count: 2..= 2,
current_extent: Some(extent),
extents: extent..= extent,
max_image_layers: 1,
usage: image::Usage::COLOR_ATTACHMENT | image::Usage::TRANSFER_SRC,
composite_alpha: window::CompositeAlpha::OPAQUE, //TODO
};
let present_modes = vec![
window::PresentMode::Fifo, //TODO
];
(
caps,
Some(vec![f::Format::Rgba8Srgb, f::Format::Bgra8Srgb]),
present_modes,
)
}
fn supports_queue_family(&self, _queue_family: &QueueFamily) -> bool {
true
}
}
impl window::PresentationSurface<Backend> for Surface {
type SwapchainImage = native::ImageView;
unsafe fn configure_swapchain(
&mut self,
device: &Device,
config: window::SwapchainConfig,
) -> Result<(), window::CreationError> {
let gl = &device.share.context;
let context = match self.swapchain.take() {
Some(old) => {
for fbo in old.fbos {
gl.delete_framebuffer(fbo);
}
old.context
}
None => PresentContext::new(self, &device.share.instance_context),
};
context.make_current();
if self.renderbuffer.is_none() {
self.renderbuffer = Some(gl.create_renderbuffer().unwrap());
}
let desc = conv::describe_format(config.format).unwrap();
gl.bind_renderbuffer(glow::RENDERBUFFER, self.renderbuffer);
gl.renderbuffer_storage(
glow::RENDERBUFFER,
desc.tex_internal,
config.extent.width as i32,
config.extent.height as i32,
);
let fbo = gl.create_framebuffer().unwrap();
gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(fbo));
gl.framebuffer_renderbuffer(
glow::READ_FRAMEBUFFER,
glow::COLOR_ATTACHMENT0,
glow::RENDERBUFFER,
self.renderbuffer,
);
self.swapchain = Some(Swapchain {
context,
extent: config.extent,
fbos: iter::once(fbo).collect(),
});
Ok(())
}
unsafe fn unconfigure_swapchain(&mut self, device: &Device) {
let gl = &device.share.context;
if let Some(old) = self.swapchain.take() {
for fbo in old.fbos {
gl.delete_framebuffer(fbo);
}
}
if let Some(rbo) = self.renderbuffer.take() {
gl.delete_renderbuffer(rbo);
}
}
unsafe fn acquire_image(
&mut self,
_timeout_ns: u64,
) -> Result<(Self::SwapchainImage, Option<window::Suboptimal>), window::AcquireError> {
let image = native::ImageView::Renderbuffer(self.renderbuffer.unwrap());
Ok((image, None))
}
}
#[derive(Debug)]
pub struct Swapchain {
pub(crate) fbos: ArrayVec<[native::RawFrameBuffer; 3]>,
pub(crate) context: PresentContext,
pub(crate) extent: window::Extent2D,
}
impl Swapchain {
pub(crate) fn make_current(&self) {
self.context.make_current();
}
pub(crate) fn swap_buffers(&self) {
self.context.swap_buffers();
}
}
impl window::Swapchain<Backend> for Swapchain {
unsafe fn acquire_image(
&mut self,
_timeout_ns: u64,
_semaphore: Option<&native::Semaphore>,
_fence: Option<&native::Fence>,
) -> Result<(window::SwapImageIndex, Option<window::Suboptimal>), window::AcquireError> {
Ok((0, None)) // TODO
}
}
/// Basic abstraction for wgl context handles.
#[derive(Debug, Copy, Clone)]
struct Context {
glrc: HGLRC,
}
impl Context {
unsafe fn make_current(&self, hdc: HDC) {
wglMakeCurrent(hdc, self.glrc);
}
}
/// Owned context for devices and instances.
#[derive(Debug, Copy, Clone)]
pub(crate) struct DeviceContext {
/// Owned wgl context.
ctxt: Context,
/// Device context owned by the corresponding instance.
///
/// This refers to either a pbuffer or dummy window. Therefore not used for actual presentation.
hdc: HDC,
}
// TODO
unsafe impl Send for DeviceContext {}
unsafe impl Sync for DeviceContext {}
impl DeviceContext {
pub(crate) fn make_current(&self) {
unsafe {
self.ctxt.make_current(self.hdc);
}
}
}
/// Owned context for swapchains which soley is required for presentation.
#[derive(Debug)]
pub(crate) struct PresentContext {
/// Owned wgl context.
ctxt: Context,
/// Device context of the corresponding presentation surface.
hdc: HDC,
}
// TODO
unsafe impl Send for PresentContext {}
unsafe impl Sync for PresentContext {}
impl PresentContext {
pub(crate) fn new(surface: &Surface, device_ctxt: &DeviceContext) -> Self {
// TODO: configuration options
unsafe {
let hdc = GetDC(surface.hwnd);
let desc = PIXELFORMATDESCRIPTOR {
nSize: std::mem::size_of::<PIXELFORMATDESCRIPTOR>() as u16,
nVersion: 1,
dwFlags: PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER,
iPixelType: PFD_TYPE_RGBA,
cColorBits: 32,
cRedBits: 0,
cRedShift: 0,
cGreenBits: 0,
cGreenShift: 0,
cBlueBits: 0,
cBlueShift: 0,
cAlphaBits: 8,
cAlphaShift: 0,
cAccumBits: 0,
cAccumRedBits: 0,
cAccumGreenBits: 0,
cAccumBlueBits: 0,
cAccumAlphaBits: 0,
cDepthBits: 0,
cStencilBits: 0,
cAuxBuffers: 0,
iLayerType: PFD_MAIN_PLANE,
bReserved: 0,
dwLayerMask: 0,
dwVisibleMask: 0,
dwDamageMask: 0,
};
let format_id = ChoosePixelFormat(hdc, &desc);
SetPixelFormat(hdc, format_id, &desc);
let glrc = WGL_ENTRY.wgl.CreateContextAttribsARB(
hdc as *const _,
device_ctxt.ctxt.glrc as _,
ptr::null(),
) as HGLRC;
wglMakeCurrent(hdc, glrc);
PresentContext {
ctxt: Context { glrc },
hdc,
}
}
}
pub(crate) fn make_current(&self) {
unsafe {
self.ctxt.make_current(self.hdc);
}
}
fn swap_buffers(&self) {
unsafe {
SwapBuffers(self.hdc);
}
}
}
| {
addr as *const _
} | conditional_block |
wgl.rs | use crate::{conv, device::Device, native, Backend, GlContainer, PhysicalDevice, QueueFamily};
use std::{
ffi::{CString, OsStr},
iter,
mem,
os::{raw::c_void, windows::ffi::OsStrExt},
ptr,
};
use glow::Context as _;
use hal::{adapter::Adapter, format as f, image, window};
use arrayvec::ArrayVec;
use lazy_static::lazy_static;
use winapi::shared::minwindef::*;
use winapi::shared::windef::*;
use winapi::um::libloaderapi::*;
use winapi::um::wingdi::*;
use winapi::um::winuser::*;
pub mod wgl_sys {
include!(concat!(env!("OUT_DIR"), "/wgl_sys.rs"));
}
pub mod wgl_ext_sys {
include!(concat!(env!("OUT_DIR"), "/wgl_ext_sys.rs"));
}
#[link(name = "opengl32")]
extern "C" {}
#[cfg(feature = "winit")]
use winit;
pub(crate) struct Entry {
hwnd: HWND,
pub(crate) hdc: HDC,
pub(crate) wgl: wgl_ext_sys::Wgl,
lib: HMODULE,
}
unsafe impl Send for Entry {}
unsafe impl Sync for Entry {}
impl Entry {
pub fn new() -> Self {
unsafe {
let mut class: WNDCLASSEXW = mem::zeroed();
let instance = GetModuleHandleW(ptr::null());
let class_name = OsStr::new("gfx-rs wgl")
.encode_wide()
.chain(Some(0).into_iter())
.collect::<Vec<_>>();
class.cbSize = mem::size_of::<WNDCLASSEXW>() as UINT;
class.lpszClassName = class_name.as_ptr();
class.lpfnWndProc = Some(DefWindowProcW);
RegisterClassExW(&class);
let hwnd = CreateWindowExW(
0,
class_name.as_ptr(),
std::ptr::null(),
0,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
std::ptr::null_mut(),
std::ptr::null_mut(),
instance,
std::ptr::null_mut(),
);
let hdc = GetDC(hwnd);
let desc = PIXELFORMATDESCRIPTOR {
nSize: std::mem::size_of::<PIXELFORMATDESCRIPTOR>() as u16,
nVersion: 1,
dwFlags: PFD_SUPPORT_OPENGL,
iPixelType: PFD_TYPE_RGBA,
cColorBits: 8,
cRedBits: 0,
cRedShift: 0,
cGreenBits: 0,
cGreenShift: 0,
cBlueBits: 0,
cBlueShift: 0,
cAlphaBits: 8,
cAlphaShift: 0,
cAccumBits: 0,
cAccumRedBits: 0,
cAccumGreenBits: 0,
cAccumBlueBits: 0,
cAccumAlphaBits: 0,
cDepthBits: 0,
cStencilBits: 0,
cAuxBuffers: 0,
iLayerType: PFD_MAIN_PLANE,
bReserved: 0,
dwLayerMask: 0,
dwVisibleMask: 0,
dwDamageMask: 0,
};
let format_id = ChoosePixelFormat(hdc, &desc);
SetPixelFormat(hdc, format_id, &desc);
let hglrc = wglCreateContext(hdc);
println!("{:?}", (hwnd, hdc, format_id, hglrc));
wglMakeCurrent(hdc, hglrc);
let name = OsStr::new("opengl32.dll")
.encode_wide()
.chain(Some(0).into_iter())
.collect::<Vec<_>>();
let lib = LoadLibraryW(name.as_ptr());
let wgl = wgl_ext_sys::Wgl::load_with(|sym| {
let sym = CString::new(sym.as_bytes()).unwrap();
let addr = wgl_sys::GetProcAddress(sym.as_ptr()) as *const ();
if!addr.is_null() {
addr as *const _
} else {
GetProcAddress(lib, sym.as_ptr()) as *const _
}
});
Entry {
hwnd,
hdc: hdc as _,
wgl,
lib,
}
}
}
}
impl Drop for Entry {
fn drop(&mut self) {
unsafe {
DestroyWindow(self.hwnd);
}
}
}
lazy_static! {
// Entry function pointers
pub(crate) static ref WGL_ENTRY: Entry = Entry::new();
}
pub struct Instance {
pub(crate) ctxt: DeviceContext,
}
impl Instance {
pub fn create(_name: &str, version: u32) -> Result<Self, hal::UnsupportedBackend> {
unsafe {
let glrc = WGL_ENTRY.wgl.CreateContextAttribsARB(
WGL_ENTRY.hdc as *const _,
ptr::null(),
ptr::null(),
) as HGLRC;
wglMakeCurrent(WGL_ENTRY.hdc as *mut _, glrc);
Ok(Instance {
ctxt: DeviceContext {
ctxt: Context { glrc },
hdc: WGL_ENTRY.hdc,
},
})
}
}
#[cfg(windows)]
pub fn create_surface_from_hwnd(&self, hwnd: *mut c_void) -> Surface {
Surface {
hwnd: hwnd as *mut _,
swapchain: None,
renderbuffer: None,
}
}
#[cfg(feature = "winit")]
pub fn create_surface(&self, window: &winit::window::Window) -> Surface {
use winit::platform::windows::WindowExtWindows;
let hwnd = window.hwnd();
self.create_surface_from_hwnd(hwnd as *mut _)
}
}
impl hal::Instance for Instance {
type Backend = Backend;
fn enumerate_adapters(&self) -> Vec<Adapter<Backend>> {
let gl_container = GlContainer::from_fn_proc(|s| unsafe {
let sym = CString::new(s.as_bytes()).unwrap();
let addr = wgl_sys::GetProcAddress(sym.as_ptr()) as *const ();
if!addr.is_null() {
addr as *const _
} else {
GetProcAddress(WGL_ENTRY.lib, sym.as_ptr()) as *const _
}
});
let adapter = PhysicalDevice::new_adapter(self.ctxt, gl_container);
vec![adapter]
}
}
#[derive(Debug)]
pub struct Surface {
pub(crate) hwnd: HWND,
pub(crate) swapchain: Option<Swapchain>,
renderbuffer: Option<native::Renderbuffer>,
}
// TODO: high -msiglreith
unsafe impl Send for Surface {}
unsafe impl Sync for Surface {}
impl window::Surface<Backend> for Surface {
fn compatibility(
&self,
physical_device: &PhysicalDevice,
) -> (
window::SurfaceCapabilities,
Option<Vec<f::Format>>,
Vec<window::PresentMode>,
) {
let extent = unsafe {
let mut rect: RECT = mem::zeroed();
GetClientRect(self.hwnd, &mut rect);
window::Extent2D {
width: (rect.right - rect.left) as _,
height: (rect.bottom - rect.top) as _,
}
};
let caps = window::SurfaceCapabilities {
image_count: 2..= 2,
current_extent: Some(extent),
extents: extent..= extent,
max_image_layers: 1,
usage: image::Usage::COLOR_ATTACHMENT | image::Usage::TRANSFER_SRC,
composite_alpha: window::CompositeAlpha::OPAQUE, //TODO
};
let present_modes = vec![
window::PresentMode::Fifo, //TODO
];
(
caps,
Some(vec![f::Format::Rgba8Srgb, f::Format::Bgra8Srgb]),
present_modes,
)
}
fn supports_queue_family(&self, _queue_family: &QueueFamily) -> bool |
}
impl window::PresentationSurface<Backend> for Surface {
type SwapchainImage = native::ImageView;
unsafe fn configure_swapchain(
&mut self,
device: &Device,
config: window::SwapchainConfig,
) -> Result<(), window::CreationError> {
let gl = &device.share.context;
let context = match self.swapchain.take() {
Some(old) => {
for fbo in old.fbos {
gl.delete_framebuffer(fbo);
}
old.context
}
None => PresentContext::new(self, &device.share.instance_context),
};
context.make_current();
if self.renderbuffer.is_none() {
self.renderbuffer = Some(gl.create_renderbuffer().unwrap());
}
let desc = conv::describe_format(config.format).unwrap();
gl.bind_renderbuffer(glow::RENDERBUFFER, self.renderbuffer);
gl.renderbuffer_storage(
glow::RENDERBUFFER,
desc.tex_internal,
config.extent.width as i32,
config.extent.height as i32,
);
let fbo = gl.create_framebuffer().unwrap();
gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(fbo));
gl.framebuffer_renderbuffer(
glow::READ_FRAMEBUFFER,
glow::COLOR_ATTACHMENT0,
glow::RENDERBUFFER,
self.renderbuffer,
);
self.swapchain = Some(Swapchain {
context,
extent: config.extent,
fbos: iter::once(fbo).collect(),
});
Ok(())
}
unsafe fn unconfigure_swapchain(&mut self, device: &Device) {
let gl = &device.share.context;
if let Some(old) = self.swapchain.take() {
for fbo in old.fbos {
gl.delete_framebuffer(fbo);
}
}
if let Some(rbo) = self.renderbuffer.take() {
gl.delete_renderbuffer(rbo);
}
}
unsafe fn acquire_image(
&mut self,
_timeout_ns: u64,
) -> Result<(Self::SwapchainImage, Option<window::Suboptimal>), window::AcquireError> {
let image = native::ImageView::Renderbuffer(self.renderbuffer.unwrap());
Ok((image, None))
}
}
#[derive(Debug)]
pub struct Swapchain {
pub(crate) fbos: ArrayVec<[native::RawFrameBuffer; 3]>,
pub(crate) context: PresentContext,
pub(crate) extent: window::Extent2D,
}
impl Swapchain {
pub(crate) fn make_current(&self) {
self.context.make_current();
}
pub(crate) fn swap_buffers(&self) {
self.context.swap_buffers();
}
}
impl window::Swapchain<Backend> for Swapchain {
unsafe fn acquire_image(
&mut self,
_timeout_ns: u64,
_semaphore: Option<&native::Semaphore>,
_fence: Option<&native::Fence>,
) -> Result<(window::SwapImageIndex, Option<window::Suboptimal>), window::AcquireError> {
Ok((0, None)) // TODO
}
}
/// Basic abstraction for wgl context handles.
#[derive(Debug, Copy, Clone)]
struct Context {
glrc: HGLRC,
}
impl Context {
unsafe fn make_current(&self, hdc: HDC) {
wglMakeCurrent(hdc, self.glrc);
}
}
/// Owned context for devices and instances.
#[derive(Debug, Copy, Clone)]
pub(crate) struct DeviceContext {
/// Owned wgl context.
ctxt: Context,
/// Device context owned by the corresponding instance.
///
/// This refers to either a pbuffer or dummy window. Therefore not used for actual presentation.
hdc: HDC,
}
// TODO
unsafe impl Send for DeviceContext {}
unsafe impl Sync for DeviceContext {}
impl DeviceContext {
pub(crate) fn make_current(&self) {
unsafe {
self.ctxt.make_current(self.hdc);
}
}
}
/// Owned context for swapchains which soley is required for presentation.
#[derive(Debug)]
pub(crate) struct PresentContext {
/// Owned wgl context.
ctxt: Context,
/// Device context of the corresponding presentation surface.
hdc: HDC,
}
// TODO
unsafe impl Send for PresentContext {}
unsafe impl Sync for PresentContext {}
impl PresentContext {
pub(crate) fn new(surface: &Surface, device_ctxt: &DeviceContext) -> Self {
// TODO: configuration options
unsafe {
let hdc = GetDC(surface.hwnd);
let desc = PIXELFORMATDESCRIPTOR {
nSize: std::mem::size_of::<PIXELFORMATDESCRIPTOR>() as u16,
nVersion: 1,
dwFlags: PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER,
iPixelType: PFD_TYPE_RGBA,
cColorBits: 32,
cRedBits: 0,
cRedShift: 0,
cGreenBits: 0,
cGreenShift: 0,
cBlueBits: 0,
cBlueShift: 0,
cAlphaBits: 8,
cAlphaShift: 0,
cAccumBits: 0,
cAccumRedBits: 0,
cAccumGreenBits: 0,
cAccumBlueBits: 0,
cAccumAlphaBits: 0,
cDepthBits: 0,
cStencilBits: 0,
cAuxBuffers: 0,
iLayerType: PFD_MAIN_PLANE,
bReserved: 0,
dwLayerMask: 0,
dwVisibleMask: 0,
dwDamageMask: 0,
};
let format_id = ChoosePixelFormat(hdc, &desc);
SetPixelFormat(hdc, format_id, &desc);
let glrc = WGL_ENTRY.wgl.CreateContextAttribsARB(
hdc as *const _,
device_ctxt.ctxt.glrc as _,
ptr::null(),
) as HGLRC;
wglMakeCurrent(hdc, glrc);
PresentContext {
ctxt: Context { glrc },
hdc,
}
}
}
pub(crate) fn make_current(&self) {
unsafe {
self.ctxt.make_current(self.hdc);
}
}
fn swap_buffers(&self) {
unsafe {
SwapBuffers(self.hdc);
}
}
}
| {
true
} | identifier_body |
wgl.rs | use crate::{conv, device::Device, native, Backend, GlContainer, PhysicalDevice, QueueFamily};
use std::{
ffi::{CString, OsStr},
iter,
mem,
os::{raw::c_void, windows::ffi::OsStrExt},
ptr,
};
use glow::Context as _;
use hal::{adapter::Adapter, format as f, image, window};
use arrayvec::ArrayVec;
use lazy_static::lazy_static;
use winapi::shared::minwindef::*;
use winapi::shared::windef::*;
use winapi::um::libloaderapi::*;
use winapi::um::wingdi::*;
use winapi::um::winuser::*;
pub mod wgl_sys {
include!(concat!(env!("OUT_DIR"), "/wgl_sys.rs"));
}
pub mod wgl_ext_sys {
include!(concat!(env!("OUT_DIR"), "/wgl_ext_sys.rs"));
}
#[link(name = "opengl32")]
extern "C" {}
#[cfg(feature = "winit")]
use winit;
pub(crate) struct Entry {
hwnd: HWND,
pub(crate) hdc: HDC,
pub(crate) wgl: wgl_ext_sys::Wgl,
lib: HMODULE,
}
unsafe impl Send for Entry {}
unsafe impl Sync for Entry {}
impl Entry {
pub fn new() -> Self {
unsafe {
let mut class: WNDCLASSEXW = mem::zeroed();
let instance = GetModuleHandleW(ptr::null());
let class_name = OsStr::new("gfx-rs wgl")
.encode_wide()
.chain(Some(0).into_iter())
.collect::<Vec<_>>();
class.cbSize = mem::size_of::<WNDCLASSEXW>() as UINT;
class.lpszClassName = class_name.as_ptr();
class.lpfnWndProc = Some(DefWindowProcW);
RegisterClassExW(&class);
let hwnd = CreateWindowExW(
0,
class_name.as_ptr(),
std::ptr::null(),
0,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
std::ptr::null_mut(),
std::ptr::null_mut(),
instance,
std::ptr::null_mut(),
);
let hdc = GetDC(hwnd);
let desc = PIXELFORMATDESCRIPTOR {
nSize: std::mem::size_of::<PIXELFORMATDESCRIPTOR>() as u16,
nVersion: 1,
dwFlags: PFD_SUPPORT_OPENGL,
iPixelType: PFD_TYPE_RGBA,
cColorBits: 8,
cRedBits: 0,
cRedShift: 0,
cGreenBits: 0,
cGreenShift: 0,
cBlueBits: 0,
cBlueShift: 0,
cAlphaBits: 8,
cAlphaShift: 0,
cAccumBits: 0,
cAccumRedBits: 0,
cAccumGreenBits: 0,
cAccumBlueBits: 0,
cAccumAlphaBits: 0,
cDepthBits: 0,
cStencilBits: 0,
cAuxBuffers: 0,
iLayerType: PFD_MAIN_PLANE,
bReserved: 0,
dwLayerMask: 0,
dwVisibleMask: 0,
dwDamageMask: 0,
};
let format_id = ChoosePixelFormat(hdc, &desc);
SetPixelFormat(hdc, format_id, &desc);
let hglrc = wglCreateContext(hdc);
println!("{:?}", (hwnd, hdc, format_id, hglrc));
wglMakeCurrent(hdc, hglrc);
let name = OsStr::new("opengl32.dll")
.encode_wide()
.chain(Some(0).into_iter())
.collect::<Vec<_>>();
let lib = LoadLibraryW(name.as_ptr());
let wgl = wgl_ext_sys::Wgl::load_with(|sym| {
let sym = CString::new(sym.as_bytes()).unwrap();
let addr = wgl_sys::GetProcAddress(sym.as_ptr()) as *const ();
if!addr.is_null() {
addr as *const _
} else {
GetProcAddress(lib, sym.as_ptr()) as *const _
}
});
Entry {
hwnd,
hdc: hdc as _,
wgl,
lib,
}
}
}
}
impl Drop for Entry {
fn drop(&mut self) {
unsafe {
DestroyWindow(self.hwnd);
}
}
}
lazy_static! {
// Entry function pointers
pub(crate) static ref WGL_ENTRY: Entry = Entry::new();
}
pub struct Instance {
pub(crate) ctxt: DeviceContext,
}
impl Instance {
pub fn create(_name: &str, version: u32) -> Result<Self, hal::UnsupportedBackend> {
unsafe {
let glrc = WGL_ENTRY.wgl.CreateContextAttribsARB(
WGL_ENTRY.hdc as *const _,
ptr::null(),
ptr::null(),
) as HGLRC;
wglMakeCurrent(WGL_ENTRY.hdc as *mut _, glrc);
Ok(Instance {
ctxt: DeviceContext {
ctxt: Context { glrc },
hdc: WGL_ENTRY.hdc,
},
})
}
}
#[cfg(windows)]
pub fn create_surface_from_hwnd(&self, hwnd: *mut c_void) -> Surface {
Surface {
hwnd: hwnd as *mut _,
swapchain: None,
renderbuffer: None,
}
}
#[cfg(feature = "winit")]
pub fn create_surface(&self, window: &winit::window::Window) -> Surface {
use winit::platform::windows::WindowExtWindows;
let hwnd = window.hwnd();
self.create_surface_from_hwnd(hwnd as *mut _)
}
}
impl hal::Instance for Instance {
type Backend = Backend;
fn enumerate_adapters(&self) -> Vec<Adapter<Backend>> {
let gl_container = GlContainer::from_fn_proc(|s| unsafe {
let sym = CString::new(s.as_bytes()).unwrap();
let addr = wgl_sys::GetProcAddress(sym.as_ptr()) as *const ();
if!addr.is_null() {
addr as *const _
} else {
GetProcAddress(WGL_ENTRY.lib, sym.as_ptr()) as *const _
}
});
let adapter = PhysicalDevice::new_adapter(self.ctxt, gl_container);
vec![adapter]
}
}
#[derive(Debug)]
pub struct Surface {
pub(crate) hwnd: HWND,
pub(crate) swapchain: Option<Swapchain>,
renderbuffer: Option<native::Renderbuffer>,
}
// TODO: high -msiglreith
unsafe impl Send for Surface {}
unsafe impl Sync for Surface {}
impl window::Surface<Backend> for Surface {
fn compatibility(
&self,
physical_device: &PhysicalDevice,
) -> (
window::SurfaceCapabilities,
Option<Vec<f::Format>>,
Vec<window::PresentMode>,
) {
let extent = unsafe {
let mut rect: RECT = mem::zeroed();
GetClientRect(self.hwnd, &mut rect);
window::Extent2D {
width: (rect.right - rect.left) as _,
height: (rect.bottom - rect.top) as _,
}
};
let caps = window::SurfaceCapabilities {
image_count: 2..= 2,
current_extent: Some(extent),
extents: extent..= extent,
max_image_layers: 1,
usage: image::Usage::COLOR_ATTACHMENT | image::Usage::TRANSFER_SRC,
composite_alpha: window::CompositeAlpha::OPAQUE, //TODO
};
let present_modes = vec![
window::PresentMode::Fifo, //TODO
];
(
caps,
Some(vec![f::Format::Rgba8Srgb, f::Format::Bgra8Srgb]),
present_modes,
)
}
fn supports_queue_family(&self, _queue_family: &QueueFamily) -> bool {
true
}
}
impl window::PresentationSurface<Backend> for Surface {
type SwapchainImage = native::ImageView;
unsafe fn configure_swapchain(
&mut self,
device: &Device,
config: window::SwapchainConfig,
) -> Result<(), window::CreationError> {
let gl = &device.share.context;
let context = match self.swapchain.take() {
Some(old) => {
for fbo in old.fbos {
gl.delete_framebuffer(fbo);
}
old.context
}
None => PresentContext::new(self, &device.share.instance_context),
};
context.make_current();
if self.renderbuffer.is_none() {
self.renderbuffer = Some(gl.create_renderbuffer().unwrap());
}
let desc = conv::describe_format(config.format).unwrap();
gl.bind_renderbuffer(glow::RENDERBUFFER, self.renderbuffer);
gl.renderbuffer_storage(
glow::RENDERBUFFER,
desc.tex_internal,
config.extent.width as i32,
config.extent.height as i32,
);
let fbo = gl.create_framebuffer().unwrap();
gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(fbo));
gl.framebuffer_renderbuffer(
glow::READ_FRAMEBUFFER,
glow::COLOR_ATTACHMENT0,
glow::RENDERBUFFER,
self.renderbuffer,
);
self.swapchain = Some(Swapchain {
context,
extent: config.extent,
fbos: iter::once(fbo).collect(),
});
Ok(())
}
unsafe fn unconfigure_swapchain(&mut self, device: &Device) {
let gl = &device.share.context;
if let Some(old) = self.swapchain.take() {
for fbo in old.fbos {
gl.delete_framebuffer(fbo);
}
}
if let Some(rbo) = self.renderbuffer.take() {
gl.delete_renderbuffer(rbo);
}
}
unsafe fn acquire_image(
&mut self,
_timeout_ns: u64,
) -> Result<(Self::SwapchainImage, Option<window::Suboptimal>), window::AcquireError> {
let image = native::ImageView::Renderbuffer(self.renderbuffer.unwrap());
Ok((image, None))
}
}
#[derive(Debug)]
pub struct Swapchain {
pub(crate) fbos: ArrayVec<[native::RawFrameBuffer; 3]>,
pub(crate) context: PresentContext,
pub(crate) extent: window::Extent2D,
}
impl Swapchain {
pub(crate) fn make_current(&self) {
self.context.make_current();
}
pub(crate) fn swap_buffers(&self) {
self.context.swap_buffers();
}
}
impl window::Swapchain<Backend> for Swapchain {
unsafe fn acquire_image(
&mut self,
_timeout_ns: u64,
_semaphore: Option<&native::Semaphore>,
_fence: Option<&native::Fence>,
) -> Result<(window::SwapImageIndex, Option<window::Suboptimal>), window::AcquireError> {
Ok((0, None)) // TODO
}
}
/// Basic abstraction for wgl context handles.
#[derive(Debug, Copy, Clone)]
struct Context {
glrc: HGLRC,
}
impl Context {
unsafe fn make_current(&self, hdc: HDC) {
wglMakeCurrent(hdc, self.glrc);
}
}
/// Owned context for devices and instances.
#[derive(Debug, Copy, Clone)]
pub(crate) struct DeviceContext {
/// Owned wgl context.
ctxt: Context,
/// Device context owned by the corresponding instance.
///
/// This refers to either a pbuffer or dummy window. Therefore not used for actual presentation.
hdc: HDC,
}
// TODO
unsafe impl Send for DeviceContext {}
unsafe impl Sync for DeviceContext {}
impl DeviceContext {
pub(crate) fn | (&self) {
unsafe {
self.ctxt.make_current(self.hdc);
}
}
}
/// Owned context for swapchains which soley is required for presentation.
#[derive(Debug)]
pub(crate) struct PresentContext {
/// Owned wgl context.
ctxt: Context,
/// Device context of the corresponding presentation surface.
hdc: HDC,
}
// TODO
unsafe impl Send for PresentContext {}
unsafe impl Sync for PresentContext {}
impl PresentContext {
pub(crate) fn new(surface: &Surface, device_ctxt: &DeviceContext) -> Self {
// TODO: configuration options
unsafe {
let hdc = GetDC(surface.hwnd);
let desc = PIXELFORMATDESCRIPTOR {
nSize: std::mem::size_of::<PIXELFORMATDESCRIPTOR>() as u16,
nVersion: 1,
dwFlags: PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER,
iPixelType: PFD_TYPE_RGBA,
cColorBits: 32,
cRedBits: 0,
cRedShift: 0,
cGreenBits: 0,
cGreenShift: 0,
cBlueBits: 0,
cBlueShift: 0,
cAlphaBits: 8,
cAlphaShift: 0,
cAccumBits: 0,
cAccumRedBits: 0,
cAccumGreenBits: 0,
cAccumBlueBits: 0,
cAccumAlphaBits: 0,
cDepthBits: 0,
cStencilBits: 0,
cAuxBuffers: 0,
iLayerType: PFD_MAIN_PLANE,
bReserved: 0,
dwLayerMask: 0,
dwVisibleMask: 0,
dwDamageMask: 0,
};
let format_id = ChoosePixelFormat(hdc, &desc);
SetPixelFormat(hdc, format_id, &desc);
let glrc = WGL_ENTRY.wgl.CreateContextAttribsARB(
hdc as *const _,
device_ctxt.ctxt.glrc as _,
ptr::null(),
) as HGLRC;
wglMakeCurrent(hdc, glrc);
PresentContext {
ctxt: Context { glrc },
hdc,
}
}
}
pub(crate) fn make_current(&self) {
unsafe {
self.ctxt.make_current(self.hdc);
}
}
fn swap_buffers(&self) {
unsafe {
SwapBuffers(self.hdc);
}
}
}
| make_current | identifier_name |
particle.rs | use std::{
mem::size_of,
num::{NonZeroU32, NonZeroU8},
};
use crate::{
client::{
entity::particle::Particle,
render::{
create_texture,
pipeline::{Pipeline, PushConstantUpdate},
world::{Camera, WorldPipelineBase},
Palette, TextureData,
},
},
common::{math::Angles, util::any_slice_as_bytes},
};
use bumpalo::Bump;
use cgmath::Matrix4;
lazy_static! {
static ref VERTEX_BUFFER_ATTRIBUTES: [Vec<wgpu::VertexAttribute>; 1] = [
wgpu::vertex_attr_array![
// position
0 => Float32x3,
// texcoord
1 => Float32x2,
].to_vec(),
];
}
#[rustfmt::skip]
const PARTICLE_TEXTURE_PIXELS: [u8; 64] = [
0, 0, 1, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 0, 0,
];
pub struct ParticlePipeline {
pipeline: wgpu::RenderPipeline,
bind_group_layouts: Vec<wgpu::BindGroupLayout>,
vertex_buffer: wgpu::Buffer,
sampler: wgpu::Sampler,
textures: Vec<wgpu::Texture>,
texture_views: Vec<wgpu::TextureView>,
bind_group: wgpu::BindGroup,
}
impl ParticlePipeline {
pub fn new(
device: &wgpu::Device,
queue: &wgpu::Queue,
compiler: &mut shaderc::Compiler,
sample_count: u32,
palette: &Palette,
) -> ParticlePipeline {
let (pipeline, bind_group_layouts) =
ParticlePipeline::create(device, compiler, &[], sample_count);
use wgpu::util::DeviceExt as _;
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: unsafe { any_slice_as_bytes(&VERTICES) },
usage: wgpu::BufferUsage::VERTEX,
});
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("particle sampler"),
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Nearest,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Linear,
lod_min_clamp: -1000.0,
lod_max_clamp: 1000.0,
compare: None,
anisotropy_clamp: NonZeroU8::new(16),
border_color: None,
});
let textures: Vec<wgpu::Texture> = (0..256)
.map(|i| {
let mut pixels = PARTICLE_TEXTURE_PIXELS;
// set up palette translation
for pix in pixels.iter_mut() {
if *pix == 0 | else {
*pix *= i as u8;
}
}
let (diffuse_data, _) = palette.translate(&pixels);
create_texture(
device,
queue,
Some(&format!("particle texture {}", i)),
8,
8,
&TextureData::Diffuse(diffuse_data),
)
})
.collect();
let texture_views: Vec<wgpu::TextureView> = textures
.iter()
.map(|t| t.create_view(&Default::default()))
.collect();
let texture_view_refs = texture_views.iter().collect::<Vec<_>>();
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("particle bind group"),
layout: &bind_group_layouts[0],
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Sampler(&sampler),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureViewArray(&texture_view_refs[..]),
},
],
});
ParticlePipeline {
pipeline,
bind_group_layouts,
sampler,
textures,
texture_views,
bind_group,
vertex_buffer,
}
}
pub fn rebuild(
&mut self,
device: &wgpu::Device,
compiler: &mut shaderc::Compiler,
sample_count: u32,
) {
let layout_refs: Vec<_> = self.bind_group_layouts.iter().collect();
self.pipeline = ParticlePipeline::recreate(device, compiler, &layout_refs, sample_count);
}
pub fn pipeline(&self) -> &wgpu::RenderPipeline {
&self.pipeline
}
pub fn bind_group_layouts(&self) -> &[wgpu::BindGroupLayout] {
&self.bind_group_layouts
}
pub fn vertex_buffer(&self) -> &wgpu::Buffer {
&self.vertex_buffer
}
pub fn record_draw<'a, 'b, P>(
&'a self,
pass: &mut wgpu::RenderPass<'a>,
bump: &'a Bump,
camera: &Camera,
particles: P,
) where
P: Iterator<Item = &'b Particle>,
{
use PushConstantUpdate::*;
pass.set_pipeline(self.pipeline());
pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
pass.set_bind_group(0, &self.bind_group, &[]);
// face toward camera
let Angles { pitch, yaw, roll } = camera.angles();
let rotation = Angles {
pitch: -pitch,
yaw: -yaw,
roll: -roll,
}
.mat4_wgpu();
for particle in particles {
let q_origin = particle.origin();
let translation =
Matrix4::from_translation([-q_origin.y, q_origin.z, -q_origin.x].into());
Self::set_push_constants(
pass,
Update(bump.alloc(VertexPushConstants {
transform: camera.view_projection() * translation * rotation,
})),
Retain,
Update(bump.alloc(FragmentPushConstants {
color: particle.color() as u32,
})),
);
pass.draw(0..6, 0..1);
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct VertexPushConstants {
pub transform: Matrix4<f32>,
}
#[derive(Copy, Clone, Debug)]
pub struct FragmentPushConstants {
pub color: u32,
}
const BIND_GROUP_LAYOUT_ENTRIES: &[wgpu::BindGroupLayoutEntry] = &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
filtering: true,
comparison: false,
},
count: None,
},
// per-index texture array
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
multisampled: false,
},
count: NonZeroU32::new(256),
},
];
lazy_static! {
static ref VERTEX_ATTRIBUTES: [[wgpu::VertexAttribute; 2]; 2] = [
wgpu::vertex_attr_array![
// position
0 => Float32x3,
// texcoord
1 => Float32x2,
],
wgpu::vertex_attr_array![
// instance position
2 => Float32x3,
// color index
3 => Uint32,
]
];
}
impl Pipeline for ParticlePipeline {
type VertexPushConstants = VertexPushConstants;
type SharedPushConstants = ();
type FragmentPushConstants = FragmentPushConstants;
fn name() -> &'static str {
"particle"
}
fn vertex_shader() -> &'static str {
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/shaders/particle.vert"
))
}
fn fragment_shader() -> &'static str {
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/shaders/particle.frag"
))
}
// NOTE: if any of the binding indices are changed, they must also be changed in
// the corresponding shaders and the BindGroupLayout generation functions.
fn bind_group_layout_descriptors() -> Vec<wgpu::BindGroupLayoutDescriptor<'static>> {
vec![
// group 0
wgpu::BindGroupLayoutDescriptor {
label: Some("particle bind group layout"),
entries: BIND_GROUP_LAYOUT_ENTRIES,
},
]
}
fn primitive_state() -> wgpu::PrimitiveState {
WorldPipelineBase::primitive_state()
}
fn color_target_states() -> Vec<wgpu::ColorTargetState> {
WorldPipelineBase::color_target_states()
}
fn depth_stencil_state() -> Option<wgpu::DepthStencilState> {
let mut desc = WorldPipelineBase::depth_stencil_state().unwrap();
desc.depth_write_enabled = false;
Some(desc)
}
// NOTE: if the vertex format is changed, this descriptor must also be changed accordingly.
fn vertex_buffer_layouts() -> Vec<wgpu::VertexBufferLayout<'static>> {
vec![wgpu::VertexBufferLayout {
array_stride: size_of::<ParticleVertex>() as u64,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &VERTEX_ATTRIBUTES[0],
}]
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct ParticleVertex {
position: [f32; 3],
texcoord: [f32; 2],
}
pub const VERTICES: [ParticleVertex; 6] = [
ParticleVertex {
position: [-1.0, -1.0, 0.0],
texcoord: [0.0, 1.0],
},
ParticleVertex {
position: [-1.0, 1.0, 0.0],
texcoord: [0.0, 0.0],
},
ParticleVertex {
position: [1.0, 1.0, 0.0],
texcoord: [1.0, 0.0],
},
ParticleVertex {
position: [-1.0, -1.0, 0.0],
texcoord: [0.0, 1.0],
},
ParticleVertex {
position: [1.0, 1.0, 0.0],
texcoord: [1.0, 0.0],
},
ParticleVertex {
position: [1.0, -1.0, 0.0],
texcoord: [1.0, 1.0],
},
];
#[repr(C)]
pub struct ParticleInstance {
color: u32,
}
| {
*pix = 0xFF;
} | conditional_block |
particle.rs | use std::{
mem::size_of,
num::{NonZeroU32, NonZeroU8},
};
use crate::{
client::{
entity::particle::Particle,
render::{
create_texture,
pipeline::{Pipeline, PushConstantUpdate},
world::{Camera, WorldPipelineBase},
Palette, TextureData,
},
},
common::{math::Angles, util::any_slice_as_bytes},
};
use bumpalo::Bump;
use cgmath::Matrix4;
lazy_static! {
static ref VERTEX_BUFFER_ATTRIBUTES: [Vec<wgpu::VertexAttribute>; 1] = [
wgpu::vertex_attr_array![
// position
0 => Float32x3,
// texcoord
1 => Float32x2,
].to_vec(),
];
}
#[rustfmt::skip]
const PARTICLE_TEXTURE_PIXELS: [u8; 64] = [
0, 0, 1, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 0, 0,
];
pub struct ParticlePipeline {
pipeline: wgpu::RenderPipeline,
bind_group_layouts: Vec<wgpu::BindGroupLayout>,
vertex_buffer: wgpu::Buffer,
sampler: wgpu::Sampler,
textures: Vec<wgpu::Texture>,
texture_views: Vec<wgpu::TextureView>,
bind_group: wgpu::BindGroup,
}
impl ParticlePipeline {
pub fn new(
device: &wgpu::Device,
queue: &wgpu::Queue,
compiler: &mut shaderc::Compiler,
sample_count: u32,
palette: &Palette,
) -> ParticlePipeline {
let (pipeline, bind_group_layouts) =
ParticlePipeline::create(device, compiler, &[], sample_count);
use wgpu::util::DeviceExt as _;
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: unsafe { any_slice_as_bytes(&VERTICES) },
usage: wgpu::BufferUsage::VERTEX,
});
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("particle sampler"),
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Nearest,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Linear,
lod_min_clamp: -1000.0,
lod_max_clamp: 1000.0,
compare: None,
anisotropy_clamp: NonZeroU8::new(16),
border_color: None,
});
let textures: Vec<wgpu::Texture> = (0..256)
.map(|i| {
let mut pixels = PARTICLE_TEXTURE_PIXELS;
// set up palette translation
for pix in pixels.iter_mut() {
if *pix == 0 {
*pix = 0xFF;
} else {
*pix *= i as u8;
}
}
let (diffuse_data, _) = palette.translate(&pixels);
create_texture(
device,
queue,
Some(&format!("particle texture {}", i)),
8,
8,
&TextureData::Diffuse(diffuse_data),
)
})
.collect();
let texture_views: Vec<wgpu::TextureView> = textures
.iter()
.map(|t| t.create_view(&Default::default()))
.collect();
let texture_view_refs = texture_views.iter().collect::<Vec<_>>();
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("particle bind group"),
layout: &bind_group_layouts[0],
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Sampler(&sampler),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureViewArray(&texture_view_refs[..]),
},
],
});
ParticlePipeline {
pipeline,
bind_group_layouts,
sampler,
textures,
texture_views,
bind_group,
vertex_buffer,
}
}
pub fn rebuild(
&mut self,
device: &wgpu::Device,
compiler: &mut shaderc::Compiler,
sample_count: u32,
) {
let layout_refs: Vec<_> = self.bind_group_layouts.iter().collect();
self.pipeline = ParticlePipeline::recreate(device, compiler, &layout_refs, sample_count);
}
pub fn | (&self) -> &wgpu::RenderPipeline {
&self.pipeline
}
pub fn bind_group_layouts(&self) -> &[wgpu::BindGroupLayout] {
&self.bind_group_layouts
}
pub fn vertex_buffer(&self) -> &wgpu::Buffer {
&self.vertex_buffer
}
pub fn record_draw<'a, 'b, P>(
&'a self,
pass: &mut wgpu::RenderPass<'a>,
bump: &'a Bump,
camera: &Camera,
particles: P,
) where
P: Iterator<Item = &'b Particle>,
{
use PushConstantUpdate::*;
pass.set_pipeline(self.pipeline());
pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
pass.set_bind_group(0, &self.bind_group, &[]);
// face toward camera
let Angles { pitch, yaw, roll } = camera.angles();
let rotation = Angles {
pitch: -pitch,
yaw: -yaw,
roll: -roll,
}
.mat4_wgpu();
for particle in particles {
let q_origin = particle.origin();
let translation =
Matrix4::from_translation([-q_origin.y, q_origin.z, -q_origin.x].into());
Self::set_push_constants(
pass,
Update(bump.alloc(VertexPushConstants {
transform: camera.view_projection() * translation * rotation,
})),
Retain,
Update(bump.alloc(FragmentPushConstants {
color: particle.color() as u32,
})),
);
pass.draw(0..6, 0..1);
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct VertexPushConstants {
pub transform: Matrix4<f32>,
}
#[derive(Copy, Clone, Debug)]
pub struct FragmentPushConstants {
pub color: u32,
}
const BIND_GROUP_LAYOUT_ENTRIES: &[wgpu::BindGroupLayoutEntry] = &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
filtering: true,
comparison: false,
},
count: None,
},
// per-index texture array
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
multisampled: false,
},
count: NonZeroU32::new(256),
},
];
lazy_static! {
static ref VERTEX_ATTRIBUTES: [[wgpu::VertexAttribute; 2]; 2] = [
wgpu::vertex_attr_array![
// position
0 => Float32x3,
// texcoord
1 => Float32x2,
],
wgpu::vertex_attr_array![
// instance position
2 => Float32x3,
// color index
3 => Uint32,
]
];
}
impl Pipeline for ParticlePipeline {
type VertexPushConstants = VertexPushConstants;
type SharedPushConstants = ();
type FragmentPushConstants = FragmentPushConstants;
fn name() -> &'static str {
"particle"
}
fn vertex_shader() -> &'static str {
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/shaders/particle.vert"
))
}
fn fragment_shader() -> &'static str {
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/shaders/particle.frag"
))
}
// NOTE: if any of the binding indices are changed, they must also be changed in
// the corresponding shaders and the BindGroupLayout generation functions.
fn bind_group_layout_descriptors() -> Vec<wgpu::BindGroupLayoutDescriptor<'static>> {
vec![
// group 0
wgpu::BindGroupLayoutDescriptor {
label: Some("particle bind group layout"),
entries: BIND_GROUP_LAYOUT_ENTRIES,
},
]
}
fn primitive_state() -> wgpu::PrimitiveState {
WorldPipelineBase::primitive_state()
}
fn color_target_states() -> Vec<wgpu::ColorTargetState> {
WorldPipelineBase::color_target_states()
}
fn depth_stencil_state() -> Option<wgpu::DepthStencilState> {
let mut desc = WorldPipelineBase::depth_stencil_state().unwrap();
desc.depth_write_enabled = false;
Some(desc)
}
// NOTE: if the vertex format is changed, this descriptor must also be changed accordingly.
fn vertex_buffer_layouts() -> Vec<wgpu::VertexBufferLayout<'static>> {
vec![wgpu::VertexBufferLayout {
array_stride: size_of::<ParticleVertex>() as u64,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &VERTEX_ATTRIBUTES[0],
}]
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct ParticleVertex {
position: [f32; 3],
texcoord: [f32; 2],
}
pub const VERTICES: [ParticleVertex; 6] = [
ParticleVertex {
position: [-1.0, -1.0, 0.0],
texcoord: [0.0, 1.0],
},
ParticleVertex {
position: [-1.0, 1.0, 0.0],
texcoord: [0.0, 0.0],
},
ParticleVertex {
position: [1.0, 1.0, 0.0],
texcoord: [1.0, 0.0],
},
ParticleVertex {
position: [-1.0, -1.0, 0.0],
texcoord: [0.0, 1.0],
},
ParticleVertex {
position: [1.0, 1.0, 0.0],
texcoord: [1.0, 0.0],
},
ParticleVertex {
position: [1.0, -1.0, 0.0],
texcoord: [1.0, 1.0],
},
];
#[repr(C)]
pub struct ParticleInstance {
color: u32,
}
| pipeline | identifier_name |
particle.rs | use std::{
mem::size_of,
num::{NonZeroU32, NonZeroU8},
};
use crate::{
client::{
entity::particle::Particle,
render::{
create_texture,
pipeline::{Pipeline, PushConstantUpdate},
world::{Camera, WorldPipelineBase},
Palette, TextureData,
},
},
common::{math::Angles, util::any_slice_as_bytes},
};
use bumpalo::Bump;
use cgmath::Matrix4;
lazy_static! {
static ref VERTEX_BUFFER_ATTRIBUTES: [Vec<wgpu::VertexAttribute>; 1] = [
wgpu::vertex_attr_array![
// position
0 => Float32x3,
// texcoord
1 => Float32x2,
].to_vec(),
];
}
#[rustfmt::skip]
const PARTICLE_TEXTURE_PIXELS: [u8; 64] = [
0, 0, 1, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 0, 0,
];
pub struct ParticlePipeline {
pipeline: wgpu::RenderPipeline,
bind_group_layouts: Vec<wgpu::BindGroupLayout>,
vertex_buffer: wgpu::Buffer,
sampler: wgpu::Sampler,
textures: Vec<wgpu::Texture>,
texture_views: Vec<wgpu::TextureView>,
bind_group: wgpu::BindGroup,
}
impl ParticlePipeline {
pub fn new(
device: &wgpu::Device,
queue: &wgpu::Queue,
compiler: &mut shaderc::Compiler,
sample_count: u32,
palette: &Palette,
) -> ParticlePipeline {
let (pipeline, bind_group_layouts) =
ParticlePipeline::create(device, compiler, &[], sample_count);
use wgpu::util::DeviceExt as _;
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: unsafe { any_slice_as_bytes(&VERTICES) },
usage: wgpu::BufferUsage::VERTEX,
});
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("particle sampler"),
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Nearest,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Linear,
lod_min_clamp: -1000.0,
lod_max_clamp: 1000.0,
compare: None,
anisotropy_clamp: NonZeroU8::new(16),
border_color: None,
});
let textures: Vec<wgpu::Texture> = (0..256)
.map(|i| {
let mut pixels = PARTICLE_TEXTURE_PIXELS;
// set up palette translation
for pix in pixels.iter_mut() {
if *pix == 0 {
*pix = 0xFF;
} else {
*pix *= i as u8;
}
}
let (diffuse_data, _) = palette.translate(&pixels);
create_texture(
device,
queue,
Some(&format!("particle texture {}", i)),
8,
8,
&TextureData::Diffuse(diffuse_data),
)
})
.collect();
let texture_views: Vec<wgpu::TextureView> = textures
.iter()
.map(|t| t.create_view(&Default::default()))
.collect();
let texture_view_refs = texture_views.iter().collect::<Vec<_>>();
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("particle bind group"),
layout: &bind_group_layouts[0],
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Sampler(&sampler),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureViewArray(&texture_view_refs[..]),
},
],
});
ParticlePipeline {
pipeline,
bind_group_layouts,
sampler,
textures,
texture_views,
bind_group,
vertex_buffer,
}
}
pub fn rebuild(
&mut self,
device: &wgpu::Device,
compiler: &mut shaderc::Compiler,
sample_count: u32,
) {
let layout_refs: Vec<_> = self.bind_group_layouts.iter().collect();
self.pipeline = ParticlePipeline::recreate(device, compiler, &layout_refs, sample_count);
}
pub fn pipeline(&self) -> &wgpu::RenderPipeline {
&self.pipeline
}
pub fn bind_group_layouts(&self) -> &[wgpu::BindGroupLayout] {
&self.bind_group_layouts
}
pub fn vertex_buffer(&self) -> &wgpu::Buffer {
&self.vertex_buffer
}
pub fn record_draw<'a, 'b, P>(
&'a self,
pass: &mut wgpu::RenderPass<'a>,
bump: &'a Bump,
camera: &Camera,
particles: P,
) where
P: Iterator<Item = &'b Particle>,
{
use PushConstantUpdate::*;
pass.set_pipeline(self.pipeline());
pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
pass.set_bind_group(0, &self.bind_group, &[]);
// face toward camera
let Angles { pitch, yaw, roll } = camera.angles();
let rotation = Angles {
pitch: -pitch,
yaw: -yaw,
roll: -roll,
}
.mat4_wgpu();
for particle in particles {
let q_origin = particle.origin();
let translation =
Matrix4::from_translation([-q_origin.y, q_origin.z, -q_origin.x].into());
Self::set_push_constants(
pass,
Update(bump.alloc(VertexPushConstants {
transform: camera.view_projection() * translation * rotation,
})),
Retain,
Update(bump.alloc(FragmentPushConstants {
color: particle.color() as u32,
})),
);
pass.draw(0..6, 0..1);
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct VertexPushConstants {
pub transform: Matrix4<f32>,
}
#[derive(Copy, Clone, Debug)]
pub struct FragmentPushConstants {
pub color: u32, | visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler {
filtering: true,
comparison: false,
},
count: None,
},
// per-index texture array
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Texture {
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
multisampled: false,
},
count: NonZeroU32::new(256),
},
];
lazy_static! {
static ref VERTEX_ATTRIBUTES: [[wgpu::VertexAttribute; 2]; 2] = [
wgpu::vertex_attr_array![
// position
0 => Float32x3,
// texcoord
1 => Float32x2,
],
wgpu::vertex_attr_array![
// instance position
2 => Float32x3,
// color index
3 => Uint32,
]
];
}
impl Pipeline for ParticlePipeline {
type VertexPushConstants = VertexPushConstants;
type SharedPushConstants = ();
type FragmentPushConstants = FragmentPushConstants;
fn name() -> &'static str {
"particle"
}
fn vertex_shader() -> &'static str {
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/shaders/particle.vert"
))
}
fn fragment_shader() -> &'static str {
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/shaders/particle.frag"
))
}
// NOTE: if any of the binding indices are changed, they must also be changed in
// the corresponding shaders and the BindGroupLayout generation functions.
fn bind_group_layout_descriptors() -> Vec<wgpu::BindGroupLayoutDescriptor<'static>> {
vec![
// group 0
wgpu::BindGroupLayoutDescriptor {
label: Some("particle bind group layout"),
entries: BIND_GROUP_LAYOUT_ENTRIES,
},
]
}
fn primitive_state() -> wgpu::PrimitiveState {
WorldPipelineBase::primitive_state()
}
fn color_target_states() -> Vec<wgpu::ColorTargetState> {
WorldPipelineBase::color_target_states()
}
fn depth_stencil_state() -> Option<wgpu::DepthStencilState> {
let mut desc = WorldPipelineBase::depth_stencil_state().unwrap();
desc.depth_write_enabled = false;
Some(desc)
}
// NOTE: if the vertex format is changed, this descriptor must also be changed accordingly.
fn vertex_buffer_layouts() -> Vec<wgpu::VertexBufferLayout<'static>> {
vec![wgpu::VertexBufferLayout {
array_stride: size_of::<ParticleVertex>() as u64,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &VERTEX_ATTRIBUTES[0],
}]
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct ParticleVertex {
position: [f32; 3],
texcoord: [f32; 2],
}
pub const VERTICES: [ParticleVertex; 6] = [
ParticleVertex {
position: [-1.0, -1.0, 0.0],
texcoord: [0.0, 1.0],
},
ParticleVertex {
position: [-1.0, 1.0, 0.0],
texcoord: [0.0, 0.0],
},
ParticleVertex {
position: [1.0, 1.0, 0.0],
texcoord: [1.0, 0.0],
},
ParticleVertex {
position: [-1.0, -1.0, 0.0],
texcoord: [0.0, 1.0],
},
ParticleVertex {
position: [1.0, 1.0, 0.0],
texcoord: [1.0, 0.0],
},
ParticleVertex {
position: [1.0, -1.0, 0.0],
texcoord: [1.0, 1.0],
},
];
#[repr(C)]
pub struct ParticleInstance {
color: u32,
} | }
const BIND_GROUP_LAYOUT_ENTRIES: &[wgpu::BindGroupLayoutEntry] = &[
wgpu::BindGroupLayoutEntry {
binding: 0, | random_line_split |
particle.rs | use std::{
mem::size_of,
num::{NonZeroU32, NonZeroU8},
};
use crate::{
client::{
entity::particle::Particle,
render::{
create_texture,
pipeline::{Pipeline, PushConstantUpdate},
world::{Camera, WorldPipelineBase},
Palette, TextureData,
},
},
common::{math::Angles, util::any_slice_as_bytes},
};
use bumpalo::Bump;
use cgmath::Matrix4;
lazy_static! {
static ref VERTEX_BUFFER_ATTRIBUTES: [Vec<wgpu::VertexAttribute>; 1] = [
wgpu::vertex_attr_array![
// position
0 => Float32x3,
// texcoord
1 => Float32x2,
].to_vec(),
];
}
#[rustfmt::skip]
const PARTICLE_TEXTURE_PIXELS: [u8; 64] = [
0, 0, 1, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 0, 0,
];
pub struct ParticlePipeline {
pipeline: wgpu::RenderPipeline,
bind_group_layouts: Vec<wgpu::BindGroupLayout>,
vertex_buffer: wgpu::Buffer,
sampler: wgpu::Sampler,
textures: Vec<wgpu::Texture>,
texture_views: Vec<wgpu::TextureView>,
bind_group: wgpu::BindGroup,
}
impl ParticlePipeline {
    /// Creates the particle pipeline: compiles shaders, uploads the shared
    /// quad vertex buffer, builds a clamping sampler, and creates 256 tiny
    /// textures -- one tinted per palette color index -- bound together as a
    /// texture array in bind group 0.
    pub fn new(
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        compiler: &mut shaderc::Compiler,
        sample_count: u32,
        palette: &Palette,
    ) -> ParticlePipeline {
        let (pipeline, bind_group_layouts) =
            ParticlePipeline::create(device, compiler, &[], sample_count);
        use wgpu::util::DeviceExt as _;
        // Upload the six-vertex quad. The byte cast is sound because
        // ParticleVertex is #[repr(C)] plain-old-data.
        let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: None,
            contents: unsafe { any_slice_as_bytes(&VERTICES) },
            usage: wgpu::BufferUsage::VERTEX,
        });
        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("particle sampler"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Nearest,
            min_filter: wgpu::FilterMode::Linear,
            mipmap_filter: wgpu::FilterMode::Linear,
            lod_min_clamp: -1000.0,
            lod_max_clamp: 1000.0,
            compare: None,
            anisotropy_clamp: NonZeroU8::new(16),
            border_color: None,
        });
        // Build one 8x8 texture per palette index i: mask pixels of 1 become
        // index i, mask pixels of 0 become 0xFF (presumably the palette's
        // transparent index -- confirm against Palette::translate).
        let textures: Vec<wgpu::Texture> = (0..256)
            .map(|i| {
                let mut pixels = PARTICLE_TEXTURE_PIXELS;
                // set up palette translation
                for pix in pixels.iter_mut() {
                    if *pix == 0 {
                        *pix = 0xFF;
                    } else {
                        // Mask value is 1 here, so this sets the pixel to i.
                        *pix *= i as u8;
                    }
                }
                let (diffuse_data, _) = palette.translate(&pixels);
                create_texture(
                    device,
                    queue,
                    Some(&format!("particle texture {}", i)),
                    8,
                    8,
                    &TextureData::Diffuse(diffuse_data),
                )
            })
            .collect();
        let texture_views: Vec<wgpu::TextureView> = textures
            .iter()
            .map(|t| t.create_view(&Default::default()))
            .collect();
        let texture_view_refs = texture_views.iter().collect::<Vec<_>>();
        // Group 0: binding 0 = sampler, binding 1 = 256-element texture
        // array (matches BIND_GROUP_LAYOUT_ENTRIES below).
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("particle bind group"),
            layout: &bind_group_layouts[0],
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Sampler(&sampler),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::TextureViewArray(&texture_view_refs[..]),
                },
            ],
        });
        ParticlePipeline {
            pipeline,
            bind_group_layouts,
            sampler,
            textures,
            texture_views,
            bind_group,
            vertex_buffer,
        }
    }
    /// Recreates the render pipeline (e.g. after the MSAA sample count
    /// changes), reusing the existing bind group layouts.
    pub fn rebuild(
        &mut self,
        device: &wgpu::Device,
        compiler: &mut shaderc::Compiler,
        sample_count: u32,
    ) {
        let layout_refs: Vec<_> = self.bind_group_layouts.iter().collect();
        self.pipeline = ParticlePipeline::recreate(device, compiler, &layout_refs, sample_count);
    }

    /// The compiled render pipeline.
    pub fn pipeline(&self) -> &wgpu::RenderPipeline {
        &self.pipeline
    }

    /// Bind group layouts, in bind-group-index order.
    pub fn bind_group_layouts(&self) -> &[wgpu::BindGroupLayout] {
        &self.bind_group_layouts
    }

    /// The shared six-vertex quad buffer.
    pub fn vertex_buffer(&self) -> &wgpu::Buffer {
        &self.vertex_buffer
    }
pub fn record_draw<'a, 'b, P>(
&'a self,
pass: &mut wgpu::RenderPass<'a>,
bump: &'a Bump,
camera: &Camera,
particles: P,
) where
P: Iterator<Item = &'b Particle>,
| Self::set_push_constants(
pass,
Update(bump.alloc(VertexPushConstants {
transform: camera.view_projection() * translation * rotation,
})),
Retain,
Update(bump.alloc(FragmentPushConstants {
color: particle.color() as u32,
})),
);
pass.draw(0..6, 0..1);
}
}
}
/// Per-draw vertex-stage push constants.
///
/// `#[repr(C)]` guarantees a stable, defined layout: push-constant data is
/// handed to the GPU as raw bytes, and the other GPU-visible types in this
/// file (`ParticleVertex`, `ParticleInstance`) are already `#[repr(C)]` --
/// these two structs were the odd ones out.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct VertexPushConstants {
    /// Combined view-projection * model transform for the particle quad.
    pub transform: Matrix4<f32>,
}

/// Per-draw fragment-stage push constants.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct FragmentPushConstants {
    /// Palette color index selecting one of the 256 particle textures.
    pub color: u32,
}
// Layout of bind group 0: a filtering sampler plus an array of 256 textures
// (one per palette color index), both visible only to the fragment stage.
const BIND_GROUP_LAYOUT_ENTRIES: &[wgpu::BindGroupLayoutEntry] = &[
    wgpu::BindGroupLayoutEntry {
        binding: 0,
        visibility: wgpu::ShaderStage::FRAGMENT,
        ty: wgpu::BindingType::Sampler {
            filtering: true,
            comparison: false,
        },
        count: None,
    },
    // per-index texture array
    wgpu::BindGroupLayoutEntry {
        binding: 1,
        visibility: wgpu::ShaderStage::FRAGMENT,
        ty: wgpu::BindingType::Texture {
            view_dimension: wgpu::TextureViewDimension::D2,
            sample_type: wgpu::TextureSampleType::Float { filterable: true },
            multisampled: false,
        },
        // A Some(n) count makes this binding a binding *array* of n textures.
        count: NonZeroU32::new(256),
    },
];

lazy_static! {
    // Attribute set 0 is the per-vertex quad layout actually used by
    // vertex_buffer_layouts(). Set 1 describes per-instance data but is not
    // referenced anywhere in this file's visible code -- presumably
    // superseded by push constants; confirm before removing.
    static ref VERTEX_ATTRIBUTES: [[wgpu::VertexAttribute; 2]; 2] = [
        wgpu::vertex_attr_array![
            // position
            0 => Float32x3,
            // texcoord
            1 => Float32x2,
        ],
        wgpu::vertex_attr_array![
            // instance position
            2 => Float32x3,
            // color index
            3 => Uint32,
        ]
    ];
}
impl Pipeline for ParticlePipeline {
    type VertexPushConstants = VertexPushConstants;
    type SharedPushConstants = ();
    type FragmentPushConstants = FragmentPushConstants;

    fn name() -> &'static str {
        "particle"
    }

    /// GLSL vertex shader source, embedded at compile time and compiled to
    /// SPIR-V at runtime via shaderc.
    fn vertex_shader() -> &'static str {
        include_str!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/shaders/particle.vert"
        ))
    }

    fn fragment_shader() -> &'static str {
        include_str!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/shaders/particle.frag"
        ))
    }

    // NOTE: if any of the binding indices are changed, they must also be changed in
    // the corresponding shaders and the BindGroupLayout generation functions.
    fn bind_group_layout_descriptors() -> Vec<wgpu::BindGroupLayoutDescriptor<'static>> {
        vec![
            // group 0
            wgpu::BindGroupLayoutDescriptor {
                label: Some("particle bind group layout"),
                entries: BIND_GROUP_LAYOUT_ENTRIES,
            },
        ]
    }

    // Rasterizer and color-target state are shared with the world pipeline.
    fn primitive_state() -> wgpu::PrimitiveState {
        WorldPipelineBase::primitive_state()
    }

    fn color_target_states() -> Vec<wgpu::ColorTargetState> {
        WorldPipelineBase::color_target_states()
    }

    /// Depth-tests like the world pipeline but with depth *writes* disabled
    /// -- presumably so overlapping particles don't occlude one another.
    fn depth_stencil_state() -> Option<wgpu::DepthStencilState> {
        let mut desc = WorldPipelineBase::depth_stencil_state().unwrap();
        desc.depth_write_enabled = false;
        Some(desc)
    }

    // NOTE: if the vertex format is changed, this descriptor must also be changed accordingly.
    fn vertex_buffer_layouts() -> Vec<wgpu::VertexBufferLayout<'static>> {
        vec![wgpu::VertexBufferLayout {
            array_stride: size_of::<ParticleVertex>() as u64,
            step_mode: wgpu::InputStepMode::Vertex,
            attributes: &VERTEX_ATTRIBUTES[0],
        }]
    }
}
/// One vertex of the particle quad, matching `VERTEX_ATTRIBUTES[0]`.
/// `#[repr(C)]` so the VERTICES slice can be uploaded to the GPU as bytes.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct ParticleVertex {
    position: [f32; 3],
    texcoord: [f32; 2],
}

/// A 2x2 quad in the z = 0 plane as two triangles (six vertices).
/// Texture v runs opposite to y (v = 0 at the top edge).
pub const VERTICES: [ParticleVertex; 6] = [
    ParticleVertex {
        position: [-1.0, -1.0, 0.0],
        texcoord: [0.0, 1.0],
    },
    ParticleVertex {
        position: [-1.0, 1.0, 0.0],
        texcoord: [0.0, 0.0],
    },
    ParticleVertex {
        position: [1.0, 1.0, 0.0],
        texcoord: [1.0, 0.0],
    },
    ParticleVertex {
        position: [-1.0, -1.0, 0.0],
        texcoord: [0.0, 1.0],
    },
    ParticleVertex {
        position: [1.0, 1.0, 0.0],
        texcoord: [1.0, 0.0],
    },
    ParticleVertex {
        position: [1.0, -1.0, 0.0],
        texcoord: [1.0, 1.0],
    },
];

/// Per-instance particle data.
/// NOTE(review): not referenced anywhere in this file's visible code --
/// possibly dead, or used by code outside this chunk; confirm before removal.
#[repr(C)]
pub struct ParticleInstance {
    color: u32,
}
| {
use PushConstantUpdate::*;
pass.set_pipeline(self.pipeline());
pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
pass.set_bind_group(0, &self.bind_group, &[]);
// face toward camera
let Angles { pitch, yaw, roll } = camera.angles();
let rotation = Angles {
pitch: -pitch,
yaw: -yaw,
roll: -roll,
}
.mat4_wgpu();
for particle in particles {
let q_origin = particle.origin();
let translation =
Matrix4::from_translation([-q_origin.y, q_origin.z, -q_origin.x].into()); | identifier_body |
main.rs | fmt::{Display, Formatter, Result as FmtResult};
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::str::FromStr;
use anyhow::{anyhow, Error as AnyErr, Result as AnyResult};
use clap::{App, Arg};
use log::debug;
use slotmap::SlotMap;
use aoc::grid::{Compass, Position, Turn};
use aoc::intcomp::{IntComp, OutputVec, Stopped};
/// One room description parsed from the game's text output.
#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct Room {
    // Name from the "== Name ==" header line.
    pub name: String,
    // Flavor text printed under the header.
    pub message: String,
    // Items lying on the floor here.
    pub items: BTreeSet<String>,
    // Directions that have open doors.
    pub directions: Vec<Compass>,
}
impl Display for Room {
    /// Formats as `Room[<name>, doors=..., items=..., message='...']`,
    /// omitting the doors and items sections when they are empty.
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        // BUG FIX: the header previously ended with "doors=" unconditionally
        // AND the branch below printed "doors=" again, producing
        // "Room[x, doors=doors=NS, ...]" (or a dangling "doors=" for a room
        // with no recorded doors).
        write!(f, "Room[{}, ", self.name)?;
        if !self.directions.is_empty() {
            write!(f, "doors=")?;
            for &d in &self.directions {
                write!(f, "{}", d)?;
            }
            write!(f, ", ")?;
        }
        for (ix, item) in self.items.iter().enumerate() {
            if ix == 0 {
                write!(f, "items={}", item)?;
            } else {
                write!(f, ",{}", item)?;
            }
            // Close the items section after the last item.
            if ix == self.items.len() - 1 {
                write!(f, ", ")?;
            }
        }
        write!(f, "message='{}']", self.message)?;
        Ok(())
    }
}
/// Parses a lowercase cardinal direction name into a [`Compass`] value.
///
/// # Errors
/// Returns an error for any string other than "north"/"south"/"east"/"west".
fn str_to_compass(s: &str) -> AnyResult<Compass> {
    Ok(match s {
        "north" => Compass::North,
        "south" => Compass::South,
        "east" => Compass::East,
        "west" => Compass::West,
        // Typo fix in the error message: "compoass" -> "compass".
        _ => return Err(anyhow!("'{s}' is not a compass direction")),
    })
}
/// Error used when the game ejects the player (e.g. failing the weight
/// check at the security checkpoint); carries the full alert line.
#[derive(Debug, thiserror::Error)]
#[error("Ejection: {}", _0)]
struct Ejection(String);

impl FromStr for Room {
    type Err = AnyErr;

    /// Parses one room description exactly as printed by the game:
    /// a `== Name ==` header, a flavor message line, a blank line, a
    /// "Doors here lead:" bullet list, an optional "Items here:" bullet
    /// list, and a trailing "Command?" prompt.
    ///
    /// NOTE(review): structural violations panic via `assert!` rather than
    /// returning `Err`; only a missing line or an "Alert!" ejection is
    /// reported as an error.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut lines = s.lines();
        // Skip any leading blank lines before the room header.
        let mut first = lines.next().ok_or_else(|| anyhow!("No first line"))?.trim();
        while first.is_empty() {
            first = lines
                .next()
                .ok_or_else(|| anyhow!("No non-empty first line"))?
                .trim();
        }
        // Header looks like "== Room Name ==".
        assert!(first.starts_with("== "));
        assert!(first.ends_with(" =="));
        let name = first
            .trim_end_matches(" ==")
            .trim_start_matches("== ")
            .to_owned();
        let message = lines
            .next()
            .ok_or_else(|| anyhow!("No second line"))?
            .trim()
            .to_owned();
        assert!(!message.is_empty(), "Expected non-empty message");
        let next = lines.next().ok_or_else(|| anyhow!("No third line"))?.trim();
        assert!(next.is_empty(), "Expected third line to be empty");
        let next = lines
            .next()
            .ok_or_else(|| anyhow!("No fourth line"))?
            .trim();
        assert!(
            next == "Doors here lead:",
            "Expected third line to be 'Doors here lead:'"
        );
        // Collect the "- <direction>" bullet list.
        let mut directions = Vec::new();
        let mut next = lines
            .next()
            .ok_or_else(|| anyhow!("No line after doors"))?
            .trim();
        while next.starts_with("- ") {
            let dir = next.trim_start_matches("- ");
            let dir = str_to_compass(dir)?;
            directions.push(dir);
            next = lines
                .next()
                .ok_or_else(|| anyhow!("No line after directions"))?
                .trim();
        }
        assert!(
            next.is_empty(),
            "Expected line after directions to be empty"
        );
        next = lines
            .next()
            .ok_or_else(|| anyhow!("No line after directions + empty"))?
            .trim();
        // Optional "- <item>" bullet list.
        let mut items = BTreeSet::new();
        if next == "Items here:" {
            next = lines
                .next()
                .ok_or_else(|| anyhow!("No line after items"))?
                .trim();
            while next.starts_with("- ") {
                let item = next.trim_start_matches("- ");
                items.insert(item.to_owned());
                next = lines
                    .next()
                    .ok_or_else(|| anyhow!("No line after items"))?
                    .trim();
            }
            assert!(
                next.is_empty(),
                "Expected line after items to be empty, got '{}'",
                next
            );
            next = lines
                .next()
                .ok_or_else(|| anyhow!("No line after items + empty"))?
                .trim();
        }
        // A weight-check ejection replaces the "Command?" prompt.
        if next.contains("Alert!") {
            return Err(Ejection(next.into()).into());
        }
        assert_eq!(
            next, "Command?",
            "Expected line after items to be 'Command?'"
        );
        assert!(lines.next().is_none());
        Ok(Room {
            name,
            message,
            items,
            directions,
        })
    }
}
/// Game state: the running Intcode droid plus everything learned so far.
/// Cloning it snapshots the whole game, which try_item_combos relies on.
#[derive(Debug, Clone)]
pub struct Explorer {
    // The running game program.
    comp: IntComp,
    // Key of the room we are currently standing in.
    room: Key,
    // Heading of the most recent step (starts North).
    direction: Compass,
    // Items currently held.
    carrying: BTreeSet<String>,
    map: Map,
}
impl Explorer {
fn new(mut comp: IntComp) -> AnyResult<Self> {
let mut output = OutputVec::new();
comp.run_to_input(&mut output)?;
let out = output.as_string()?;
let room = Room::from_str(&out)?;
let mut map: Map = Default::default();
let key = map.add_room(room);
let exp = Explorer {
comp,
room: key,
direction: Compass::North,
carrying: Default::default(),
map,
};
Ok(exp)
}
fn see_room(&self) -> &Room {
self.map.get(self.room)
}
fn process_input_str(&mut self, output: &mut OutputVec, input: &str) -> anyhow::Result<String> {
log::debug!("Process 1: '{}'", input);
self.comp
.process_ascii(input, output)?
.expect(Stopped::Input)?;
log::debug!("Process 2: '\\n'");
self.comp
.process_ascii("\n", output)?
.expect(Stopped::Input)?;
log::debug!("Processed: '\\n'");
Ok(output.as_string()?)
}
fn process_str(&mut self, input: &str) -> anyhow::Result<String> {
let mut out = OutputVec::new();
match self.process_input_str(&mut out, input) {
Ok(v) => Ok(v),
Err(e) => {
let output = out.as_string()?;
log::warn!("process_str failure on input {}, output: {}", input, output);
Err(e)
}
}
}
// fn north(&mut self) -> anyhow::Result<String> {
// self.process_str("north")
// }
// fn south(&mut self) -> anyhow::Result<String> {
// self.process_str("south")
// }
// fn east(&mut self) -> anyhow::Result<String> {
// self.process_str("east")
// }
// fn west(&mut self) -> anyhow::Result<String> {
// self.process_str("west")
// }
pub fn step(&mut self, direction: Compass) -> anyhow::Result<()> {
let input = match direction {
Compass::East => "east",
Compass::North => "north",
Compass::South => "south",
Compass::West => "west",
};
log::debug!("Taking step {}", input);
let output = self.process_str(input)?;
log::debug!("Took step:\n{}\n", output);
let room = Room::from_str(&output)?;
let new = self.map.add_room(room);
self.map.add_door(self.room, direction, new);
self.room = new;
self.direction = direction;
Ok(())
}
pub fn take(&mut self, item: &str) -> anyhow::Result<String> {
log::debug!("Taking {}", item);
let mut s = String::from("take ");
s.push_str(item);
let result = self.process_str(&s)?;
let new = self.carrying.insert(item.to_string());
let room = self.map.rooms.get_mut(self.room).unwrap();
room.items.remove(item);
assert!(new, "Expected to add {}", item);
log::debug!(" took {}", item);
Ok(result)
}
pub fn drop(&mut self, item: &str) -> anyhow::Result<String> {
let found = self.carrying.remove(item);
assert!(!found, "Expected to drop {}", item);
let mut s = String::from("drop ");
s.push_str(item);
self.process_str(&s)
}
pub fn inventory(&mut self) -> anyhow::Result<String> {
self.process_str("inv")
}
fn left_wall_step(&mut self) -> AnyResult<()> {
let mut dir = self.direction + Turn::Left;
for _ in 0..4 {
log::debug!("Checking {} -> {}", self.direction, dir);
if self.see_room().directions.contains(&dir) {
break;
}
dir = dir + Turn::Right;
}
assert!(self.see_room().directions.contains(&dir));
self.step(dir)?;
log::debug!("Stepped {}, {}", dir, self.see_room().name);
Ok(())
}
fn explore_and_take(&mut self, items: &BTreeSet<String>) -> AnyResult<()> {
let start = self.room;
let mut start_directions = self.see_room().directions.clone();
start_directions.reverse();
loop {
let overlap: BTreeSet<String> = items
.intersection(&self.see_room().items)
.map(|s| s.to_owned())
.collect();
for item in overlap {
let _output = self.take(&item)?;
// println!("Took {}, output: {}", item, output.trim());
}
if self.see_room().name == "Security Checkpoint" {
// println!("inv: {}", self.inventory()?);
log::info!("Turning around at security checkpoint");
self.step(self.direction + Turn::Reverse)?;
continue;
}
if self.room == start {
let dir = match start_directions.pop() {
None => return Ok(()),
Some(d) => d,
};
self.step(dir)?;
continue;
}
self.left_wall_step()?;
}
}
pub fn goto(&mut self, room: &str) -> AnyResult<()> {
loop {
if self.see_room().name == room {
return Ok(());
}
self.left_wall_step()?;
}
}
}
type Key = slotmap::DefaultKey;
#[derive(Default, Debug, Clone)]
pub struct Map {
rooms_by_name: HashMap<String, Key>,
rooms: SlotMap<Key, Room>,
doors: HashMap<Key, BTreeMap<Compass, Key>>,
unvisited: HashMap<Key, BTreeSet<Compass>>,
}
impl Map {
fn add_room(&mut self, room: Room) -> Key {
if let Some(&key) = self.rooms_by_name.get(&room.name) {
return key;
}
let name = room.name.clone();
let directions = room.directions.clone();
let key = self.rooms.insert(room);
self.rooms_by_name.insert(name, key);
let unvisited = self.unvisited.insert(key, Default::default());
assert!(unvisited.is_none());
let unvisited = self.unvisited.get_mut(&key).unwrap();
for dir in directions {
unvisited.insert(dir);
}
key
}
fn visit(&mut self, room: Key, direction: Compass) {
if let Occupied(mut o) = self.unvisited.entry(room) {
o.get_mut().remove(&direction);
if o.get().is_empty() {
o.remove();
}
}
}
fn add_door(&mut self, first: Key, direction: Compass, second: Key) {
self.doors
.entry(first)
.or_default()
.insert(direction, second);
self.doors
.entry(second)
.or_default()
.insert(direction + Turn::Reverse, first);
self.visit(first, direction);
self.visit(second, direction + Turn::Reverse);
}
pub fn len(&self) -> usize {
self.rooms.len()
}
pub fn is_empty(&self) -> bool {
self.rooms.is_empty()
}
pub fn contains(&self, room: &Room) -> bool {
self.rooms_by_name.contains_key(&room.name)
}
fn get(&self, key: Key) -> &Room {
self.rooms.get(key).unwrap()
}
#[allow(dead_code)]
fn to_coords(&self, origin: Option<Key>) -> HashMap<Position, Key> {
let start = match (origin, self.rooms.iter().next()) {
(Some(k), _) => k,
(None, None) => return Default::default(),
(None, Some((k, _r))) => k,
};
let mut queue = vec![(Position(0, 0), start)];
let mut seen = HashSet::new();
let mut coords = HashMap::new();
while let Some((pos, r)) = queue.pop() {
match coords.entry(pos) {
Occupied(o) => {
assert!(seen.contains(&r));
assert!(*o.get() == r);
}
Vacant(v) => {
assert!(!seen.contains(&r));
seen.insert(r);
v.insert(r);
let neighbors = self.doors.get(&r).unwrap();
for (&d, &r) in neighbors {
queue.push((pos + d, r));
}
}
}
}
coords
}
}
/*
NV
||
SB KT=GW=PS
|| ||
CQ=HD
||
|| OB=ST
|| ||
SG HB=EG=WD=AR=SL
|| ||
HW=HC=CO
||
SC
AR: Arcade
CO: Corridor
CQ: Crew Quarters
EG: Engineering
GW: Gift Wrapping Center
HB: Hull Breach
HC: Hot Chocolate Fountain
HD: Holodeck
HW: Hallway
KT: Kitchen
NV: Navigation
OB: Observatory
SB: Sick Bay
SC: Security Checkpoint
SG: Storage
SL: Science Lab
ST: Stables
WD: Warp Drive Maintenance
*/
fn try_item_combos(initial_explorer: Explorer, items: Vec<String>) -> AnyResult<Explorer> {
let total = 1 << items.len();
for n in 0..total {
let mut explorer = initial_explorer.clone();
let cur_items: BTreeSet<String> = items
.iter()
.enumerate()
.filter_map(|(i, item)| {
if (n & (1 << i)) == 0 {
None
} else {
Some(item.clone())
}
})
.collect();
log::info!("Items: {:?}", cur_items);
explorer.explore_and_take(&cur_items)?;
assert_eq!(explorer.carrying, cur_items);
explorer.goto("Security Checkpoint")?;
let err = match explorer.left_wall_step() {
Ok(()) => return Ok(explorer),
Err(e) => e,
};
match err.downcast::<Ejection>() {
Ok(e) => log::info!(" {}", e),
Err(e) => return Err(e),
}
}
Err(anyhow::anyhow!("Got to end, found nothing!"))
}
#[allow(dead_code)]
fn explore_around(explorer: &mut Explorer) -> AnyResult<()> {
explorer.explore_and_take(&Default::default())?;
println!(
"Visited, back to start. Unvisited: {} Visited {} rooms with {} doors",
explorer.map.unvisited.len(),
explorer.map.rooms.len(),
explorer.map.doors.len()
);
println!("Items:");
for (_, room) in &explorer.map.rooms {
for item in &room.items {
println!(" - {}: {}", room.name, item);
}
}
println!("\nDoors:");
for (&ra, doors) in &explorer.map.doors {
for (dir, &rb) in doors {
let ra = explorer.map.rooms.get(ra).unwrap();
let rb = explorer.map.rooms.get(rb).unwrap();
println!(" {}: {} -> {}", dir, ra.name, rb.name);
}
}
Ok(())
}
fn main() -> anyhow::Result<()> {
env_logger::init();
let matches = App::new("Day 25")
.arg(
Arg::with_name("input")
.short("i")
.long("input")
.value_name("INPUT")
.takes_value(true),
)
.get_matches();
let input_path = matches.value_of("INPUT").unwrap_or("inputs/day25.txt");
debug!("Using input {}", input_path);
let file = File::open(input_path)?;
let buf_reader = BufReader::new(file);
let line: String = buf_reader
.lines()
.next()
.ok_or_else(|| anyhow::format_err!("No line found"))??;
let cp: IntComp = str::parse(&line)?;
let initial_explorer = Explorer::new(cp)?;
let all_items = vec![
// "food ration".to_owned(),
"candy cane".to_owned(),
"mouse".to_owned(),
// "mug".to_owned(),
"coin".to_owned(),
// "ornament".to_owned(),
"semiconductor".to_owned(),
// "mutex".to_owned(),
];
try_item_combos(initial_explorer, all_items)?;
Ok(())
}
#[cfg(test)] | random_line_split |
|
main.rs | ::{Display, Formatter, Result as FmtResult};
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::str::FromStr;
use anyhow::{anyhow, Error as AnyErr, Result as AnyResult};
use clap::{App, Arg};
use log::debug;
use slotmap::SlotMap;
use aoc::grid::{Compass, Position, Turn};
use aoc::intcomp::{IntComp, OutputVec, Stopped};
#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct Room {
pub name: String,
pub message: String,
pub items: BTreeSet<String>,
pub directions: Vec<Compass>,
}
impl Display for Room {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
write!(f, "Room[{}, doors=", self.name)?;
if!self.directions.is_empty() {
write!(f, "doors=")?;
for &d in &self.directions {
write!(f, "{}", d)?;
}
write!(f, ", ")?;
}
for (ix, item) in self.items.iter().enumerate() {
if ix == 0 {
write!(f, "items={}", item)?;
} else {
write!(f, ",{}", item)?;
}
if ix == self.items.len() - 1 {
write!(f, ", ")?;
}
}
write!(f, "message='{}']", self.message)?;
Ok(())
}
}
fn str_to_compass(s: &str) -> AnyResult<Compass> {
Ok(match s {
"north" => Compass::North,
"south" => Compass::South,
"east" => Compass::East,
"west" => Compass::West,
_ => return Err(anyhow!("'{s}' is not a compoass direction")),
})
}
#[derive(Debug, thiserror::Error)]
#[error("Ejection: {}", _0)]
struct Ejection(String);
impl FromStr for Room {
type Err = AnyErr;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut lines = s.lines();
let mut first = lines.next().ok_or_else(|| anyhow!("No first line"))?.trim();
while first.is_empty() {
first = lines
.next()
.ok_or_else(|| anyhow!("No non-empty first line"))?
.trim();
}
assert!(first.starts_with("== "));
assert!(first.ends_with(" =="));
let name = first
.trim_end_matches(" ==")
.trim_start_matches("== ")
.to_owned();
let message = lines
.next()
.ok_or_else(|| anyhow!("No second line"))?
.trim()
.to_owned();
assert!(!message.is_empty(), "Expected non-empty message");
let next = lines.next().ok_or_else(|| anyhow!("No third line"))?.trim();
assert!(next.is_empty(), "Expected third line to be empty");
let next = lines
.next()
.ok_or_else(|| anyhow!("No fourth line"))?
.trim();
assert!(
next == "Doors here lead:",
"Expected third line to be 'Doors here lead:'"
);
let mut directions = Vec::new();
let mut next = lines
.next()
.ok_or_else(|| anyhow!("No line after doors"))?
.trim();
while next.starts_with("- ") {
let dir = next.trim_start_matches("- ");
let dir = str_to_compass(dir)?;
directions.push(dir);
next = lines
.next()
.ok_or_else(|| anyhow!("No line after directions"))?
.trim();
}
assert!(
next.is_empty(),
"Expected line after directions to be empty"
);
next = lines
.next()
.ok_or_else(|| anyhow!("No line after directions + empty"))?
.trim();
let mut items = BTreeSet::new();
if next == "Items here:" {
next = lines
.next()
.ok_or_else(|| anyhow!("No line after items"))?
.trim();
while next.starts_with("- ") {
let item = next.trim_start_matches("- ");
items.insert(item.to_owned());
next = lines
.next()
.ok_or_else(|| anyhow!("No line after items"))?
.trim();
}
assert!(
next.is_empty(),
"Expected line after items to be empty, got '{}'",
next
);
next = lines
.next()
.ok_or_else(|| anyhow!("No line after items + empty"))?
.trim();
}
if next.contains("Alert!") {
return Err(Ejection(next.into()).into());
}
assert_eq!(
next, "Command?",
"Expected line after items to be 'Command?'"
);
assert!(lines.next().is_none());
Ok(Room {
name,
message,
items,
directions,
})
}
}
#[derive(Debug, Clone)]
pub struct Explorer {
comp: IntComp,
room: Key,
direction: Compass,
carrying: BTreeSet<String>,
map: Map,
}
impl Explorer {
fn new(mut comp: IntComp) -> AnyResult<Self> {
let mut output = OutputVec::new();
comp.run_to_input(&mut output)?;
let out = output.as_string()?;
let room = Room::from_str(&out)?;
let mut map: Map = Default::default();
let key = map.add_room(room);
let exp = Explorer {
comp,
room: key,
direction: Compass::North,
carrying: Default::default(),
map,
};
Ok(exp)
}
fn see_room(&self) -> &Room {
self.map.get(self.room)
}
fn process_input_str(&mut self, output: &mut OutputVec, input: &str) -> anyhow::Result<String> {
log::debug!("Process 1: '{}'", input);
self.comp
.process_ascii(input, output)?
.expect(Stopped::Input)?;
log::debug!("Process 2: '\\n'");
self.comp
.process_ascii("\n", output)?
.expect(Stopped::Input)?;
log::debug!("Processed: '\\n'");
Ok(output.as_string()?)
}
fn process_str(&mut self, input: &str) -> anyhow::Result<String> {
let mut out = OutputVec::new();
match self.process_input_str(&mut out, input) {
Ok(v) => Ok(v),
Err(e) => {
let output = out.as_string()?;
log::warn!("process_str failure on input {}, output: {}", input, output);
Err(e)
}
}
}
// fn north(&mut self) -> anyhow::Result<String> {
// self.process_str("north")
// }
// fn south(&mut self) -> anyhow::Result<String> {
// self.process_str("south")
// }
// fn east(&mut self) -> anyhow::Result<String> {
// self.process_str("east")
// }
// fn west(&mut self) -> anyhow::Result<String> {
// self.process_str("west")
// }
pub fn step(&mut self, direction: Compass) -> anyhow::Result<()> {
let input = match direction {
Compass::East => "east",
Compass::North => "north",
Compass::South => "south",
Compass::West => "west",
};
log::debug!("Taking step {}", input);
let output = self.process_str(input)?;
log::debug!("Took step:\n{}\n", output);
let room = Room::from_str(&output)?;
let new = self.map.add_room(room);
self.map.add_door(self.room, direction, new);
self.room = new;
self.direction = direction;
Ok(())
}
pub fn take(&mut self, item: &str) -> anyhow::Result<String> {
log::debug!("Taking {}", item);
let mut s = String::from("take ");
s.push_str(item);
let result = self.process_str(&s)?;
let new = self.carrying.insert(item.to_string());
let room = self.map.rooms.get_mut(self.room).unwrap();
room.items.remove(item);
assert!(new, "Expected to add {}", item);
log::debug!(" took {}", item);
Ok(result)
}
pub fn drop(&mut self, item: &str) -> anyhow::Result<String> {
let found = self.carrying.remove(item);
assert!(!found, "Expected to drop {}", item);
let mut s = String::from("drop ");
s.push_str(item);
self.process_str(&s)
}
pub fn inventory(&mut self) -> anyhow::Result<String> {
self.process_str("inv")
}
fn left_wall_step(&mut self) -> AnyResult<()> {
let mut dir = self.direction + Turn::Left;
for _ in 0..4 {
log::debug!("Checking {} -> {}", self.direction, dir);
if self.see_room().directions.contains(&dir) {
break;
}
dir = dir + Turn::Right;
}
assert!(self.see_room().directions.contains(&dir));
self.step(dir)?;
log::debug!("Stepped {}, {}", dir, self.see_room().name);
Ok(())
}
fn explore_and_take(&mut self, items: &BTreeSet<String>) -> AnyResult<()> {
let start = self.room;
let mut start_directions = self.see_room().directions.clone();
start_directions.reverse();
loop {
let overlap: BTreeSet<String> = items
.intersection(&self.see_room().items)
.map(|s| s.to_owned())
.collect();
for item in overlap {
let _output = self.take(&item)?;
// println!("Took {}, output: {}", item, output.trim());
}
if self.see_room().name == "Security Checkpoint" {
// println!("inv: {}", self.inventory()?);
log::info!("Turning around at security checkpoint");
self.step(self.direction + Turn::Reverse)?;
continue;
}
if self.room == start {
let dir = match start_directions.pop() {
None => return Ok(()),
Some(d) => d,
};
self.step(dir)?;
continue;
}
self.left_wall_step()?;
}
}
pub fn goto(&mut self, room: &str) -> AnyResult<()> {
loop {
if self.see_room().name == room {
return Ok(());
}
self.left_wall_step()?;
}
}
}
type Key = slotmap::DefaultKey;
#[derive(Default, Debug, Clone)]
pub struct Map {
rooms_by_name: HashMap<String, Key>,
rooms: SlotMap<Key, Room>,
doors: HashMap<Key, BTreeMap<Compass, Key>>,
unvisited: HashMap<Key, BTreeSet<Compass>>,
}
impl Map {
fn add_room(&mut self, room: Room) -> Key {
if let Some(&key) = self.rooms_by_name.get(&room.name) {
return key;
}
let name = room.name.clone();
let directions = room.directions.clone();
let key = self.rooms.insert(room);
self.rooms_by_name.insert(name, key);
let unvisited = self.unvisited.insert(key, Default::default());
assert!(unvisited.is_none());
let unvisited = self.unvisited.get_mut(&key).unwrap();
for dir in directions {
unvisited.insert(dir);
}
key
}
fn visit(&mut self, room: Key, direction: Compass) {
if let Occupied(mut o) = self.unvisited.entry(room) {
o.get_mut().remove(&direction);
if o.get().is_empty() {
o.remove();
}
}
}
fn add_door(&mut self, first: Key, direction: Compass, second: Key) {
self.doors
.entry(first)
.or_default()
.insert(direction, second);
self.doors
.entry(second)
.or_default()
.insert(direction + Turn::Reverse, first);
self.visit(first, direction);
self.visit(second, direction + Turn::Reverse);
}
pub fn len(&self) -> usize {
self.rooms.len()
}
pub fn is_empty(&self) -> bool {
self.rooms.is_empty()
}
pub fn contains(&self, room: &Room) -> bool {
self.rooms_by_name.contains_key(&room.name)
}
fn get(&self, key: Key) -> &Room {
self.rooms.get(key).unwrap()
}
#[allow(dead_code)]
fn to_coords(&self, origin: Option<Key>) -> HashMap<Position, Key> {
let start = match (origin, self.rooms.iter().next()) {
(Some(k), _) => k,
(None, None) => return Default::default(),
(None, Some((k, _r))) => k,
};
let mut queue = vec![(Position(0, 0), start)];
let mut seen = HashSet::new();
let mut coords = HashMap::new();
while let Some((pos, r)) = queue.pop() {
match coords.entry(pos) {
Occupied(o) => {
assert!(seen.contains(&r));
assert!(*o.get() == r);
}
Vacant(v) => {
assert!(!seen.contains(&r));
seen.insert(r);
v.insert(r);
let neighbors = self.doors.get(&r).unwrap();
for (&d, &r) in neighbors {
queue.push((pos + d, r));
}
}
}
}
coords
}
}
/*
NV
||
SB KT=GW=PS
|| ||
CQ=HD
||
|| OB=ST
|| ||
SG HB=EG=WD=AR=SL
|| ||
HW=HC=CO
||
SC
AR: Arcade
CO: Corridor
CQ: Crew Quarters
EG: Engineering
GW: Gift Wrapping Center
HB: Hull Breach
HC: Hot Chocolate Fountain
HD: Holodeck
HW: Hallway
KT: Kitchen
NV: Navigation
OB: Observatory
SB: Sick Bay
SC: Security Checkpoint
SG: Storage
SL: Science Lab
ST: Stables
WD: Warp Drive Maintenance
*/
fn try_item_combos(initial_explorer: Explorer, items: Vec<String>) -> AnyResult<Explorer> | explorer.goto("Security Checkpoint")?;
let err = match explorer.left_wall_step() {
Ok(()) => return Ok(explorer),
Err(e) => e,
};
match err.downcast::<Ejection>() {
Ok(e) => log::info!(" {}", e),
Err(e) => return Err(e),
}
}
Err(anyhow::anyhow!("Got to end, found nothing!"))
}
#[allow(dead_code)]
fn explore_around(explorer: &mut Explorer) -> AnyResult<()> {
explorer.explore_and_take(&Default::default())?;
println!(
"Visited, back to start. Unvisited: {} Visited {} rooms with {} doors",
explorer.map.unvisited.len(),
explorer.map.rooms.len(),
explorer.map.doors.len()
);
println!("Items:");
for (_, room) in &explorer.map.rooms {
for item in &room.items {
println!(" - {}: {}", room.name, item);
}
}
println!("\nDoors:");
for (&ra, doors) in &explorer.map.doors {
for (dir, &rb) in doors {
let ra = explorer.map.rooms.get(ra).unwrap();
let rb = explorer.map.rooms.get(rb).unwrap();
println!(" {}: {} -> {}", dir, ra.name, rb.name);
}
}
Ok(())
}
fn main() -> anyhow::Result<()> {
env_logger::init();
let matches = App::new("Day 25")
.arg(
Arg::with_name("input")
.short("i")
.long("input")
.value_name("INPUT")
.takes_value(true),
)
.get_matches();
let input_path = matches.value_of("INPUT").unwrap_or("inputs/day25.txt");
debug!("Using input {}", input_path);
let file = File::open(input_path)?;
let buf_reader = BufReader::new(file);
let line: String = buf_reader
.lines()
.next()
.ok_or_else(|| anyhow::format_err!("No line found"))??;
let cp: IntComp = str::parse(&line)?;
let initial_explorer = Explorer::new(cp)?;
let all_items = vec![
// "food ration".to_owned(),
"candy cane".to_owned(),
"mouse".to_owned(),
// "mug".to_owned(),
"coin".to_owned(),
// "ornament".to_owned(),
"semiconductor".to_owned(),
// "mutex".to_owned(),
];
try_item_combos(initial_explorer, all_items)?;
Ok(())
}
#[cfg(test | {
let total = 1 << items.len();
for n in 0..total {
let mut explorer = initial_explorer.clone();
let cur_items: BTreeSet<String> = items
.iter()
.enumerate()
.filter_map(|(i, item)| {
if (n & (1 << i)) == 0 {
None
} else {
Some(item.clone())
}
})
.collect();
log::info!("Items: {:?}", cur_items);
explorer.explore_and_take(&cur_items)?;
assert_eq!(explorer.carrying, cur_items); | identifier_body |
main.rs | Display, Formatter, Result as FmtResult};
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::str::FromStr;
use anyhow::{anyhow, Error as AnyErr, Result as AnyResult};
use clap::{App, Arg};
use log::debug;
use slotmap::SlotMap;
use aoc::grid::{Compass, Position, Turn};
use aoc::intcomp::{IntComp, OutputVec, Stopped};
#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct Room {
pub name: String,
pub message: String,
pub items: BTreeSet<String>,
pub directions: Vec<Compass>,
}
impl Display for Room {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
write!(f, "Room[{}, doors=", self.name)?;
if!self.directions.is_empty() {
write!(f, "doors=")?;
for &d in &self.directions {
write!(f, "{}", d)?;
}
write!(f, ", ")?;
}
for (ix, item) in self.items.iter().enumerate() {
if ix == 0 {
write!(f, "items={}", item)?;
} else {
write!(f, ",{}", item)?;
}
if ix == self.items.len() - 1 {
write!(f, ", ")?;
}
}
write!(f, "message='{}']", self.message)?;
Ok(())
}
}
fn str_to_compass(s: &str) -> AnyResult<Compass> {
Ok(match s {
"north" => Compass::North,
"south" => Compass::South,
"east" => Compass::East,
"west" => Compass::West,
_ => return Err(anyhow!("'{s}' is not a compoass direction")),
})
}
#[derive(Debug, thiserror::Error)]
#[error("Ejection: {}", _0)]
struct Ejection(String);
impl FromStr for Room {
type Err = AnyErr;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut lines = s.lines();
let mut first = lines.next().ok_or_else(|| anyhow!("No first line"))?.trim();
while first.is_empty() {
first = lines
.next()
.ok_or_else(|| anyhow!("No non-empty first line"))?
.trim();
}
assert!(first.starts_with("== "));
assert!(first.ends_with(" =="));
let name = first
.trim_end_matches(" ==")
.trim_start_matches("== ")
.to_owned();
let message = lines
.next()
.ok_or_else(|| anyhow!("No second line"))?
.trim()
.to_owned();
assert!(!message.is_empty(), "Expected non-empty message");
let next = lines.next().ok_or_else(|| anyhow!("No third line"))?.trim();
assert!(next.is_empty(), "Expected third line to be empty");
let next = lines
.next()
.ok_or_else(|| anyhow!("No fourth line"))?
.trim();
assert!(
next == "Doors here lead:",
"Expected third line to be 'Doors here lead:'"
);
let mut directions = Vec::new();
let mut next = lines
.next()
.ok_or_else(|| anyhow!("No line after doors"))?
.trim();
while next.starts_with("- ") {
let dir = next.trim_start_matches("- ");
let dir = str_to_compass(dir)?;
directions.push(dir);
next = lines
.next()
.ok_or_else(|| anyhow!("No line after directions"))?
.trim();
}
assert!(
next.is_empty(),
"Expected line after directions to be empty"
);
next = lines
.next()
.ok_or_else(|| anyhow!("No line after directions + empty"))?
.trim();
let mut items = BTreeSet::new();
if next == "Items here:" {
next = lines
.next()
.ok_or_else(|| anyhow!("No line after items"))?
.trim();
while next.starts_with("- ") {
let item = next.trim_start_matches("- ");
items.insert(item.to_owned());
next = lines
.next()
.ok_or_else(|| anyhow!("No line after items"))?
.trim();
}
assert!(
next.is_empty(),
"Expected line after items to be empty, got '{}'",
next
);
next = lines
.next()
.ok_or_else(|| anyhow!("No line after items + empty"))?
.trim();
}
if next.contains("Alert!") {
return Err(Ejection(next.into()).into());
}
assert_eq!(
next, "Command?",
"Expected line after items to be 'Command?'"
);
assert!(lines.next().is_none());
Ok(Room {
name,
message,
items,
directions,
})
}
}
#[derive(Debug, Clone)]
pub struct Explorer {
comp: IntComp,
room: Key,
direction: Compass,
carrying: BTreeSet<String>,
map: Map,
}
impl Explorer {
fn new(mut comp: IntComp) -> AnyResult<Self> {
let mut output = OutputVec::new();
comp.run_to_input(&mut output)?;
let out = output.as_string()?;
let room = Room::from_str(&out)?;
let mut map: Map = Default::default();
let key = map.add_room(room);
let exp = Explorer {
comp,
room: key,
direction: Compass::North,
carrying: Default::default(),
map,
};
Ok(exp)
}
fn see_room(&self) -> &Room {
self.map.get(self.room)
}
fn process_input_str(&mut self, output: &mut OutputVec, input: &str) -> anyhow::Result<String> {
log::debug!("Process 1: '{}'", input);
self.comp
.process_ascii(input, output)?
.expect(Stopped::Input)?;
log::debug!("Process 2: '\\n'");
self.comp
.process_ascii("\n", output)?
.expect(Stopped::Input)?;
log::debug!("Processed: '\\n'");
Ok(output.as_string()?)
}
fn process_str(&mut self, input: &str) -> anyhow::Result<String> {
let mut out = OutputVec::new();
match self.process_input_str(&mut out, input) {
Ok(v) => Ok(v),
Err(e) => {
let output = out.as_string()?;
log::warn!("process_str failure on input {}, output: {}", input, output);
Err(e)
}
}
}
// fn north(&mut self) -> anyhow::Result<String> {
// self.process_str("north")
// }
// fn south(&mut self) -> anyhow::Result<String> {
// self.process_str("south")
// }
// fn east(&mut self) -> anyhow::Result<String> {
// self.process_str("east")
// }
// fn west(&mut self) -> anyhow::Result<String> {
// self.process_str("west")
// }
pub fn step(&mut self, direction: Compass) -> anyhow::Result<()> {
let input = match direction {
Compass::East => "east",
Compass::North => "north",
Compass::South => "south",
Compass::West => "west",
};
log::debug!("Taking step {}", input);
let output = self.process_str(input)?;
log::debug!("Took step:\n{}\n", output);
let room = Room::from_str(&output)?;
let new = self.map.add_room(room);
self.map.add_door(self.room, direction, new);
self.room = new;
self.direction = direction;
Ok(())
}
pub fn take(&mut self, item: &str) -> anyhow::Result<String> {
log::debug!("Taking {}", item);
let mut s = String::from("take ");
s.push_str(item);
let result = self.process_str(&s)?;
let new = self.carrying.insert(item.to_string());
let room = self.map.rooms.get_mut(self.room).unwrap();
room.items.remove(item);
assert!(new, "Expected to add {}", item);
log::debug!(" took {}", item);
Ok(result)
}
pub fn drop(&mut self, item: &str) -> anyhow::Result<String> {
let found = self.carrying.remove(item);
assert!(!found, "Expected to drop {}", item);
let mut s = String::from("drop ");
s.push_str(item);
self.process_str(&s)
}
pub fn inventory(&mut self) -> anyhow::Result<String> {
self.process_str("inv")
}
fn left_wall_step(&mut self) -> AnyResult<()> {
let mut dir = self.direction + Turn::Left;
for _ in 0..4 {
log::debug!("Checking {} -> {}", self.direction, dir);
if self.see_room().directions.contains(&dir) {
break;
}
dir = dir + Turn::Right;
}
assert!(self.see_room().directions.contains(&dir));
self.step(dir)?;
log::debug!("Stepped {}, {}", dir, self.see_room().name);
Ok(())
}
fn explore_and_take(&mut self, items: &BTreeSet<String>) -> AnyResult<()> {
let start = self.room;
let mut start_directions = self.see_room().directions.clone();
start_directions.reverse();
loop {
let overlap: BTreeSet<String> = items
.intersection(&self.see_room().items)
.map(|s| s.to_owned())
.collect();
for item in overlap {
let _output = self.take(&item)?;
// println!("Took {}, output: {}", item, output.trim());
}
if self.see_room().name == "Security Checkpoint" {
// println!("inv: {}", self.inventory()?);
log::info!("Turning around at security checkpoint");
self.step(self.direction + Turn::Reverse)?;
continue;
}
if self.room == start {
let dir = match start_directions.pop() {
None => return Ok(()),
Some(d) => d,
};
self.step(dir)?;
continue;
}
self.left_wall_step()?;
}
}
pub fn goto(&mut self, room: &str) -> AnyResult<()> {
loop {
if self.see_room().name == room {
return Ok(());
}
self.left_wall_step()?;
}
}
}
type Key = slotmap::DefaultKey;
#[derive(Default, Debug, Clone)]
pub struct Map {
rooms_by_name: HashMap<String, Key>,
rooms: SlotMap<Key, Room>,
doors: HashMap<Key, BTreeMap<Compass, Key>>,
unvisited: HashMap<Key, BTreeSet<Compass>>,
}
impl Map {
fn add_room(&mut self, room: Room) -> Key {
if let Some(&key) = self.rooms_by_name.get(&room.name) {
return key;
}
let name = room.name.clone();
let directions = room.directions.clone();
let key = self.rooms.insert(room);
self.rooms_by_name.insert(name, key);
let unvisited = self.unvisited.insert(key, Default::default());
assert!(unvisited.is_none());
let unvisited = self.unvisited.get_mut(&key).unwrap();
for dir in directions {
unvisited.insert(dir);
}
key
}
fn visit(&mut self, room: Key, direction: Compass) {
if let Occupied(mut o) = self.unvisited.entry(room) {
o.get_mut().remove(&direction);
if o.get().is_empty() {
o.remove();
}
}
}
fn add_door(&mut self, first: Key, direction: Compass, second: Key) {
self.doors
.entry(first)
.or_default()
.insert(direction, second);
self.doors
.entry(second)
.or_default()
.insert(direction + Turn::Reverse, first);
self.visit(first, direction);
self.visit(second, direction + Turn::Reverse);
}
pub fn | (&self) -> usize {
self.rooms.len()
}
pub fn is_empty(&self) -> bool {
self.rooms.is_empty()
}
pub fn contains(&self, room: &Room) -> bool {
self.rooms_by_name.contains_key(&room.name)
}
fn get(&self, key: Key) -> &Room {
self.rooms.get(key).unwrap()
}
#[allow(dead_code)]
fn to_coords(&self, origin: Option<Key>) -> HashMap<Position, Key> {
let start = match (origin, self.rooms.iter().next()) {
(Some(k), _) => k,
(None, None) => return Default::default(),
(None, Some((k, _r))) => k,
};
let mut queue = vec![(Position(0, 0), start)];
let mut seen = HashSet::new();
let mut coords = HashMap::new();
while let Some((pos, r)) = queue.pop() {
match coords.entry(pos) {
Occupied(o) => {
assert!(seen.contains(&r));
assert!(*o.get() == r);
}
Vacant(v) => {
assert!(!seen.contains(&r));
seen.insert(r);
v.insert(r);
let neighbors = self.doors.get(&r).unwrap();
for (&d, &r) in neighbors {
queue.push((pos + d, r));
}
}
}
}
coords
}
}
/*
NV
||
SB KT=GW=PS
|| ||
CQ=HD
||
|| OB=ST
|| ||
SG HB=EG=WD=AR=SL
|| ||
HW=HC=CO
||
SC
AR: Arcade
CO: Corridor
CQ: Crew Quarters
EG: Engineering
GW: Gift Wrapping Center
HB: Hull Breach
HC: Hot Chocolate Fountain
HD: Holodeck
HW: Hallway
KT: Kitchen
NV: Navigation
OB: Observatory
SB: Sick Bay
SC: Security Checkpoint
SG: Storage
SL: Science Lab
ST: Stables
WD: Warp Drive Maintenance
*/
fn try_item_combos(initial_explorer: Explorer, items: Vec<String>) -> AnyResult<Explorer> {
let total = 1 << items.len();
for n in 0..total {
let mut explorer = initial_explorer.clone();
let cur_items: BTreeSet<String> = items
.iter()
.enumerate()
.filter_map(|(i, item)| {
if (n & (1 << i)) == 0 {
None
} else {
Some(item.clone())
}
})
.collect();
log::info!("Items: {:?}", cur_items);
explorer.explore_and_take(&cur_items)?;
assert_eq!(explorer.carrying, cur_items);
explorer.goto("Security Checkpoint")?;
let err = match explorer.left_wall_step() {
Ok(()) => return Ok(explorer),
Err(e) => e,
};
match err.downcast::<Ejection>() {
Ok(e) => log::info!(" {}", e),
Err(e) => return Err(e),
}
}
Err(anyhow::anyhow!("Got to end, found nothing!"))
}
#[allow(dead_code)]
fn explore_around(explorer: &mut Explorer) -> AnyResult<()> {
explorer.explore_and_take(&Default::default())?;
println!(
"Visited, back to start. Unvisited: {} Visited {} rooms with {} doors",
explorer.map.unvisited.len(),
explorer.map.rooms.len(),
explorer.map.doors.len()
);
println!("Items:");
for (_, room) in &explorer.map.rooms {
for item in &room.items {
println!(" - {}: {}", room.name, item);
}
}
println!("\nDoors:");
for (&ra, doors) in &explorer.map.doors {
for (dir, &rb) in doors {
let ra = explorer.map.rooms.get(ra).unwrap();
let rb = explorer.map.rooms.get(rb).unwrap();
println!(" {}: {} -> {}", dir, ra.name, rb.name);
}
}
Ok(())
}
fn main() -> anyhow::Result<()> {
env_logger::init();
let matches = App::new("Day 25")
.arg(
Arg::with_name("input")
.short("i")
.long("input")
.value_name("INPUT")
.takes_value(true),
)
.get_matches();
let input_path = matches.value_of("INPUT").unwrap_or("inputs/day25.txt");
debug!("Using input {}", input_path);
let file = File::open(input_path)?;
let buf_reader = BufReader::new(file);
let line: String = buf_reader
.lines()
.next()
.ok_or_else(|| anyhow::format_err!("No line found"))??;
let cp: IntComp = str::parse(&line)?;
let initial_explorer = Explorer::new(cp)?;
let all_items = vec![
// "food ration".to_owned(),
"candy cane".to_owned(),
"mouse".to_owned(),
// "mug".to_owned(),
"coin".to_owned(),
// "ornament".to_owned(),
"semiconductor".to_owned(),
// "mutex".to_owned(),
];
try_item_combos(initial_explorer, all_items)?;
Ok(())
}
#[cfg(tes | len | identifier_name |
main.rs | ::{Display, Formatter, Result as FmtResult};
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::str::FromStr;
use anyhow::{anyhow, Error as AnyErr, Result as AnyResult};
use clap::{App, Arg};
use log::debug;
use slotmap::SlotMap;
use aoc::grid::{Compass, Position, Turn};
use aoc::intcomp::{IntComp, OutputVec, Stopped};
#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct Room {
pub name: String,
pub message: String,
pub items: BTreeSet<String>,
pub directions: Vec<Compass>,
}
impl Display for Room {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
write!(f, "Room[{}, doors=", self.name)?;
if!self.directions.is_empty() {
write!(f, "doors=")?;
for &d in &self.directions {
write!(f, "{}", d)?;
}
write!(f, ", ")?;
}
for (ix, item) in self.items.iter().enumerate() {
if ix == 0 {
write!(f, "items={}", item)?;
} else {
write!(f, ",{}", item)?;
}
if ix == self.items.len() - 1 {
write!(f, ", ")?;
}
}
write!(f, "message='{}']", self.message)?;
Ok(())
}
}
fn str_to_compass(s: &str) -> AnyResult<Compass> {
Ok(match s {
"north" => Compass::North,
"south" => Compass::South,
"east" => Compass::East,
"west" => Compass::West,
_ => return Err(anyhow!("'{s}' is not a compoass direction")),
})
}
#[derive(Debug, thiserror::Error)]
#[error("Ejection: {}", _0)]
struct Ejection(String);
impl FromStr for Room {
type Err = AnyErr;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut lines = s.lines();
let mut first = lines.next().ok_or_else(|| anyhow!("No first line"))?.trim();
while first.is_empty() {
first = lines
.next()
.ok_or_else(|| anyhow!("No non-empty first line"))?
.trim();
}
assert!(first.starts_with("== "));
assert!(first.ends_with(" =="));
let name = first
.trim_end_matches(" ==")
.trim_start_matches("== ")
.to_owned();
let message = lines
.next()
.ok_or_else(|| anyhow!("No second line"))?
.trim()
.to_owned();
assert!(!message.is_empty(), "Expected non-empty message");
let next = lines.next().ok_or_else(|| anyhow!("No third line"))?.trim();
assert!(next.is_empty(), "Expected third line to be empty");
let next = lines
.next()
.ok_or_else(|| anyhow!("No fourth line"))?
.trim();
assert!(
next == "Doors here lead:",
"Expected third line to be 'Doors here lead:'"
);
let mut directions = Vec::new();
let mut next = lines
.next()
.ok_or_else(|| anyhow!("No line after doors"))?
.trim();
while next.starts_with("- ") {
let dir = next.trim_start_matches("- ");
let dir = str_to_compass(dir)?;
directions.push(dir);
next = lines
.next()
.ok_or_else(|| anyhow!("No line after directions"))?
.trim();
}
assert!(
next.is_empty(),
"Expected line after directions to be empty"
);
next = lines
.next()
.ok_or_else(|| anyhow!("No line after directions + empty"))?
.trim();
let mut items = BTreeSet::new();
if next == "Items here:" {
next = lines
.next()
.ok_or_else(|| anyhow!("No line after items"))?
.trim();
while next.starts_with("- ") {
let item = next.trim_start_matches("- ");
items.insert(item.to_owned());
next = lines
.next()
.ok_or_else(|| anyhow!("No line after items"))?
.trim();
}
assert!(
next.is_empty(),
"Expected line after items to be empty, got '{}'",
next
);
next = lines
.next()
.ok_or_else(|| anyhow!("No line after items + empty"))?
.trim();
}
if next.contains("Alert!") {
return Err(Ejection(next.into()).into());
}
assert_eq!(
next, "Command?",
"Expected line after items to be 'Command?'"
);
assert!(lines.next().is_none());
Ok(Room {
name,
message,
items,
directions,
})
}
}
#[derive(Debug, Clone)]
pub struct Explorer {
comp: IntComp,
room: Key,
direction: Compass,
carrying: BTreeSet<String>,
map: Map,
}
impl Explorer {
fn new(mut comp: IntComp) -> AnyResult<Self> {
let mut output = OutputVec::new();
comp.run_to_input(&mut output)?;
let out = output.as_string()?;
let room = Room::from_str(&out)?;
let mut map: Map = Default::default();
let key = map.add_room(room);
let exp = Explorer {
comp,
room: key,
direction: Compass::North,
carrying: Default::default(),
map,
};
Ok(exp)
}
fn see_room(&self) -> &Room {
self.map.get(self.room)
}
fn process_input_str(&mut self, output: &mut OutputVec, input: &str) -> anyhow::Result<String> {
log::debug!("Process 1: '{}'", input);
self.comp
.process_ascii(input, output)?
.expect(Stopped::Input)?;
log::debug!("Process 2: '\\n'");
self.comp
.process_ascii("\n", output)?
.expect(Stopped::Input)?;
log::debug!("Processed: '\\n'");
Ok(output.as_string()?)
}
fn process_str(&mut self, input: &str) -> anyhow::Result<String> {
let mut out = OutputVec::new();
match self.process_input_str(&mut out, input) {
Ok(v) => Ok(v),
Err(e) => |
}
}
// fn north(&mut self) -> anyhow::Result<String> {
// self.process_str("north")
// }
// fn south(&mut self) -> anyhow::Result<String> {
// self.process_str("south")
// }
// fn east(&mut self) -> anyhow::Result<String> {
// self.process_str("east")
// }
// fn west(&mut self) -> anyhow::Result<String> {
// self.process_str("west")
// }
pub fn step(&mut self, direction: Compass) -> anyhow::Result<()> {
let input = match direction {
Compass::East => "east",
Compass::North => "north",
Compass::South => "south",
Compass::West => "west",
};
log::debug!("Taking step {}", input);
let output = self.process_str(input)?;
log::debug!("Took step:\n{}\n", output);
let room = Room::from_str(&output)?;
let new = self.map.add_room(room);
self.map.add_door(self.room, direction, new);
self.room = new;
self.direction = direction;
Ok(())
}
pub fn take(&mut self, item: &str) -> anyhow::Result<String> {
log::debug!("Taking {}", item);
let mut s = String::from("take ");
s.push_str(item);
let result = self.process_str(&s)?;
let new = self.carrying.insert(item.to_string());
let room = self.map.rooms.get_mut(self.room).unwrap();
room.items.remove(item);
assert!(new, "Expected to add {}", item);
log::debug!(" took {}", item);
Ok(result)
}
pub fn drop(&mut self, item: &str) -> anyhow::Result<String> {
let found = self.carrying.remove(item);
assert!(!found, "Expected to drop {}", item);
let mut s = String::from("drop ");
s.push_str(item);
self.process_str(&s)
}
pub fn inventory(&mut self) -> anyhow::Result<String> {
self.process_str("inv")
}
fn left_wall_step(&mut self) -> AnyResult<()> {
let mut dir = self.direction + Turn::Left;
for _ in 0..4 {
log::debug!("Checking {} -> {}", self.direction, dir);
if self.see_room().directions.contains(&dir) {
break;
}
dir = dir + Turn::Right;
}
assert!(self.see_room().directions.contains(&dir));
self.step(dir)?;
log::debug!("Stepped {}, {}", dir, self.see_room().name);
Ok(())
}
fn explore_and_take(&mut self, items: &BTreeSet<String>) -> AnyResult<()> {
let start = self.room;
let mut start_directions = self.see_room().directions.clone();
start_directions.reverse();
loop {
let overlap: BTreeSet<String> = items
.intersection(&self.see_room().items)
.map(|s| s.to_owned())
.collect();
for item in overlap {
let _output = self.take(&item)?;
// println!("Took {}, output: {}", item, output.trim());
}
if self.see_room().name == "Security Checkpoint" {
// println!("inv: {}", self.inventory()?);
log::info!("Turning around at security checkpoint");
self.step(self.direction + Turn::Reverse)?;
continue;
}
if self.room == start {
let dir = match start_directions.pop() {
None => return Ok(()),
Some(d) => d,
};
self.step(dir)?;
continue;
}
self.left_wall_step()?;
}
}
pub fn goto(&mut self, room: &str) -> AnyResult<()> {
loop {
if self.see_room().name == room {
return Ok(());
}
self.left_wall_step()?;
}
}
}
type Key = slotmap::DefaultKey;
#[derive(Default, Debug, Clone)]
pub struct Map {
rooms_by_name: HashMap<String, Key>,
rooms: SlotMap<Key, Room>,
doors: HashMap<Key, BTreeMap<Compass, Key>>,
unvisited: HashMap<Key, BTreeSet<Compass>>,
}
impl Map {
fn add_room(&mut self, room: Room) -> Key {
if let Some(&key) = self.rooms_by_name.get(&room.name) {
return key;
}
let name = room.name.clone();
let directions = room.directions.clone();
let key = self.rooms.insert(room);
self.rooms_by_name.insert(name, key);
let unvisited = self.unvisited.insert(key, Default::default());
assert!(unvisited.is_none());
let unvisited = self.unvisited.get_mut(&key).unwrap();
for dir in directions {
unvisited.insert(dir);
}
key
}
fn visit(&mut self, room: Key, direction: Compass) {
if let Occupied(mut o) = self.unvisited.entry(room) {
o.get_mut().remove(&direction);
if o.get().is_empty() {
o.remove();
}
}
}
fn add_door(&mut self, first: Key, direction: Compass, second: Key) {
self.doors
.entry(first)
.or_default()
.insert(direction, second);
self.doors
.entry(second)
.or_default()
.insert(direction + Turn::Reverse, first);
self.visit(first, direction);
self.visit(second, direction + Turn::Reverse);
}
pub fn len(&self) -> usize {
self.rooms.len()
}
pub fn is_empty(&self) -> bool {
self.rooms.is_empty()
}
pub fn contains(&self, room: &Room) -> bool {
self.rooms_by_name.contains_key(&room.name)
}
fn get(&self, key: Key) -> &Room {
self.rooms.get(key).unwrap()
}
#[allow(dead_code)]
fn to_coords(&self, origin: Option<Key>) -> HashMap<Position, Key> {
let start = match (origin, self.rooms.iter().next()) {
(Some(k), _) => k,
(None, None) => return Default::default(),
(None, Some((k, _r))) => k,
};
let mut queue = vec![(Position(0, 0), start)];
let mut seen = HashSet::new();
let mut coords = HashMap::new();
while let Some((pos, r)) = queue.pop() {
match coords.entry(pos) {
Occupied(o) => {
assert!(seen.contains(&r));
assert!(*o.get() == r);
}
Vacant(v) => {
assert!(!seen.contains(&r));
seen.insert(r);
v.insert(r);
let neighbors = self.doors.get(&r).unwrap();
for (&d, &r) in neighbors {
queue.push((pos + d, r));
}
}
}
}
coords
}
}
/*
NV
||
SB KT=GW=PS
|| ||
CQ=HD
||
|| OB=ST
|| ||
SG HB=EG=WD=AR=SL
|| ||
HW=HC=CO
||
SC
AR: Arcade
CO: Corridor
CQ: Crew Quarters
EG: Engineering
GW: Gift Wrapping Center
HB: Hull Breach
HC: Hot Chocolate Fountain
HD: Holodeck
HW: Hallway
KT: Kitchen
NV: Navigation
OB: Observatory
SB: Sick Bay
SC: Security Checkpoint
SG: Storage
SL: Science Lab
ST: Stables
WD: Warp Drive Maintenance
*/
fn try_item_combos(initial_explorer: Explorer, items: Vec<String>) -> AnyResult<Explorer> {
let total = 1 << items.len();
for n in 0..total {
let mut explorer = initial_explorer.clone();
let cur_items: BTreeSet<String> = items
.iter()
.enumerate()
.filter_map(|(i, item)| {
if (n & (1 << i)) == 0 {
None
} else {
Some(item.clone())
}
})
.collect();
log::info!("Items: {:?}", cur_items);
explorer.explore_and_take(&cur_items)?;
assert_eq!(explorer.carrying, cur_items);
explorer.goto("Security Checkpoint")?;
let err = match explorer.left_wall_step() {
Ok(()) => return Ok(explorer),
Err(e) => e,
};
match err.downcast::<Ejection>() {
Ok(e) => log::info!(" {}", e),
Err(e) => return Err(e),
}
}
Err(anyhow::anyhow!("Got to end, found nothing!"))
}
#[allow(dead_code)]
fn explore_around(explorer: &mut Explorer) -> AnyResult<()> {
explorer.explore_and_take(&Default::default())?;
println!(
"Visited, back to start. Unvisited: {} Visited {} rooms with {} doors",
explorer.map.unvisited.len(),
explorer.map.rooms.len(),
explorer.map.doors.len()
);
println!("Items:");
for (_, room) in &explorer.map.rooms {
for item in &room.items {
println!(" - {}: {}", room.name, item);
}
}
println!("\nDoors:");
for (&ra, doors) in &explorer.map.doors {
for (dir, &rb) in doors {
let ra = explorer.map.rooms.get(ra).unwrap();
let rb = explorer.map.rooms.get(rb).unwrap();
println!(" {}: {} -> {}", dir, ra.name, rb.name);
}
}
Ok(())
}
fn main() -> anyhow::Result<()> {
env_logger::init();
let matches = App::new("Day 25")
.arg(
Arg::with_name("input")
.short("i")
.long("input")
.value_name("INPUT")
.takes_value(true),
)
.get_matches();
let input_path = matches.value_of("INPUT").unwrap_or("inputs/day25.txt");
debug!("Using input {}", input_path);
let file = File::open(input_path)?;
let buf_reader = BufReader::new(file);
let line: String = buf_reader
.lines()
.next()
.ok_or_else(|| anyhow::format_err!("No line found"))??;
let cp: IntComp = str::parse(&line)?;
let initial_explorer = Explorer::new(cp)?;
let all_items = vec![
// "food ration".to_owned(),
"candy cane".to_owned(),
"mouse".to_owned(),
// "mug".to_owned(),
"coin".to_owned(),
// "ornament".to_owned(),
"semiconductor".to_owned(),
// "mutex".to_owned(),
];
try_item_combos(initial_explorer, all_items)?;
Ok(())
}
#[cfg(test | {
let output = out.as_string()?;
log::warn!("process_str failure on input {}, output: {}", input, output);
Err(e)
} | conditional_block |
gridview.rs | }
Some(result)
}
// this will take commands_to_finalize from the old snapshot into the new
// one if an error is found produced
pub fn correct(&mut self, blobs: &[impl Blob]) -> Option<Snapshot> {
let blob_matching = self.match_with_blobs(blobs)?;
let mut was_error = false;
let new_droplets: Map<_, _> = blob_matching
.iter()
.map(|(&id, blob)| {
let d = self.droplets.get_mut(&id).unwrap();
let d_new = blob.to_droplet(id);
if d.location!= d_new.location || d.dimensions!= d_new.dimensions {
info!("Found error in droplet {:?}", id);
debug!("Droplet error\n Expected: {:#?}\n Found: {:#?}", d, d_new);
was_error = true;
}
// HACK FIXME this mutation is not great
if (d.volume - d_new.volume).abs() > 1.0 {
info!(
"volume of {} changed: {} -> {}",
id.id, d.volume, d_new.volume
)
}
d.volume = d_new.volume;
(id, d_new)
}).collect();
if was_error {
let mut new_snapshot = Snapshot {
droplets: new_droplets,
commands_to_finalize: Vec::new(),
};
::std::mem::swap(
&mut new_snapshot.commands_to_finalize,
&mut self.commands_to_finalize,
);
Some(new_snapshot)
} else {
None
}
}
pub fn diff_droplet(&self, id: &DropletId, other: &Snapshot) -> DropletDiff {
use self::DropletDiff::*;
let droplet = self
.droplets
.get(id)
.expect("id should be in self snapshot");
if let Some(other_droplet) = other.droplets.get(id) {
// NOTE we only care about location diffs for now
let loc = droplet.location;
let other_loc = other_droplet.location;
if loc!= other_loc {
// for now, just assert that we are only moving one spot at a time
// FIXME HACK
// assert_eq!((&loc - &other_loc).norm(), 1);
Moved {
from: loc,
to: other_loc,
}
} else {
DidNotMove
}
} else {
Disappeared
}
}
pub fn get_error_edges(
&self,
planned_outcome: &Snapshot,
actual_outcome: &Snapshot,
) -> Vec<(Location, Location)> {
use self::DropletDiff::*;
self.droplets
.keys()
.filter_map(|id| {
let planned_diff = self.diff_droplet(id, planned_outcome);
let actual_diff = self.diff_droplet(id, actual_outcome);
match (planned_diff, actual_diff) {
(Moved { from, to }, DidNotMove) => {
if (&from - &to).norm() == 1 {
Some((from, to))
} else {
warn!("Droplet {} jumped from {} to {}!", id.id, from, to);
None
}
}
_ => None,
}
}).collect()
}
}
#[derive(Debug)]
pub enum ExecResponse {
Step(Snapshot),
NotReady,
Done,
}
impl GridView {
pub fn new(grid: Grid) -> GridView {
let mut planned = VecDeque::new();
planned.push_back(Snapshot::default());
#[cfg(feature = "pi")]
let pi = match ::std::env::var("PUDDLE_PI") {
Ok(s) => if s == "1" {
let mut pi = RaspberryPi::new().unwrap();
info!("Initialized the pi!");
Some(pi)
} else {
warn!("Couldn't read PUDDLE_PI={}", s);
None
},
Err(_) => {
info!("Did not start the pi!");
None
}
};
GridView {
grid: grid,
planned,
completed: Vec::new(),
done: false,
bad_edges: Set::new(),
#[cfg(feature = "pi")]
pi,
}
}
pub fn close(&mut self) {
info!("Marking gridview as DONE!");
self.done = true;
}
pub fn execute(&mut self) -> ExecResponse {
use self::ExecResponse::*;
// compare with len - 1 because we wouldn't want to "write out" a state
// that hasn't been fully planned
let resp = if let Some(planned_snapshot) = self.planned.pop_front() {
Step(planned_snapshot)
} else if self.done {
Done
} else {
NotReady
};
trace!(
"execute sending {:?}. Completed: {}, planned: {}.",
resp,
self.completed.len(),
self.planned.len(),
);
resp
}
pub fn commit_pending(&mut self, mut snapshot: Snapshot) {
#[cfg(not(feature = "pi"))]
snapshot.finalize();
#[cfg(feature = "pi")]
snapshot.finalize(self.pi.as_mut());
self.completed.push(snapshot);
}
pub fn snapshot(&self) -> &Snapshot {
self.planned.back().unwrap()
}
// TODO probably shouldn't provide this
pub fn snapshot_mut(&mut self) -> &mut Snapshot {
self.planned.back_mut().unwrap()
}
pub fn snapshot_ensure(&mut self) {
if self.planned.is_empty() {
let last = self.completed.last().unwrap();
self.planned.push_back(last.new_with_same_droplets())
}
}
pub fn exec_snapshot(&self) -> &Snapshot {
self.completed.last().unwrap()
}
fn tick(&mut self) {
let new_snapshot = {
let just_planned = self.planned.back().unwrap();
if let Some(col) = just_planned.get_collision() {
panic!("collision: {:#?}", col);
};
just_planned.new_with_same_droplets()
};
self.planned.push_back(new_snapshot);
trace!("TICK! len={}", self.planned.len());
}
fn update(&mut self, id: DropletId, func: impl FnOnce(&mut Droplet)) {
let now = self.planned.back_mut().unwrap();
let droplet = now
.droplets
.get_mut(&id)
.unwrap_or_else(|| panic!("Tried to remove a non-existent droplet: {:?}", id));
func(droplet);
}
pub fn plan_droplet_info(&self, pid_option: Option<ProcessId>) -> Vec<DropletInfo> {
// gets from the planner for now
self.planned.back().unwrap().droplet_info(pid_option)
}
pub fn take_paths(&mut self, paths: &Map<DropletId, Path>, final_tick: bool) {
let max_len = paths.values().map(|path| path.len()).max().unwrap_or(0);
// make sure that all droplets start where they are at this time step
for (id, path) in paths.iter() {
let snapshot = self.planned.back().unwrap();
let droplet = &snapshot.droplets[&id];
assert_eq!(droplet.location, path[0]);
}
for i in 1..max_len {
for (&id, path) in paths.iter() {
if i < path.len() {
self.update(id, |droplet| {
assert!(droplet.location.distance_to(&path[i]) <= 1);
droplet.location = path[i];
});
}
}
if i < max_len - 1 || final_tick {
self.tick();
}
}
}
pub fn subview(
&mut self,
ids: impl IntoIterator<Item = DropletId>,
mapping: Map<Location, Location>,
) -> GridSubView {
GridSubView {
backing_gridview: self,
mapping: mapping,
ids: ids.into_iter().collect(),
}
}
pub fn register(&mut self, cmd: Box<dyn Command>) {
// this goes in the *just planned* thing, not the one currently being planned.
let just_planned = self.planned.len() - 2;
self.planned[just_planned].commands_to_finalize.push(cmd)
}
pub fn rollback(&mut self, new_snapshot: &Snapshot) {
let old_planned: Vec<_> = self.planned.drain(..).collect();
self.planned
.push_back(new_snapshot.new_with_same_droplets());
assert_eq!(self.planned.len(), 1);
for planned_snapshot in old_planned {
planned_snapshot.abort(self)
}
}
pub fn perturb(&self, rng: &mut impl Rng, snapshot: &Snapshot) -> Option<Snapshot> {
let now = snapshot;
let then = self.completed.last()?;
let id = {
let ids: Vec<_> = now.droplets.keys().collect();
match rng.choose(ids.as_slice()) {
Some(&&id) => id,
None => return None,
}
};
let mut now2 = now.new_with_same_droplets();
if let Some(old_droplet) = then.droplets.get(&id) {
let was_there = now2.droplets.insert(id, old_droplet.clone());
assert!(was_there.is_some());
}
Some(now2)
}
pub fn add_error_edges(&mut self, planned: &Snapshot, actual: &Snapshot) {
let previous = self.completed.last().unwrap();
let edges = previous.get_error_edges(planned, actual);
let n_edges = edges.len();
warn!(
"Added error {} edges, now there are {}: {:?}",
n_edges,
self.bad_edges.len() / 2,
edges,
);
for (loc1, loc2) in edges {
// for now, insert edges both ways
self.bad_edges.insert((loc1, loc2));
self.bad_edges.insert((loc2, loc1));
}
}
}
pub struct GridSubView<'a> {
backing_gridview: &'a mut GridView,
mapping: Map<Location, Location>,
ids: Set<DropletId>,
}
impl<'a> GridSubView<'a> {
    /// Advances the backing gridview by one planning step.
    pub fn tick(&mut self) {
        self.backing_gridview.tick()
    }

    /// Runs `f` on the Raspberry Pi handle if one is attached; returns
    /// `None` when no pi is present.
    #[cfg(feature = "pi")]
    pub fn with_pi<T>(&mut self, f: impl FnOnce(&mut RaspberryPi) -> T) -> Option<T> {
        self.backing_gridview.pi.as_mut().map(f)
    }

    /// Looks up the electrode at `loc`, translating through this subview's
    /// location mapping first. `None` if `loc` is unmapped or empty.
    pub fn get_electrode(&self, loc: &Location) -> Option<&Electrode> {
        let actual_loc = self.mapping.get(loc)?;
        self.backing_gridview.grid.get_cell(&actual_loc)
    }

    // TODO: translate or somehow hide the untranslated location of this
    /// Returns the droplet with the given id. Panics if the id is not part
    /// of this subview.
    pub fn get(&self, id: &DropletId) -> &Droplet {
        assert!(self.ids.contains(&id));
        &self.backing_gridview.snapshot().droplets[id]
    }

    /// Mutable variant of `get`; the same panics apply.
    fn get_mut(&mut self, id: &DropletId) -> &mut Droplet {
        assert!(self.ids.contains(&id));
        self.backing_gridview
            .snapshot_mut()
            .droplets
            .get_mut(id)
            .unwrap()
    }

    /// Adds a droplet, translating its location into the backing grid's
    /// coordinates. Panics if the id was already present or the location is
    /// not covered by the mapping.
    pub fn insert(&mut self, mut droplet: Droplet) {
        let new_loc = self.mapping.get(&droplet.location);
        trace!("Inserting {:#?} at {:?}", droplet, new_loc);
        droplet.location = *new_loc.unwrap();
        let was_not_there = self.ids.insert(droplet.id);
        assert!(was_not_there);
        let snapshot = self.backing_gridview.snapshot_mut();
        let was_there = snapshot.droplets.insert(droplet.id, droplet);
        assert!(was_there.is_none());
    }

    /// Removes a droplet, translating its location back into the subview's
    /// (unmapped) coordinates. Panics if the id is unknown.
    pub fn remove(&mut self, id: &DropletId) -> Droplet {
        let was_there = self.ids.remove(id);
        assert!(was_there);
        let snapshot = self.backing_gridview.snapshot_mut();
        let mut droplet = snapshot.droplets.remove(id).unwrap();
        // FIXME this is pretty dumb: reverse lookup by linear scan
        let (unmapped_loc, _) = self
            .mapping
            .iter()
            .find(|(_, &v)| v == droplet.location)
            .unwrap();
        droplet.location = *unmapped_loc;
        droplet
    }

    /// Panics unless every cell covered by the droplet lies inside this
    /// subview's mapping.
    fn check_droplet(&self, id: &DropletId) {
        // TODO will this have translated or real location??
        let droplet = self.get(id);
        let mapped_to: Set<_> = self.mapping.values().collect();
        // TODO this is pretty slow
        for i in 0..droplet.dimensions.y {
            for j in 0..droplet.dimensions.x {
                let loc = Location {
                    y: droplet.location.y + i,
                    x: droplet.location.x + j,
                };
                // fixed mangled `if!` token spacing
                if !mapped_to.contains(&loc) {
                    panic!("{} was unmapped!, mapping: {:#?}", loc, self.mapping);
                }
            }
        }
    }

    /// Applies `func` to the droplet, then re-validates that it still fits
    /// inside the subview.
    fn update(&mut self, id: &DropletId, func: impl FnOnce(&mut Droplet)) {
        func(self.get_mut(id));
        self.check_droplet(id);
    }

    /// Moves the droplet one cell west.
    pub fn move_west(&mut self, id: DropletId) {
        trace!("Moving droplet {:?} west", id);
        self.update(&id, |droplet| {
            droplet.location = droplet.location.west();
        })
    }

    /// Moves the droplet one cell east.
    pub fn move_east(&mut self, id: DropletId) {
        trace!("Moving droplet {:?} east", id);
        self.update(&id, |droplet| {
            droplet.location = droplet.location.east();
        })
    }

    /// Moves the droplet one cell north.
    pub fn move_north(&mut self, id: DropletId) {
        trace!("Moving droplet {:?} north", id);
        self.update(&id, |droplet| {
            droplet.location = droplet.location.north();
        })
    }

    /// Moves the droplet one cell south.
    pub fn move_south(&mut self, id: DropletId) {
        trace!("Moving droplet {:?} south", id);
        self.update(&id, |droplet| {
            droplet.location = droplet.location.south();
        })
    }
}
#[cfg(test)]
pub mod tests {
use super::*;
use grid::parse::tests::parse_strings;
/// Renders a `DropletId` as the single character used in grid-art strings.
/// Panics if the id does not fit in a byte.
pub fn id2c(id: &DropletId) -> char {
    let small = id.id;
    assert!(small < 255);
    (small as u8) as char
}
/// Inverse of `id2c`: converts a character into a `DropletId` with process
/// id 0. Panics for characters outside the byte range the original scan
/// covered.
pub fn c2id(c: char) -> DropletId {
    // The original implementation linearly scanned every u8 in 0x00..0xff
    // looking for a matching char; a direct range check is equivalent,
    // since only code points below 0xff could ever match that scan.
    let code = c as u32;
    if code < 0xff {
        DropletId {
            id: code as usize,
            process_id: 0,
        }
    } else {
        panic!("Can't make {} a u8", c);
    }
}
/// Builds a `GridView` from ASCII art describing the chip: each non-empty
/// character becomes a droplet whose id is derived from the character
/// itself (see `c2id`), so equal characters always get equal ids.
pub fn parse_gridview(strs: &[&str]) -> GridView {
    // same chars are guaranteed to have the same ids
    let (grid, blobs) = parse_strings(&strs);
    let mut snapshot = Snapshot::default();
    for (ch, blob) in blobs.iter() {
        let id = c2id(*ch);
        snapshot.droplets.insert(id, blob.to_droplet(id));
    }
    let mut gv = GridView::new(grid);
    // replace the initial empty snapshot with the parsed one
    gv.planned[0] = snapshot;
    gv
}
/// Like `parse_gridview`, but returns only the resulting `Snapshot`
/// (the first — and only — planned snapshot of the parsed gridview).
pub fn parse_snapshot(strs: &[&str]) -> Snapshot {
    let mut gv = parse_gridview(strs);
    gv.planned.remove(0).unwrap()
}
/// Parses both ASCII descriptions, matches the snapshot's droplets against
/// the chip blobs, and asserts each droplet matched the blob drawn with the
/// same character. Returns the matching, or `None` when matching failed.
fn check_all_matched(
    snapshot_strs: &[&str],
    blob_strs: &[&str],
) -> Option<Map<DropletId, SimpleBlob>> {
    let snapshot = parse_snapshot(&snapshot_strs);
    let (_, chip_blobs) = parse_strings(&blob_strs);
    let blobs: Vec<SimpleBlob> = chip_blobs.values().cloned().collect();
    let result: Map<DropletId, SimpleBlob> = snapshot.match_with_blobs(&blobs)?;
    // create the expected map by mapping the ids in the snapshot
    // to the associated blob which corresponds to the character
    let mut expected: Map<DropletId, SimpleBlob> = Map::new();
    for id in snapshot.droplets.keys() {
        expected.insert(*id, chip_blobs[&id2c(id)].clone());
    }
    for id in expected.keys() {
        // we can't compare blobs or droplets directly, so we compare the
        // droplet_info derived from each
        assert_eq!(
            result.get(id).map(|blob| blob.to_droplet(*id).info()),
            expected.get(id).map(|blob| blob.to_droplet(*id).info())
        )
    }
    Some(result)
}
/// When the snapshot and the chip view are drawn identically, matching
/// must succeed.
#[test]
fn test_no_diff() {
    let strs = [
        "aa..........c",
        ".....bb......",
        ".............",
        ".............",
    ];
    let matched = check_all_matched(&strs, &strs);
    assert!(matched.is_some());
}
#[test]
fn test_location_diff() {
let exec_strs = vec![
"aa..........c",
".....bb......",
".............",
".............",
];
let chip_strs = vec![
"aa...........",
"............c",
".....bb......",
".............", | ]; | random_line_split |
|
gridview.rs | for mut cmd in self.commands_to_finalize.drain(..) {
debug!("Sending command back for replanning: {:#?}", cmd);
if let Err((mut cmd, err)) = gridview.plan(cmd) {
cmd.abort(err);
}
}
}
/// Collects `DropletInfo` for every droplet, optionally restricted to a
/// single process id.
pub fn droplet_info(&self, pid_option: Option<ProcessId>) -> Vec<DropletInfo> {
    let mut infos = Vec::new();
    for d in self.droplets.values() {
        let keep = match pid_option {
            Some(pid) => d.id.process_id == pid,
            None => true,
        };
        if keep {
            infos.push(d.info());
        }
    }
    infos
}
/// Returns the first colliding pair of droplets, if any, together with
/// their (non-positive) collision distance. Droplets sharing a collision
/// group are allowed to touch and are skipped.
fn get_collision(&self) -> Option<(i32, Droplet, Droplet)> {
    // O(n^2) over all ordered pairs; each unordered pair is visited twice,
    // but we return on the first hit so it doesn't matter for correctness.
    for (id1, droplet1) in &self.droplets {
        for (id2, droplet2) in &self.droplets {
            if id1 == id2 {
                continue;
            }
            if droplet1.collision_group == droplet2.collision_group {
                continue;
            }
            let distance = droplet1.collision_distance(droplet2);
            // distance <= 0 means the droplets overlap or are adjacent
            if distance <= 0 {
                return Some((distance, droplet1.clone(), droplet2.clone()));
            }
        }
    }
    None
}
/// Converts every droplet in this snapshot into a `SimpleBlob`.
pub fn to_blobs(&self) -> Vec<SimpleBlob> {
    let mut blobs = Vec::with_capacity(self.droplets.len());
    for droplet in self.droplets.values() {
        blobs.push(droplet.to_blob());
    }
    blobs
}
/// Takes a map of droplet ids to droplets (as in that
/// of the planner/executor view) and a vector of blobs
/// (as in that of the chip view) and returns a matching
/// of droplet ids to closest matching blobs.
///
/// Can currently only handle where both views contain
/// the same number of 'droplets'
fn match_with_blobs<B: Blob>(&self, blobs: &[B]) -> Option<Map<DropletId, B>> {
    // Ensure lengths are the same; the assignment problem below is only
    // defined for a square cost matrix.
    if self.droplets.len()!= blobs.len() {
        error!("Expected and actual droplets are of different lengths");
        return None;
    }
    let mut result = Map::new(); // to be returned
    let mut ids = vec![]; // store corresponding ids to indices
    let mut matches = vec![]; // store similarity between blobs/droplets
    let n = blobs.len();
    // store the id of each droplet in its corresponding
    // index in 'ids', then store the similarity of each
    // droplet to each blob in'matches'
    // (row = droplet, column = blob, in row-major order)
    for (&id, droplet) in &self.droplets {
        ids.push(id);
        for blob in blobs {
            let similarity = blob.get_similarity(&droplet);
            // must be non-negative for the algorithm to work
            assert!(similarity >= 0);
            matches.push(similarity);
        }
    }
    // convert the matches vector to a matrix
    // input should be [1,2,3,4], where the output
    // matrix is [[1,2],[3,4]]
    let m: Matrix<i32> = Matrix::from_vec(n, n, matches);
    // solve the assignment problem (Kuhn-Munkres / Hungarian algorithm,
    // minimizing total dissimilarity): km is a vector of size n where the
    // value at each index corresponds to the index of a blob
    let (_c, km) = kuhn_munkres_min(&m);
    for i in 0..n {
        result.insert(ids[i], blobs[km[i]].clone());
    }
    Some(result)
}
// this will take commands_to_finalize from the old snapshot into the new
// one if an error is found
/// Reconciles this snapshot with the blobs actually observed on the chip.
/// Returns a corrected `Snapshot` when any droplet's location or dimensions
/// disagree with the observation; returns `None` when everything matched.
/// Volumes are always synced in place, even when no error is reported.
pub fn correct(&mut self, blobs: &[impl Blob]) -> Option<Snapshot> {
    let blob_matching = self.match_with_blobs(blobs)?;
    let mut was_error = false;
    let new_droplets: Map<_, _> = blob_matching
        .iter()
        .map(|(&id, blob)| {
            let d = self.droplets.get_mut(&id).unwrap();
            let d_new = blob.to_droplet(id);
            // location/dimension mismatch means planning diverged from
            // reality; flag it so the caller can roll back
            if d.location!= d_new.location || d.dimensions!= d_new.dimensions {
                info!("Found error in droplet {:?}", id);
                debug!("Droplet error\n Expected: {:#?}\n Found: {:#?}", d, d_new);
                was_error = true;
            }
            // HACK FIXME this mutation is not great
            if (d.volume - d_new.volume).abs() > 1.0 {
                info!(
                    "volume of {} changed: {} -> {}",
                    id.id, d.volume, d_new.volume
                )
            }
            d.volume = d_new.volume;
            (id, d_new)
        }).collect();
    if was_error {
        let mut new_snapshot = Snapshot {
            droplets: new_droplets,
            commands_to_finalize: Vec::new(),
        };
        // move the pending commands over so they get replanned/finalized
        // against the corrected state
        ::std::mem::swap(
            &mut new_snapshot.commands_to_finalize,
            &mut self.commands_to_finalize,
        );
        Some(new_snapshot)
    } else {
        None
    }
}
/// Compares the droplet with the given id between this snapshot and
/// `other`. Panics if the id is missing from `self`.
pub fn diff_droplet(&self, id: &DropletId, other: &Snapshot) -> DropletDiff {
    use self::DropletDiff::*;
    let droplet = self
        .droplets
        .get(id)
        .expect("id should be in self snapshot");
    match other.droplets.get(id) {
        None => Disappeared,
        Some(other_droplet) => {
            // NOTE we only care about location diffs for now
            let from = droplet.location;
            let to = other_droplet.location;
            if from == to {
                DidNotMove
            } else {
                // for now, just assume we are only moving one spot at a time
                // FIXME HACK
                // assert_eq!((&from - &to).norm(), 1);
                Moved { from, to }
            }
        }
    }
}
/// Compares what was planned against what actually happened, starting from
/// this snapshot. Returns the grid edges (from, to) along which a droplet
/// was planned to take a single step but did not move — these are the
/// candidate "bad" electrode transitions.
pub fn get_error_edges(
    &self,
    planned_outcome: &Snapshot,
    actual_outcome: &Snapshot,
) -> Vec<(Location, Location)> {
    use self::DropletDiff::*;
    self.droplets
        .keys()
        .filter_map(|id| {
            let planned_diff = self.diff_droplet(id, planned_outcome);
            let actual_diff = self.diff_droplet(id, actual_outcome);
            match (planned_diff, actual_diff) {
                (Moved { from, to }, DidNotMove) => {
                    // only single-step moves count as an edge; anything
                    // longer is suspicious and just logged
                    if (&from - &to).norm() == 1 {
                        Some((from, to))
                    } else {
                        warn!("Droplet {} jumped from {} to {}!", id.id, from, to);
                        None
                    }
                }
                _ => None,
            }
        }).collect()
}
}
/// What the executor gets back from `GridView::execute`.
#[derive(Debug)]
pub enum ExecResponse {
    /// A fully planned snapshot, ready to be carried out.
    Step(Snapshot),
    /// Nothing is planned yet, but the gridview is still running.
    NotReady,
    /// The gridview was closed and all planned snapshots were consumed.
    Done,
}
impl GridView {
pub fn new(grid: Grid) -> GridView {
let mut planned = VecDeque::new();
planned.push_back(Snapshot::default());
#[cfg(feature = "pi")]
let pi = match ::std::env::var("PUDDLE_PI") {
Ok(s) => if s == "1" {
let mut pi = RaspberryPi::new().unwrap();
info!("Initialized the pi!");
Some(pi)
} else {
warn!("Couldn't read PUDDLE_PI={}", s);
None
},
Err(_) => {
info!("Did not start the pi!");
None
}
};
GridView {
grid: grid,
planned,
completed: Vec::new(),
done: false,
bad_edges: Set::new(),
#[cfg(feature = "pi")]
pi,
}
}
pub fn close(&mut self) {
info!("Marking gridview as DONE!");
self.done = true;
}
pub fn execute(&mut self) -> ExecResponse {
use self::ExecResponse::*;
// compare with len - 1 because we wouldn't want to "write out" a state
// that hasn't been fully planned
let resp = if let Some(planned_snapshot) = self.planned.pop_front() {
Step(planned_snapshot)
} else if self.done {
Done
} else {
NotReady
};
trace!(
"execute sending {:?}. Completed: {}, planned: {}.",
resp,
self.completed.len(),
self.planned.len(),
);
resp
}
pub fn commit_pending(&mut self, mut snapshot: Snapshot) {
#[cfg(not(feature = "pi"))]
snapshot.finalize();
#[cfg(feature = "pi")]
snapshot.finalize(self.pi.as_mut());
self.completed.push(snapshot);
}
pub fn snapshot(&self) -> &Snapshot {
self.planned.back().unwrap()
}
// TODO probably shouldn't provide this
pub fn snapshot_mut(&mut self) -> &mut Snapshot |
pub fn snapshot_ensure(&mut self) {
if self.planned.is_empty() {
let last = self.completed.last().unwrap();
self.planned.push_back(last.new_with_same_droplets())
}
}
pub fn exec_snapshot(&self) -> &Snapshot {
self.completed.last().unwrap()
}
fn tick(&mut self) {
let new_snapshot = {
let just_planned = self.planned.back().unwrap();
if let Some(col) = just_planned.get_collision() {
panic!("collision: {:#?}", col);
};
just_planned.new_with_same_droplets()
};
self.planned.push_back(new_snapshot);
trace!("TICK! len={}", self.planned.len());
}
/// Applies `func` to the droplet with the given id in the snapshot
/// currently being planned. Panics if the droplet does not exist there.
fn update(&mut self, id: DropletId, func: impl FnOnce(&mut Droplet)) {
    let now = self.planned.back_mut().unwrap();
    let droplet = now
        .droplets
        .get_mut(&id)
        // the old message said "remove", but this is an update
        .unwrap_or_else(|| panic!("Tried to update a non-existent droplet: {:?}", id));
    func(droplet);
}
pub fn plan_droplet_info(&self, pid_option: Option<ProcessId>) -> Vec<DropletInfo> {
// gets from the planner for now
self.planned.back().unwrap().droplet_info(pid_option)
}
pub fn take_paths(&mut self, paths: &Map<DropletId, Path>, final_tick: bool) {
let max_len = paths.values().map(|path| path.len()).max().unwrap_or(0);
// make sure that all droplets start where they are at this time step
for (id, path) in paths.iter() {
let snapshot = self.planned.back().unwrap();
let droplet = &snapshot.droplets[&id];
assert_eq!(droplet.location, path[0]);
}
for i in 1..max_len {
for (&id, path) in paths.iter() {
if i < path.len() {
self.update(id, |droplet| {
assert!(droplet.location.distance_to(&path[i]) <= 1);
droplet.location = path[i];
});
}
}
if i < max_len - 1 || final_tick {
self.tick();
}
}
}
pub fn subview(
&mut self,
ids: impl IntoIterator<Item = DropletId>,
mapping: Map<Location, Location>,
) -> GridSubView {
GridSubView {
backing_gridview: self,
mapping: mapping,
ids: ids.into_iter().collect(),
}
}
pub fn register(&mut self, cmd: Box<dyn Command>) {
// this goes in the *just planned* thing, not the one currently being planned.
let just_planned = self.planned.len() - 2;
self.planned[just_planned].commands_to_finalize.push(cmd)
}
pub fn rollback(&mut self, new_snapshot: &Snapshot) {
let old_planned: Vec<_> = self.planned.drain(..).collect();
self.planned
.push_back(new_snapshot.new_with_same_droplets());
assert_eq!(self.planned.len(), 1);
for planned_snapshot in old_planned {
planned_snapshot.abort(self)
}
}
pub fn perturb(&self, rng: &mut impl Rng, snapshot: &Snapshot) -> Option<Snapshot> {
let now = snapshot;
let then = self.completed.last()?;
let id = {
let ids: Vec<_> = now.droplets.keys().collect();
match rng.choose(ids.as_slice()) {
Some(&&id) => id,
None => return None,
}
};
let mut now2 = now.new_with_same_droplets();
if let Some(old_droplet) = then.droplets.get(&id) {
let was_there = now2.droplets.insert(id, old_droplet.clone());
assert!(was_there.is_some());
}
Some(now2)
}
/// Records the edges along which droplets failed to move, comparing the
/// planned outcome against what was actually observed. Each edge is stored
/// in both orientations so later lookups are direction-independent.
pub fn add_error_edges(&mut self, planned: &Snapshot, actual: &Snapshot) {
    let previous = self.completed.last().unwrap();
    let edges = previous.get_error_edges(planned, actual);
    let n_edges = edges.len();
    for (loc1, loc2) in edges.iter() {
        // for now, insert edges both ways
        self.bad_edges.insert((*loc1, *loc2));
        self.bad_edges.insert((*loc2, *loc1));
    }
    // log after inserting so the reported total is accurate; the old
    // message also had its words out of order ("Added error {} edges")
    warn!(
        "Added {} error edges, now there are {}: {:?}",
        n_edges,
        self.bad_edges.len() / 2,
        edges,
    );
}
}
pub struct GridSubView<'a> {
backing_gridview: &'a mut GridView,
mapping: Map<Location, Location>,
ids: Set<DropletId>,
}
impl<'a> GridSubView<'a> {
pub fn tick(&mut self) {
self.backing_gridview.tick()
}
#[cfg(feature = "pi")]
pub fn with_pi<T>(&mut self, f: impl FnOnce(&mut RaspberryPi) -> T) -> Option<T> {
self.backing_gridview.pi.as_mut().map(f)
}
pub fn get_electrode(&self, loc: &Location) -> Option<&Electrode> {
let actual_loc = self.mapping.get(loc)?;
self.backing_gridview.grid.get_cell(&actual_loc)
}
// TODO: translate or somehow hide the untranslated location of this
pub fn get(&self, id: &DropletId) -> &Droplet {
assert!(self.ids.contains(&id));
&self.backing_gridview.snapshot().droplets[id]
}
fn get_mut(&mut self, id: &DropletId) -> &mut Droplet {
assert!(self.ids.contains(&id));
self.backing_gridview
.snapshot_mut()
.droplets
.get_mut(id)
.unwrap()
}
pub fn insert(&mut self, mut droplet: Droplet) {
let new_loc = self.mapping.get(&droplet.location);
trace!("Inserting {:#?} at {:?}", droplet, new_loc);
droplet.location = *new_loc.unwrap();
let was_not_there = self.ids.insert(droplet.id);
assert!(was_not_there);
let snapshot = self.backing_gridview.snapshot_mut();
let was_there = snapshot.droplets.insert(droplet.id, droplet);
assert!(was_there.is_none());
}
pub fn remove(&mut self, id: &DropletId) -> Droplet {
let was_there = self.ids.remove(id);
assert!(was_there);
let snapshot = self.backing_gridview.snapshot_mut();
let mut droplet = snapshot.droplets.remove(id).unwrap();
// FIXME this is pretty dumb
let (unmapped_loc, _) = self
.mapping
.iter()
.find(|(_, &v)| v == droplet.location)
.unwrap();
droplet.location = *unmapped_loc;
droplet
}
/// Panics unless every cell covered by the droplet lies inside this
/// subview's mapping.
fn check_droplet(&self, id: &DropletId) {
    // TODO will this have translated or real location??
    let droplet = self.get(id);
    let mapped_to: Set<_> = self.mapping.values().collect();
    // TODO this is pretty slow
    for i in 0..droplet.dimensions.y {
        for j in 0..droplet.dimensions.x {
            let loc = Location {
                y: droplet.location.y + i,
                x: droplet.location.x + j,
            };
            // fixed mangled `if!` token spacing
            if !mapped_to.contains(&loc) {
                panic!("{} was unmapped!, mapping: {:#?}", loc, self.mapping);
            }
        }
    }
}
fn update(&mut self, id: &DropletId, func: impl FnOnce(&mut Droplet)) {
func(self.get_mut(id));
self.check_droplet(id);
}
pub fn move_west(&mut self, id: DropletId) {
trace!("Moving droplet {:?} west", id);
self.update(&id, |droplet| {
droplet.location = droplet.location.west();
})
}
pub fn move_east(&mut self, id: DropletId) {
trace!("Moving droplet {:?} east", id);
self.update(&id, |droplet| {
droplet.location = droplet.location.east();
})
}
pub fn move_north(&mut self, id: DropletId) {
trace!("Moving droplet {:?} north", id);
self.update(&id, |droplet| {
droplet.location = droplet.location.north();
})
}
pub fn move_south(&mut self, id: DropletId) {
trace!("Moving droplet {:?} south", id);
self.update(&id, |droplet| {
droplet.location | {
self.planned.back_mut().unwrap()
} | identifier_body |
gridview.rs | {
Disappeared,
DidNotMove,
Moved { from: Location, to: Location },
}
impl Snapshot {
/// Creates a fresh `Snapshot` containing clones of this snapshot's
/// droplets, with their destinations cleared.
pub fn new_with_same_droplets(&self) -> Snapshot {
    let mut new_snapshot = Snapshot::default();
    new_snapshot.droplets = self.droplets.clone();
    // clear out the destination because we're going to replan
    for d in new_snapshot.droplets.values_mut() {
        d.destination = None;
    }
    new_snapshot
}
#[cfg(not(feature = "pi"))]
fn finalize(&mut self) {
// we need to drain this so we can mutate the command without mutating
// self, as we need to pass self into cmd.finalize
// this feels pretty ugly....
let mut x: Vec<_> = self.commands_to_finalize.drain(..).collect();
for cmd in &mut x {
debug!("Finalizing command: {:#?}", cmd);
cmd.finalize(self)
}
self.commands_to_finalize = x;
}
#[cfg(feature = "pi")]
fn finalize(&mut self, pi: Option<&mut RaspberryPi>) {
// we need to drain this so we can mutate the command without mutating
// self, as we need to pass self into cmd.finalize
// this feels pretty ugly....
let mut x: Vec<_> = self.commands_to_finalize.drain(..).collect();
if let Some(pi) = pi {
for cmd in &mut x {
debug!("Finalizing command: {:#?}", cmd);
cmd.finalize(self, Some(pi))
}
} else {
for cmd in &mut x {
debug!("Finalizing command: {:#?}", cmd);
cmd.finalize(self, None)
}
}
self.commands_to_finalize = x;
}
pub fn abort(mut self, gridview: &mut GridView) {
for mut cmd in self.commands_to_finalize.drain(..) {
debug!("Sending command back for replanning: {:#?}", cmd);
if let Err((mut cmd, err)) = gridview.plan(cmd) {
cmd.abort(err);
}
}
}
pub fn droplet_info(&self, pid_option: Option<ProcessId>) -> Vec<DropletInfo> {
self.droplets
.values()
.filter(|&d| pid_option.map_or(true, |pid| d.id.process_id == pid))
.map(|d| d.info())
.collect()
}
/// Returns an invalid droplet, if any.
fn get_collision(&self) -> Option<(i32, Droplet, Droplet)> {
for (id1, droplet1) in &self.droplets {
for (id2, droplet2) in &self.droplets {
if id1 == id2 {
continue;
}
if droplet1.collision_group == droplet2.collision_group {
continue;
}
let distance = droplet1.collision_distance(droplet2);
if distance <= 0 {
return Some((distance, droplet1.clone(), droplet2.clone()));
}
}
}
None
}
pub fn to_blobs(&self) -> Vec<SimpleBlob> {
self.droplets.values().map(|d| d.to_blob()).collect()
}
/// Takes a map of droplet ids to droplets (as in that
/// of the planner/executor view) and a vector of blobs
/// (as in that of the chip view) and returns a matching
/// of droplet ids to closest matching blobs.
///
/// Can currently only handle where both views contain
/// the same number of 'droplets'
fn match_with_blobs<B: Blob>(&self, blobs: &[B]) -> Option<Map<DropletId, B>> {
// Ensure lengths are the same
if self.droplets.len()!= blobs.len() {
error!("Expected and actual droplets are of different lengths");
return None;
}
let mut result = Map::new(); // to be returned
let mut ids = vec![]; // store corresponding ids to indeces
let mut matches = vec![]; // store similarity between blobs/droplets
let n = blobs.len();
// store the id of each droplet in its corresponding
// index in 'ids', then store the similarity of each
// droplet to each blob in'matches'
for (&id, droplet) in &self.droplets {
ids.push(id);
for blob in blobs {
let similarity = blob.get_similarity(&droplet);
// must be non-negative for the algorithm to work
assert!(similarity >= 0);
matches.push(similarity);
}
}
// convert the matches vector to a matrix
// input should be [1,2,3,4], where the output
// matrix is [[1,2],[3,4]]
let m: Matrix<i32> = Matrix::from_vec(n, n, matches);
// km is a vector of size n where the value at each index
// corresponds to the index of a blob
let (_c, km) = kuhn_munkres_min(&m);
for i in 0..n {
result.insert(ids[i], blobs[km[i]].clone());
}
Some(result)
}
// this will take commands_to_finalize from the old snapshot into the new
// one if an error is found produced
pub fn correct(&mut self, blobs: &[impl Blob]) -> Option<Snapshot> {
let blob_matching = self.match_with_blobs(blobs)?;
let mut was_error = false;
let new_droplets: Map<_, _> = blob_matching
.iter()
.map(|(&id, blob)| {
let d = self.droplets.get_mut(&id).unwrap();
let d_new = blob.to_droplet(id);
if d.location!= d_new.location || d.dimensions!= d_new.dimensions {
info!("Found error in droplet {:?}", id);
debug!("Droplet error\n Expected: {:#?}\n Found: {:#?}", d, d_new);
was_error = true;
}
// HACK FIXME this mutation is not great
if (d.volume - d_new.volume).abs() > 1.0 {
info!(
"volume of {} changed: {} -> {}",
id.id, d.volume, d_new.volume
)
}
d.volume = d_new.volume;
(id, d_new)
}).collect();
if was_error {
let mut new_snapshot = Snapshot {
droplets: new_droplets,
commands_to_finalize: Vec::new(),
};
::std::mem::swap(
&mut new_snapshot.commands_to_finalize,
&mut self.commands_to_finalize,
);
Some(new_snapshot)
} else {
None
}
}
pub fn diff_droplet(&self, id: &DropletId, other: &Snapshot) -> DropletDiff {
use self::DropletDiff::*;
let droplet = self
.droplets
.get(id)
.expect("id should be in self snapshot");
if let Some(other_droplet) = other.droplets.get(id) {
// NOTE we only care about location diffs for now
let loc = droplet.location;
let other_loc = other_droplet.location;
if loc!= other_loc {
// for now, just assert that we are only moving one spot at a time
// FIXME HACK
// assert_eq!((&loc - &other_loc).norm(), 1);
Moved {
from: loc,
to: other_loc,
}
} else {
DidNotMove
}
} else {
Disappeared
}
}
pub fn get_error_edges(
&self,
planned_outcome: &Snapshot,
actual_outcome: &Snapshot,
) -> Vec<(Location, Location)> {
use self::DropletDiff::*;
self.droplets
.keys()
.filter_map(|id| {
let planned_diff = self.diff_droplet(id, planned_outcome);
let actual_diff = self.diff_droplet(id, actual_outcome);
match (planned_diff, actual_diff) {
(Moved { from, to }, DidNotMove) => {
if (&from - &to).norm() == 1 {
Some((from, to))
} else {
warn!("Droplet {} jumped from {} to {}!", id.id, from, to);
None
}
}
_ => None,
}
}).collect()
}
}
#[derive(Debug)]
pub enum ExecResponse {
Step(Snapshot),
NotReady,
Done,
}
impl GridView {
pub fn new(grid: Grid) -> GridView {
let mut planned = VecDeque::new();
planned.push_back(Snapshot::default());
#[cfg(feature = "pi")]
let pi = match ::std::env::var("PUDDLE_PI") {
Ok(s) => if s == "1" {
let mut pi = RaspberryPi::new().unwrap();
info!("Initialized the pi!");
Some(pi)
} else {
warn!("Couldn't read PUDDLE_PI={}", s);
None
},
Err(_) => {
info!("Did not start the pi!");
None
}
};
GridView {
grid: grid,
planned,
completed: Vec::new(),
done: false,
bad_edges: Set::new(),
#[cfg(feature = "pi")]
pi,
}
}
pub fn close(&mut self) {
info!("Marking gridview as DONE!");
self.done = true;
}
pub fn execute(&mut self) -> ExecResponse {
use self::ExecResponse::*;
// compare with len - 1 because we wouldn't want to "write out" a state
// that hasn't been fully planned
let resp = if let Some(planned_snapshot) = self.planned.pop_front() {
Step(planned_snapshot)
} else if self.done {
Done
} else {
NotReady
};
trace!(
"execute sending {:?}. Completed: {}, planned: {}.",
resp,
self.completed.len(),
self.planned.len(),
);
resp
}
pub fn commit_pending(&mut self, mut snapshot: Snapshot) {
#[cfg(not(feature = "pi"))]
snapshot.finalize();
#[cfg(feature = "pi")]
snapshot.finalize(self.pi.as_mut());
self.completed.push(snapshot);
}
pub fn snapshot(&self) -> &Snapshot {
self.planned.back().unwrap()
}
// TODO probably shouldn't provide this
pub fn snapshot_mut(&mut self) -> &mut Snapshot {
self.planned.back_mut().unwrap()
}
pub fn snapshot_ensure(&mut self) {
if self.planned.is_empty() {
let last = self.completed.last().unwrap();
self.planned.push_back(last.new_with_same_droplets())
}
}
pub fn exec_snapshot(&self) -> &Snapshot {
self.completed.last().unwrap()
}
fn tick(&mut self) {
let new_snapshot = {
let just_planned = self.planned.back().unwrap();
if let Some(col) = just_planned.get_collision() {
panic!("collision: {:#?}", col);
};
just_planned.new_with_same_droplets()
};
self.planned.push_back(new_snapshot);
trace!("TICK! len={}", self.planned.len());
}
/// Applies `func` to the droplet with the given id in the snapshot
/// currently being planned. Panics if the droplet does not exist there.
fn update(&mut self, id: DropletId, func: impl FnOnce(&mut Droplet)) {
    let now = self.planned.back_mut().unwrap();
    let droplet = now
        .droplets
        .get_mut(&id)
        // the old message said "remove", but this is an update
        .unwrap_or_else(|| panic!("Tried to update a non-existent droplet: {:?}", id));
    func(droplet);
}
pub fn plan_droplet_info(&self, pid_option: Option<ProcessId>) -> Vec<DropletInfo> {
// gets from the planner for now
self.planned.back().unwrap().droplet_info(pid_option)
}
pub fn take_paths(&mut self, paths: &Map<DropletId, Path>, final_tick: bool) {
let max_len = paths.values().map(|path| path.len()).max().unwrap_or(0);
// make sure that all droplets start where they are at this time step
for (id, path) in paths.iter() {
let snapshot = self.planned.back().unwrap();
let droplet = &snapshot.droplets[&id];
assert_eq!(droplet.location, path[0]);
}
for i in 1..max_len {
for (&id, path) in paths.iter() {
if i < path.len() {
self.update(id, |droplet| {
assert!(droplet.location.distance_to(&path[i]) <= 1);
droplet.location = path[i];
});
}
}
if i < max_len - 1 || final_tick {
self.tick();
}
}
}
pub fn subview(
&mut self,
ids: impl IntoIterator<Item = DropletId>,
mapping: Map<Location, Location>,
) -> GridSubView {
GridSubView {
backing_gridview: self,
mapping: mapping,
ids: ids.into_iter().collect(),
}
}
pub fn register(&mut self, cmd: Box<dyn Command>) {
// this goes in the *just planned* thing, not the one currently being planned.
let just_planned = self.planned.len() - 2;
self.planned[just_planned].commands_to_finalize.push(cmd)
}
pub fn rollback(&mut self, new_snapshot: &Snapshot) {
let old_planned: Vec<_> = self.planned.drain(..).collect();
self.planned
.push_back(new_snapshot.new_with_same_droplets());
assert_eq!(self.planned.len(), 1);
for planned_snapshot in old_planned {
planned_snapshot.abort(self)
}
}
pub fn perturb(&self, rng: &mut impl Rng, snapshot: &Snapshot) -> Option<Snapshot> {
let now = snapshot;
let then = self.completed.last()?;
let id = {
let ids: Vec<_> = now.droplets.keys().collect();
match rng.choose(ids.as_slice()) {
Some(&&id) => id,
None => return None,
}
};
let mut now2 = now.new_with_same_droplets();
if let Some(old_droplet) = then.droplets.get(&id) {
let was_there = now2.droplets.insert(id, old_droplet.clone());
assert!(was_there.is_some());
}
Some(now2)
}
/// Records the edges along which droplets failed to move, comparing the
/// planned outcome against what was actually observed. Each edge is stored
/// in both orientations so later lookups are direction-independent.
pub fn add_error_edges(&mut self, planned: &Snapshot, actual: &Snapshot) {
    let previous = self.completed.last().unwrap();
    let edges = previous.get_error_edges(planned, actual);
    let n_edges = edges.len();
    for (loc1, loc2) in edges.iter() {
        // for now, insert edges both ways
        self.bad_edges.insert((*loc1, *loc2));
        self.bad_edges.insert((*loc2, *loc1));
    }
    // log after inserting so the reported total is accurate; the old
    // message also had its words out of order ("Added error {} edges")
    warn!(
        "Added {} error edges, now there are {}: {:?}",
        n_edges,
        self.bad_edges.len() / 2,
        edges,
    );
}
}
pub struct GridSubView<'a> {
backing_gridview: &'a mut GridView,
mapping: Map<Location, Location>,
ids: Set<DropletId>,
}
impl<'a> GridSubView<'a> {
pub fn tick(&mut self) {
self.backing_gridview.tick()
}
#[cfg(feature = "pi")]
pub fn with_pi<T>(&mut self, f: impl FnOnce(&mut RaspberryPi) -> T) -> Option<T> {
self.backing_gridview.pi.as_mut().map(f)
}
pub fn get_electrode(&self, loc: &Location) -> Option<&Electrode> {
let actual_loc = self.mapping.get(loc)?;
self.backing_gridview.grid.get_cell(&actual_loc)
}
// TODO: translate or somehow hide the untranslated location of this
pub fn get(&self, id: &DropletId) -> &Droplet {
assert!(self.ids.contains(&id));
&self.backing_gridview.snapshot().droplets[id]
}
fn get_mut(&mut self, id: &DropletId) -> &mut Droplet {
assert!(self.ids.contains(&id));
self.backing_gridview
.snapshot_mut()
.droplets
.get_mut(id)
.unwrap()
}
pub fn insert(&mut self, mut droplet: Droplet) {
let new_loc = self.mapping.get(&droplet.location);
trace!("Inserting {:#?} at {:?}", droplet, new_loc);
droplet.location = *new_loc.unwrap();
let was_not_there = self.ids.insert(droplet.id);
assert!(was_not_there);
let snapshot = self.backing_gridview.snapshot_mut();
let was_there = snapshot.droplets.insert(droplet.id, droplet);
assert!(was_there.is_none());
}
pub fn remove(&mut self, id: &DropletId) -> Droplet {
let was_there = self.ids.remove(id);
assert!(was_there);
let snapshot = self.backing_gridview.snapshot_mut();
let mut droplet = snapshot.droplets.remove(id).unwrap();
// FIXME this is pretty dumb
let (unmapped_loc, _) = self
.mapping
.iter()
.find(|(_, &v)| v == droplet.location)
.unwrap();
droplet.location = *unmapped_loc;
droplet
}
fn check_droplet(&self, id: &DropletId) | DropletDiff | identifier_name |
|
wix.rs | use super::common;
use super::path_utils::{copy, Options};
use super::settings::Settings;
use handlebars::{to_json, Handlebars};
use lazy_static::lazy_static;
use regex::Regex;
use serde::Serialize;
use sha2::Digest;
use uuid::Uuid;
use zip::ZipArchive;
use std::collections::BTreeMap;
use std::fs::{create_dir_all, remove_dir_all, write, File};
use std::io::{Cursor, Read, Write};
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
// URLS for the WIX toolchain. Can be used for crossplatform compilation.
pub const WIX_URL: &str =
"https://github.com/wixtoolset/wix3/releases/download/wix3112rtm/wix311-binaries.zip";
pub const WIX_SHA256: &str = "2c1888d5d1dba377fc7fa14444cf556963747ff9a0a289a3599cf09da03b9e2e";
// For Cross Platform Complilation.
// const VC_REDIST_X86_URL: &str =
// "https://download.visualstudio.microsoft.com/download/pr/c8edbb87-c7ec-4500-a461-71e8912d25e9/99ba493d660597490cbb8b3211d2cae4/vc_redist.x86.exe";
// const VC_REDIST_X86_SHA256: &str =
// "3a43e8a55a3f3e4b73d01872c16d47a19dd825756784f4580187309e7d1fcb74";
// const VC_REDIST_X64_URL: &str =
// "https://download.visualstudio.microsoft.com/download/pr/9e04d214-5a9d-4515-9960-3d71398d98c3/1e1e62ab57bbb4bf5199e8ce88f040be/vc_redist.x64.exe";
// const VC_REDIST_X64_SHA256: &str =
// "d6cd2445f68815fe02489fafe0127819e44851e26dfbe702612bc0d223cbbc2b";
// A v4 UUID that was generated specifically for tauri-bundler, to be used as a
// namespace for generating v5 UUIDs from bundle identifier strings.
const UUID_NAMESPACE: [u8; 16] = [
0xfd, 0x85, 0x95, 0xa8, 0x17, 0xa3, 0x47, 0x4e, 0xa6, 0x16, 0x76, 0x14, 0x8d, 0xfa, 0x0c, 0x7b,
];
// setup for the main.wxs template file using handlebars. Dynamically
// changes the template on compilation based on the application metadata.
lazy_static! {
    static ref HANDLEBARS: Handlebars<'static> = {
        let mut handlebars = Handlebars::new();
        handlebars
            .register_template_string("main.wxs", include_str!("templates/main.wxs"))
            // map_err is the idiomatic form of `.or_else(|e| Err(e.to_string()))`
            .map_err(|e| e.to_string())
            .expect("Failed to setup handlebar template");
        handlebars
    };
}
/// Mapper between a resource directory name and its ResourceDirectory descriptor.
type ResourceMap = BTreeMap<String, ResourceDirectory>;
/// A binary to bundle with WIX.
/// External binaries or additional project binaries are represented with this data structure.
/// This data structure is needed because WIX requires each path to have its own `id` and `guid`.
#[derive(Serialize)]
struct | {
/// the GUID to use on the WIX XML.
guid: String,
/// the id to use on the WIX XML.
id: String,
/// the binary path.
path: String,
}
/// A Resource file to bundle with WIX.
/// This data structure is needed because WIX requires each path to have its own `id` and `guid`.
#[derive(Serialize, Clone)]
struct ResourceFile {
/// the GUID to use on the WIX XML.
guid: String,
/// the id to use on the WIX XML.
id: String,
/// the file path.
path: String,
}
/// A resource directory to bundle with WIX.
/// This data structure is needed because WIX requires each path to have its own `id` and `guid`.
#[derive(Serialize)]
struct ResourceDirectory {
/// the directory name of the described resource.
name: String,
/// the files of the described resource directory.
files: Vec<ResourceFile>,
/// the directories that are children of the described resource directory.
directories: Vec<ResourceDirectory>,
}
impl ResourceDirectory {
    /// Adds a file to this directory descriptor.
    fn add_file(&mut self, file: ResourceFile) {
        self.files.push(file);
    }

    /// Generates the wix XML string to bundle this directory's resources
    /// recursively. Returns the XML fragment together with the component
    /// ids of every file it (transitively) contains, so the caller can
    /// reference those components elsewhere in the installer.
    fn get_wix_data(self) -> crate::Result<(String, Vec<String>)> {
        // String::new() instead of String::from("") — same value, idiomatic
        let mut files = String::new();
        let mut file_ids = Vec::new();
        for file in self.files {
            file_ids.push(file.id.clone());
            files.push_str(
                format!(
                    r#"<Component Id="{id}" Guid="{guid}" Win64="$(var.Win64)" KeyPath="yes"><File Id="PathFile_{id}" Source="{path}" /></Component>"#,
                    id = file.id,
                    guid = file.guid,
                    path = file.path
                ).as_str()
            );
        }
        let mut directories = String::new();
        for directory in self.directories {
            let (wix_string, ids) = directory.get_wix_data()?;
            // accumulate the child ids alongside our own (extend replaces
            // the old element-by-element push loop)
            file_ids.extend(ids);
            directories.push_str(wix_string.as_str());
        }
        let wix_string = format!(
            r#"<Directory Id="{name}" Name="{name}">{contents}</Directory>"#,
            name = self.name,
            contents = format!("{}{}", files, directories)
        );
        Ok((wix_string, file_ids))
    }
}
/// Copies the icons to the binary path, under the `resources` folder,
/// and returns the path to that directory.
fn copy_icons(settings: &Settings) -> crate::Result<PathBuf> {
let base_dir = settings.project_out_directory();
let resource_dir = base_dir.join("resources");
let mut image_path = PathBuf::from(settings.project_out_directory());
// pop off till in tauri_src dir
image_path.pop();
image_path.pop();
// get icon dir and icon file.
let image_path = image_path.join("icons");
let opts = super::path_utils::Options::default();
copy(
image_path,
&resource_dir,
&Options {
copy_files: true,
overwrite: true,
..opts
},
)?;
Ok(resource_dir)
}
/// Function used to download Wix and VC_REDIST. Checks SHA256 to verify the download.
fn download_and_verify(url: &str, hash: &str) -> crate::Result<Vec<u8>> {
common::print_info(format!("Downloading {}", url).as_str())?;
let response = attohttpc::get(url).send()?;
let data: Vec<u8> = response.bytes()?;
common::print_info("validating hash")?;
let mut hasher = sha2::Sha256::new();
hasher.update(&data);
let url_hash = hasher.finalize().to_vec();
let expected_hash = hex::decode(hash)?;
if expected_hash == url_hash {
Ok(data)
} else {
Err(crate::Error::HashError)
}
}
/// The installer directory of the app.
fn app_installer_dir(settings: &Settings) -> crate::Result<PathBuf> {
let arch = match settings.binary_arch() {
"x86" => "x86",
"x86_64" => "x64",
target => {
return Err(crate::Error::ArchError(format!(
"Unsupported architecture: {}",
target
)))
}
};
let package_base_name = format!(
"{}_{}_{}",
settings.main_binary_name().replace(".exe", ""),
settings.version_string(),
arch
);
Ok(
settings
.project_out_directory()
.to_path_buf()
.join(format!("bundle/msi/{}.msi", package_base_name)),
)
}
/// Extracts the zips from Wix and VC_REDIST into a useable path.
fn extract_zip(data: &Vec<u8>, path: &Path) -> crate::Result<()> {
let cursor = Cursor::new(data);
let mut zipa = ZipArchive::new(cursor)?;
for i in 0..zipa.len() {
let mut file = zipa.by_index(i)?;
let dest_path = path.join(file.name());
let parent = dest_path.parent().expect("Failed to get parent");
if!parent.exists() {
create_dir_all(parent)?;
}
let mut buff: Vec<u8> = Vec::new();
file.read_to_end(&mut buff)?;
let mut fileout = File::create(dest_path).expect("Failed to open file");
fileout.write_all(&buff)?;
}
Ok(())
}
/// Generates the UUID for the Wix template.
fn generate_package_guid(settings: &Settings) -> Uuid {
generate_guid(settings.bundle_identifier().as_bytes())
}
/// Generates a GUID.
fn generate_guid(key: &[u8]) -> Uuid {
let namespace = Uuid::from_bytes(UUID_NAMESPACE);
Uuid::new_v5(&namespace, key)
}
// Specifically goes and gets Wix and verifies the download via Sha256
pub fn get_and_extract_wix(path: &Path) -> crate::Result<()> {
common::print_info("Verifying wix package")?;
let data = download_and_verify(WIX_URL, WIX_SHA256)?;
common::print_info("extracting WIX")?;
extract_zip(&data, path)
}
// For if bundler needs DLL files.
// fn run_heat_exe(
// wix_toolset_path: &Path,
// build_path: &Path,
// harvest_dir: &Path,
// platform: &str,
// ) -> Result<(), String> {
// let mut args = vec!["dir"];
// let harvest_str = harvest_dir.display().to_string();
// args.push(&harvest_str);
// args.push("-platform");
// args.push(platform);
// args.push("-cg");
// args.push("AppFiles");
// args.push("-dr");
// args.push("APPLICATIONFOLDER");
// args.push("-gg");
// args.push("-srd");
// args.push("-out");
// args.push("appdir.wxs");
// args.push("-var");
// args.push("var.SourceDir");
// let heat_exe = wix_toolset_path.join("heat.exe");
// let mut cmd = Command::new(&heat_exe)
// .args(&args)
// .stdout(Stdio::piped())
// .current_dir(build_path)
// .spawn()
// .expect("error running heat.exe");
// {
// let stdout = cmd.stdout.as_mut().unwrap();
// let reader = BufReader::new(stdout);
// for line in reader.lines() {
// info!(logger, "{}", line.unwrap());
// }
// }
// let status = cmd.wait().unwrap();
// if status.success() {
// Ok(())
// } else {
// Err("error running heat.exe".to_string())
// }
// }
/// Runs the Candle.exe executable for Wix. Candle parses the wxs file and generates the code for building the installer.
fn run_candle(
settings: &Settings,
wix_toolset_path: &Path,
build_path: &Path,
wxs_file_name: &str,
) -> crate::Result<()> {
let arch = match settings.binary_arch() {
"x86_64" => "x64",
"x86" => "x86",
target => {
return Err(crate::Error::ArchError(format!(
"unsupported target: {}",
target
)))
}
};
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
let args = vec![
"-arch".to_string(),
arch.to_string(),
wxs_file_name.to_string(),
format!(
"-dSourceDir={}",
settings.binary_path(main_binary).display()
),
];
let candle_exe = wix_toolset_path.join("candle.exe");
common::print_info(format!("running candle for {}", wxs_file_name).as_str())?;
let mut cmd = Command::new(&candle_exe);
cmd
.args(&args)
.stdout(Stdio::piped())
.current_dir(build_path);
common::print_info("running candle.exe")?;
common::execute_with_output(&mut cmd).map_err(|_| crate::Error::CandleError)
}
/// Runs the Light.exe file. Light takes the generated code from Candle and produces an MSI Installer.
fn run_light(
wix_toolset_path: &Path,
build_path: &Path,
wixobjs: &[&str],
output_path: &Path,
) -> crate::Result<PathBuf> {
let light_exe = wix_toolset_path.join("light.exe");
let mut args: Vec<String> = vec![
"-ext".to_string(),
"WixUIExtension".to_string(),
"-o".to_string(),
output_path.display().to_string(),
];
for p in wixobjs {
args.push(p.to_string());
}
let mut cmd = Command::new(&light_exe);
cmd
.args(&args)
.stdout(Stdio::piped())
.current_dir(build_path);
common::print_info(format!("running light to produce {}", output_path.display()).as_str())?;
common::execute_with_output(&mut cmd)
.map(|_| output_path.to_path_buf())
.map_err(|_| crate::Error::LightError)
}
// fn get_icon_data() -> crate::Result<()> {
// Ok(())
// }
// Entry point for bundling and creating the MSI installer. For now the only supported platform is Windows x64.
pub fn build_wix_app_installer(
settings: &Settings,
wix_toolset_path: &Path,
) -> crate::Result<PathBuf> {
let arch = match settings.binary_arch() {
"x86_64" => "x64",
"x86" => "x86",
target => {
return Err(crate::Error::ArchError(format!(
"unsupported target: {}",
target
)))
}
};
// target only supports x64.
common::print_info(format!("Target: {}", arch).as_str())?;
let output_path = settings
.project_out_directory()
.join("bundle/msi")
.join(arch);
let mut data = BTreeMap::new();
if let Ok(tauri_config) = crate::bundle::tauri_config::get() {
data.insert(
"embedded_server",
to_json(tauri_config.tauri.embedded_server.active),
);
}
data.insert("product_name", to_json(settings.bundle_name()));
data.insert("version", to_json(settings.version_string()));
let manufacturer = settings.bundle_identifier().to_string();
data.insert("manufacturer", to_json(manufacturer.as_str()));
let upgrade_code = Uuid::new_v5(
&Uuid::NAMESPACE_DNS,
format!("{}.app.x64", &settings.main_binary_name()).as_bytes(),
)
.to_string();
data.insert("upgrade_code", to_json(&upgrade_code.as_str()));
let path_guid = generate_package_guid(settings).to_string();
data.insert("path_component_guid", to_json(&path_guid.as_str()));
let shortcut_guid = generate_package_guid(settings).to_string();
data.insert("shortcut_guid", to_json(&shortcut_guid.as_str()));
let app_exe_name = settings.main_binary_name().to_string();
data.insert("app_exe_name", to_json(&app_exe_name));
let binaries = generate_binaries_data(&settings)?;
let binaries_json = to_json(&binaries);
data.insert("binaries", binaries_json);
let resources = generate_resource_data(&settings)?;
let mut resources_wix_string = String::from("");
let mut files_ids = Vec::new();
for (_, dir) in resources {
let (wix_string, ids) = dir.get_wix_data()?;
resources_wix_string.push_str(wix_string.as_str());
for id in ids {
files_ids.push(id);
}
}
data.insert("resources", to_json(resources_wix_string));
data.insert("resource_file_ids", to_json(files_ids));
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
let app_exe_source = settings.binary_path(main_binary).display().to_string();
data.insert("app_exe_source", to_json(&app_exe_source));
// copy icons from icons folder to resource folder near msi
let image_path = copy_icons(&settings)?;
let path = image_path.join("icon.ico").display().to_string();
data.insert("icon_path", to_json(path.as_str()));
let temp = HANDLEBARS.render("main.wxs", &data)?;
if output_path.exists() {
remove_dir_all(&output_path).or_else(|e| Err(e))?;
}
create_dir_all(&output_path).or_else(|e| Err(e))?;
let main_wxs_path = output_path.join("main.wxs");
write(&main_wxs_path, temp).or_else(|e| Err(e))?;
let input_basenames = vec!["main"];
for basename in &input_basenames {
let wxs = format!("{}.wxs", basename);
run_candle(settings, &wix_toolset_path, &output_path, &wxs)?;
}
let wixobjs = vec!["main.wixobj"];
let target = run_light(
&wix_toolset_path,
&output_path,
&wixobjs,
&app_installer_dir(settings)?,
)?;
Ok(target)
}
/// Generates the data required for the external binaries and extra binaries bundling.
fn generate_binaries_data(settings: &Settings) -> crate::Result<Vec<Binary>> {
let mut binaries = Vec::new();
let regex = Regex::new(r"[^\w\d\.]")?;
let cwd = std::env::current_dir()?;
for src in settings.external_binaries() {
let src = src?;
let filename = src
.file_name()
.expect("failed to extract external binary filename")
.to_os_string()
.into_string()
.expect("failed to convert external binary filename to string");
let guid = generate_guid(filename.as_bytes()).to_string();
binaries.push(Binary {
guid,
path: cwd
.join(src)
.into_os_string()
.into_string()
.expect("failed to read external binary path"),
id: regex.replace_all(&filename, "").to_string(),
});
}
for bin in settings.binaries() {
let filename = bin.name();
let guid = generate_guid(filename.as_bytes()).to_string();
if!bin.main() {
binaries.push(Binary {
guid,
path: settings
.binary_path(bin)
.into_os_string()
.into_string()
.expect("failed to read binary path"),
id: regex.replace_all(&filename, "").to_string(),
})
}
}
Ok(binaries)
}
/// Generates the data required for the resource bundling on wix
fn generate_resource_data(settings: &Settings) -> crate::Result<ResourceMap> {
let mut resources = ResourceMap::new();
let regex = Regex::new(r"[^\w\d\.]")?;
let cwd = std::env::current_dir()?;
for src in settings.resource_files() {
let src = src?;
let filename = src
.file_name()
.expect("failed to extract resource filename")
.to_os_string()
.into_string()
.expect("failed to convert resource filename to string");
let resource_path = cwd
.join(src.clone())
.into_os_string()
.into_string()
.expect("failed to read resource path");
let resource_entry = ResourceFile {
guid: generate_guid(filename.as_bytes()).to_string(),
path: resource_path,
id: regex.replace_all(&filename, "").to_string(),
};
// split the resource path directories
let mut directories = src
.components()
.filter(|component| {
let comp = component.as_os_str();
comp!= "." && comp!= ".."
})
.collect::<Vec<_>>();
directories.truncate(directories.len() - 1);
// transform the directory structure to a chained vec structure
for directory in directories {
let directory_name = directory
.as_os_str()
.to_os_string()
.into_string()
.expect("failed to read resource folder name");
// if the directory is already on the map
if resources.contains_key(&directory_name) {
let directory_entry = &mut resources
.get_mut(&directory_name)
.expect("Unable to handle resources");
if directory_entry.name == directory_name {
// the directory entry is the root of the chain
directory_entry.add_file(resource_entry.clone());
} else {
let index = directory_entry
.directories
.iter()
.position(|f| f.name == directory_name);
if index.is_some() {
// the directory entry is already a part of the chain
let dir = directory_entry
.directories
.get_mut(index.expect("Unable to get index"))
.expect("Unable to get directory");
dir.add_file(resource_entry.clone());
} else {
// push it to the chain
directory_entry.directories.push(ResourceDirectory {
name: directory_name.clone(),
directories: vec![],
files: vec![resource_entry.clone()],
});
}
}
} else {
resources.insert(
directory_name.clone(),
ResourceDirectory {
name: directory_name.clone(),
directories: vec![],
files: vec![resource_entry.clone()],
},
);
}
}
}
Ok(resources)
}
| Binary | identifier_name |
wix.rs | use super::common;
use super::path_utils::{copy, Options};
use super::settings::Settings;
use handlebars::{to_json, Handlebars};
use lazy_static::lazy_static;
use regex::Regex;
use serde::Serialize;
use sha2::Digest;
use uuid::Uuid;
use zip::ZipArchive;
use std::collections::BTreeMap;
use std::fs::{create_dir_all, remove_dir_all, write, File};
use std::io::{Cursor, Read, Write};
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
// URLS for the WIX toolchain. Can be used for crossplatform compilation.
pub const WIX_URL: &str =
"https://github.com/wixtoolset/wix3/releases/download/wix3112rtm/wix311-binaries.zip";
pub const WIX_SHA256: &str = "2c1888d5d1dba377fc7fa14444cf556963747ff9a0a289a3599cf09da03b9e2e";
// For Cross Platform Complilation.
// const VC_REDIST_X86_URL: &str =
// "https://download.visualstudio.microsoft.com/download/pr/c8edbb87-c7ec-4500-a461-71e8912d25e9/99ba493d660597490cbb8b3211d2cae4/vc_redist.x86.exe";
// const VC_REDIST_X86_SHA256: &str =
// "3a43e8a55a3f3e4b73d01872c16d47a19dd825756784f4580187309e7d1fcb74";
// const VC_REDIST_X64_URL: &str =
// "https://download.visualstudio.microsoft.com/download/pr/9e04d214-5a9d-4515-9960-3d71398d98c3/1e1e62ab57bbb4bf5199e8ce88f040be/vc_redist.x64.exe";
// const VC_REDIST_X64_SHA256: &str =
// "d6cd2445f68815fe02489fafe0127819e44851e26dfbe702612bc0d223cbbc2b";
// A v4 UUID that was generated specifically for tauri-bundler, to be used as a
// namespace for generating v5 UUIDs from bundle identifier strings.
const UUID_NAMESPACE: [u8; 16] = [
0xfd, 0x85, 0x95, 0xa8, 0x17, 0xa3, 0x47, 0x4e, 0xa6, 0x16, 0x76, 0x14, 0x8d, 0xfa, 0x0c, 0x7b,
];
// setup for the main.wxs template file using handlebars. Dynamically changes the template on compilation based on the application metadata.
lazy_static! {
static ref HANDLEBARS: Handlebars<'static> = {
let mut handlebars = Handlebars::new();
handlebars
.register_template_string("main.wxs", include_str!("templates/main.wxs"))
.or_else(|e| Err(e.to_string()))
.expect("Failed to setup handlebar template");
handlebars
};
}
/// Mapper between a resource directory name and its ResourceDirectory descriptor.
type ResourceMap = BTreeMap<String, ResourceDirectory>;
/// A binary to bundle with WIX.
/// External binaries or additional project binaries are represented with this data structure.
/// This data structure is needed because WIX requires each path to have its own `id` and `guid`.
#[derive(Serialize)]
struct Binary {
/// the GUID to use on the WIX XML.
guid: String,
/// the id to use on the WIX XML.
id: String,
/// the binary path.
path: String,
}
/// A Resource file to bundle with WIX.
/// This data structure is needed because WIX requires each path to have its own `id` and `guid`.
#[derive(Serialize, Clone)]
struct ResourceFile {
/// the GUID to use on the WIX XML.
guid: String,
/// the id to use on the WIX XML.
id: String,
/// the file path.
path: String,
}
/// A resource directory to bundle with WIX.
/// This data structure is needed because WIX requires each path to have its own `id` and `guid`.
#[derive(Serialize)]
struct ResourceDirectory {
/// the directory name of the described resource.
name: String,
/// the files of the described resource directory.
files: Vec<ResourceFile>,
/// the directories that are children of the described resource directory.
directories: Vec<ResourceDirectory>,
}
impl ResourceDirectory {
/// Adds a file to this directory descriptor.
fn add_file(&mut self, file: ResourceFile) {
self.files.push(file);
}
/// Generates the wix XML string to bundle this directory resources recursively
fn get_wix_data(self) -> crate::Result<(String, Vec<String>)> {
let mut files = String::from("");
let mut file_ids = Vec::new();
for file in self.files {
file_ids.push(file.id.clone());
files.push_str(
format!(
r#"<Component Id="{id}" Guid="{guid}" Win64="$(var.Win64)" KeyPath="yes"><File Id="PathFile_{id}" Source="{path}" /></Component>"#,
id = file.id,
guid = file.guid,
path = file.path
).as_str()
);
}
let mut directories = String::from("");
for directory in self.directories {
let (wix_string, ids) = directory.get_wix_data()?;
for id in ids {
file_ids.push(id)
}
directories.push_str(wix_string.as_str());
}
let wix_string = format!(
r#"<Directory Id="{name}" Name="{name}">{contents}</Directory>"#,
name = self.name,
contents = format!("{}{}", files, directories)
);
Ok((wix_string, file_ids))
}
}
/// Copies the icons to the binary path, under the `resources` folder,
/// and returns the path to that directory.
fn copy_icons(settings: &Settings) -> crate::Result<PathBuf> {
let base_dir = settings.project_out_directory();
let resource_dir = base_dir.join("resources");
let mut image_path = PathBuf::from(settings.project_out_directory());
// pop off till in tauri_src dir
image_path.pop();
image_path.pop();
// get icon dir and icon file.
let image_path = image_path.join("icons");
let opts = super::path_utils::Options::default();
copy(
image_path,
&resource_dir,
&Options {
copy_files: true,
overwrite: true,
..opts
},
)?;
Ok(resource_dir)
}
/// Function used to download Wix and VC_REDIST. Checks SHA256 to verify the download.
fn download_and_verify(url: &str, hash: &str) -> crate::Result<Vec<u8>> {
common::print_info(format!("Downloading {}", url).as_str())?;
let response = attohttpc::get(url).send()?;
let data: Vec<u8> = response.bytes()?;
common::print_info("validating hash")?;
let mut hasher = sha2::Sha256::new();
hasher.update(&data);
let url_hash = hasher.finalize().to_vec();
let expected_hash = hex::decode(hash)?;
if expected_hash == url_hash {
Ok(data)
} else {
Err(crate::Error::HashError)
}
}
/// The installer directory of the app.
fn app_installer_dir(settings: &Settings) -> crate::Result<PathBuf> {
let arch = match settings.binary_arch() {
"x86" => "x86",
"x86_64" => "x64",
target => {
return Err(crate::Error::ArchError(format!(
"Unsupported architecture: {}",
target
)))
}
};
let package_base_name = format!(
"{}_{}_{}",
settings.main_binary_name().replace(".exe", ""),
settings.version_string(),
arch
);
Ok(
settings
.project_out_directory()
.to_path_buf()
.join(format!("bundle/msi/{}.msi", package_base_name)),
)
}
/// Extracts the zips from Wix and VC_REDIST into a useable path.
fn extract_zip(data: &Vec<u8>, path: &Path) -> crate::Result<()> {
let cursor = Cursor::new(data);
let mut zipa = ZipArchive::new(cursor)?;
for i in 0..zipa.len() {
let mut file = zipa.by_index(i)?;
let dest_path = path.join(file.name());
let parent = dest_path.parent().expect("Failed to get parent");
if!parent.exists() {
create_dir_all(parent)?;
}
let mut buff: Vec<u8> = Vec::new();
file.read_to_end(&mut buff)?;
let mut fileout = File::create(dest_path).expect("Failed to open file");
fileout.write_all(&buff)?;
}
Ok(())
}
/// Generates the UUID for the Wix template.
fn generate_package_guid(settings: &Settings) -> Uuid {
generate_guid(settings.bundle_identifier().as_bytes())
}
/// Generates a GUID.
fn generate_guid(key: &[u8]) -> Uuid {
let namespace = Uuid::from_bytes(UUID_NAMESPACE);
Uuid::new_v5(&namespace, key)
}
// Specifically goes and gets Wix and verifies the download via Sha256
pub fn get_and_extract_wix(path: &Path) -> crate::Result<()> {
common::print_info("Verifying wix package")?;
let data = download_and_verify(WIX_URL, WIX_SHA256)?;
common::print_info("extracting WIX")?;
extract_zip(&data, path)
}
// For if bundler needs DLL files.
// fn run_heat_exe(
// wix_toolset_path: &Path,
// build_path: &Path,
// harvest_dir: &Path,
// platform: &str,
// ) -> Result<(), String> {
// let mut args = vec!["dir"];
// let harvest_str = harvest_dir.display().to_string();
// args.push(&harvest_str);
// args.push("-platform");
// args.push(platform);
// args.push("-cg");
// args.push("AppFiles");
// args.push("-dr");
// args.push("APPLICATIONFOLDER");
// args.push("-gg");
// args.push("-srd");
// args.push("-out");
// args.push("appdir.wxs");
// args.push("-var");
// args.push("var.SourceDir");
// let heat_exe = wix_toolset_path.join("heat.exe");
// let mut cmd = Command::new(&heat_exe)
// .args(&args)
// .stdout(Stdio::piped())
// .current_dir(build_path)
// .spawn()
// .expect("error running heat.exe");
// {
// let stdout = cmd.stdout.as_mut().unwrap();
// let reader = BufReader::new(stdout);
// for line in reader.lines() {
// info!(logger, "{}", line.unwrap());
// }
// }
// let status = cmd.wait().unwrap();
// if status.success() {
// Ok(())
// } else {
// Err("error running heat.exe".to_string())
// }
// }
/// Runs the Candle.exe executable for Wix. Candle parses the wxs file and generates the code for building the installer.
fn run_candle(
settings: &Settings,
wix_toolset_path: &Path,
build_path: &Path,
wxs_file_name: &str,
) -> crate::Result<()> | arch.to_string(),
wxs_file_name.to_string(),
format!(
"-dSourceDir={}",
settings.binary_path(main_binary).display()
),
];
let candle_exe = wix_toolset_path.join("candle.exe");
common::print_info(format!("running candle for {}", wxs_file_name).as_str())?;
let mut cmd = Command::new(&candle_exe);
cmd
.args(&args)
.stdout(Stdio::piped())
.current_dir(build_path);
common::print_info("running candle.exe")?;
common::execute_with_output(&mut cmd).map_err(|_| crate::Error::CandleError)
}
/// Runs the Light.exe file. Light takes the generated code from Candle and produces an MSI Installer.
fn run_light(
wix_toolset_path: &Path,
build_path: &Path,
wixobjs: &[&str],
output_path: &Path,
) -> crate::Result<PathBuf> {
let light_exe = wix_toolset_path.join("light.exe");
let mut args: Vec<String> = vec![
"-ext".to_string(),
"WixUIExtension".to_string(),
"-o".to_string(),
output_path.display().to_string(),
];
for p in wixobjs {
args.push(p.to_string());
}
let mut cmd = Command::new(&light_exe);
cmd
.args(&args)
.stdout(Stdio::piped())
.current_dir(build_path);
common::print_info(format!("running light to produce {}", output_path.display()).as_str())?;
common::execute_with_output(&mut cmd)
.map(|_| output_path.to_path_buf())
.map_err(|_| crate::Error::LightError)
}
// fn get_icon_data() -> crate::Result<()> {
// Ok(())
// }
// Entry point for bundling and creating the MSI installer. For now the only supported platform is Windows x64.
pub fn build_wix_app_installer(
settings: &Settings,
wix_toolset_path: &Path,
) -> crate::Result<PathBuf> {
let arch = match settings.binary_arch() {
"x86_64" => "x64",
"x86" => "x86",
target => {
return Err(crate::Error::ArchError(format!(
"unsupported target: {}",
target
)))
}
};
// target only supports x64.
common::print_info(format!("Target: {}", arch).as_str())?;
let output_path = settings
.project_out_directory()
.join("bundle/msi")
.join(arch);
let mut data = BTreeMap::new();
if let Ok(tauri_config) = crate::bundle::tauri_config::get() {
data.insert(
"embedded_server",
to_json(tauri_config.tauri.embedded_server.active),
);
}
data.insert("product_name", to_json(settings.bundle_name()));
data.insert("version", to_json(settings.version_string()));
let manufacturer = settings.bundle_identifier().to_string();
data.insert("manufacturer", to_json(manufacturer.as_str()));
let upgrade_code = Uuid::new_v5(
&Uuid::NAMESPACE_DNS,
format!("{}.app.x64", &settings.main_binary_name()).as_bytes(),
)
.to_string();
data.insert("upgrade_code", to_json(&upgrade_code.as_str()));
let path_guid = generate_package_guid(settings).to_string();
data.insert("path_component_guid", to_json(&path_guid.as_str()));
let shortcut_guid = generate_package_guid(settings).to_string();
data.insert("shortcut_guid", to_json(&shortcut_guid.as_str()));
let app_exe_name = settings.main_binary_name().to_string();
data.insert("app_exe_name", to_json(&app_exe_name));
let binaries = generate_binaries_data(&settings)?;
let binaries_json = to_json(&binaries);
data.insert("binaries", binaries_json);
let resources = generate_resource_data(&settings)?;
let mut resources_wix_string = String::from("");
let mut files_ids = Vec::new();
for (_, dir) in resources {
let (wix_string, ids) = dir.get_wix_data()?;
resources_wix_string.push_str(wix_string.as_str());
for id in ids {
files_ids.push(id);
}
}
data.insert("resources", to_json(resources_wix_string));
data.insert("resource_file_ids", to_json(files_ids));
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
let app_exe_source = settings.binary_path(main_binary).display().to_string();
data.insert("app_exe_source", to_json(&app_exe_source));
// copy icons from icons folder to resource folder near msi
let image_path = copy_icons(&settings)?;
let path = image_path.join("icon.ico").display().to_string();
data.insert("icon_path", to_json(path.as_str()));
let temp = HANDLEBARS.render("main.wxs", &data)?;
if output_path.exists() {
remove_dir_all(&output_path).or_else(|e| Err(e))?;
}
create_dir_all(&output_path).or_else(|e| Err(e))?;
let main_wxs_path = output_path.join("main.wxs");
write(&main_wxs_path, temp).or_else(|e| Err(e))?;
let input_basenames = vec!["main"];
for basename in &input_basenames {
let wxs = format!("{}.wxs", basename);
run_candle(settings, &wix_toolset_path, &output_path, &wxs)?;
}
let wixobjs = vec!["main.wixobj"];
let target = run_light(
&wix_toolset_path,
&output_path,
&wixobjs,
&app_installer_dir(settings)?,
)?;
Ok(target)
}
/// Generates the data required for the external binaries and extra binaries bundling.
fn generate_binaries_data(settings: &Settings) -> crate::Result<Vec<Binary>> {
let mut binaries = Vec::new();
let regex = Regex::new(r"[^\w\d\.]")?;
let cwd = std::env::current_dir()?;
for src in settings.external_binaries() {
let src = src?;
let filename = src
.file_name()
.expect("failed to extract external binary filename")
.to_os_string()
.into_string()
.expect("failed to convert external binary filename to string");
let guid = generate_guid(filename.as_bytes()).to_string();
binaries.push(Binary {
guid,
path: cwd
.join(src)
.into_os_string()
.into_string()
.expect("failed to read external binary path"),
id: regex.replace_all(&filename, "").to_string(),
});
}
for bin in settings.binaries() {
let filename = bin.name();
let guid = generate_guid(filename.as_bytes()).to_string();
if!bin.main() {
binaries.push(Binary {
guid,
path: settings
.binary_path(bin)
.into_os_string()
.into_string()
.expect("failed to read binary path"),
id: regex.replace_all(&filename, "").to_string(),
})
}
}
Ok(binaries)
}
/// Generates the data required for the resource bundling on wix
fn generate_resource_data(settings: &Settings) -> crate::Result<ResourceMap> {
let mut resources = ResourceMap::new();
let regex = Regex::new(r"[^\w\d\.]")?;
let cwd = std::env::current_dir()?;
for src in settings.resource_files() {
let src = src?;
let filename = src
.file_name()
.expect("failed to extract resource filename")
.to_os_string()
.into_string()
.expect("failed to convert resource filename to string");
let resource_path = cwd
.join(src.clone())
.into_os_string()
.into_string()
.expect("failed to read resource path");
let resource_entry = ResourceFile {
guid: generate_guid(filename.as_bytes()).to_string(),
path: resource_path,
id: regex.replace_all(&filename, "").to_string(),
};
// split the resource path directories
let mut directories = src
.components()
.filter(|component| {
let comp = component.as_os_str();
comp!= "." && comp!= ".."
})
.collect::<Vec<_>>();
directories.truncate(directories.len() - 1);
// transform the directory structure to a chained vec structure
for directory in directories {
let directory_name = directory
.as_os_str()
.to_os_string()
.into_string()
.expect("failed to read resource folder name");
// if the directory is already on the map
if resources.contains_key(&directory_name) {
let directory_entry = &mut resources
.get_mut(&directory_name)
.expect("Unable to handle resources");
if directory_entry.name == directory_name {
// the directory entry is the root of the chain
directory_entry.add_file(resource_entry.clone());
} else {
let index = directory_entry
.directories
.iter()
.position(|f| f.name == directory_name);
if index.is_some() {
// the directory entry is already a part of the chain
let dir = directory_entry
.directories
.get_mut(index.expect("Unable to get index"))
.expect("Unable to get directory");
dir.add_file(resource_entry.clone());
} else {
// push it to the chain
directory_entry.directories.push(ResourceDirectory {
name: directory_name.clone(),
directories: vec![],
files: vec![resource_entry.clone()],
});
}
}
} else {
resources.insert(
directory_name.clone(),
ResourceDirectory {
name: directory_name.clone(),
directories: vec![],
files: vec![resource_entry.clone()],
},
);
}
}
}
Ok(resources)
}
| {
let arch = match settings.binary_arch() {
"x86_64" => "x64",
"x86" => "x86",
target => {
return Err(crate::Error::ArchError(format!(
"unsupported target: {}",
target
)))
}
};
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
let args = vec![
"-arch".to_string(), | identifier_body |
wix.rs | use super::common;
use super::path_utils::{copy, Options};
use super::settings::Settings;
use handlebars::{to_json, Handlebars};
use lazy_static::lazy_static;
use regex::Regex;
use serde::Serialize;
use sha2::Digest;
use uuid::Uuid;
use zip::ZipArchive;
use std::collections::BTreeMap;
use std::fs::{create_dir_all, remove_dir_all, write, File};
use std::io::{Cursor, Read, Write};
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
// URLS for the WIX toolchain. Can be used for crossplatform compilation.
pub const WIX_URL: &str =
"https://github.com/wixtoolset/wix3/releases/download/wix3112rtm/wix311-binaries.zip";
pub const WIX_SHA256: &str = "2c1888d5d1dba377fc7fa14444cf556963747ff9a0a289a3599cf09da03b9e2e";
// For Cross Platform Complilation.
// const VC_REDIST_X86_URL: &str =
// "https://download.visualstudio.microsoft.com/download/pr/c8edbb87-c7ec-4500-a461-71e8912d25e9/99ba493d660597490cbb8b3211d2cae4/vc_redist.x86.exe";
// const VC_REDIST_X86_SHA256: &str =
// "3a43e8a55a3f3e4b73d01872c16d47a19dd825756784f4580187309e7d1fcb74";
// const VC_REDIST_X64_URL: &str =
// "https://download.visualstudio.microsoft.com/download/pr/9e04d214-5a9d-4515-9960-3d71398d98c3/1e1e62ab57bbb4bf5199e8ce88f040be/vc_redist.x64.exe";
// const VC_REDIST_X64_SHA256: &str =
// "d6cd2445f68815fe02489fafe0127819e44851e26dfbe702612bc0d223cbbc2b";
// A v4 UUID that was generated specifically for tauri-bundler, to be used as a
// namespace for generating v5 UUIDs from bundle identifier strings.
const UUID_NAMESPACE: [u8; 16] = [
0xfd, 0x85, 0x95, 0xa8, 0x17, 0xa3, 0x47, 0x4e, 0xa6, 0x16, 0x76, 0x14, 0x8d, 0xfa, 0x0c, 0x7b,
];
// setup for the main.wxs template file using handlebars. Dynamically changes the template on compilation based on the application metadata.
lazy_static! {
static ref HANDLEBARS: Handlebars<'static> = {
let mut handlebars = Handlebars::new();
handlebars
.register_template_string("main.wxs", include_str!("templates/main.wxs"))
.or_else(|e| Err(e.to_string()))
.expect("Failed to setup handlebar template");
handlebars
};
}
/// Mapper between a resource directory name and its ResourceDirectory descriptor.
type ResourceMap = BTreeMap<String, ResourceDirectory>;
/// A binary to bundle with WIX.
/// External binaries or additional project binaries are represented with this data structure.
/// This data structure is needed because WIX requires each path to have its own `id` and `guid`.
#[derive(Serialize)]
struct Binary {
/// the GUID to use on the WIX XML.
guid: String,
/// the id to use on the WIX XML.
id: String,
/// the binary path.
path: String,
}
/// A Resource file to bundle with WIX.
/// This data structure is needed because WIX requires each path to have its own `id` and `guid`.
#[derive(Serialize, Clone)]
struct ResourceFile {
/// the GUID to use on the WIX XML.
guid: String,
/// the id to use on the WIX XML.
id: String,
/// the file path.
path: String,
}
/// A resource directory to bundle with WIX.
/// This data structure is needed because WIX requires each path to have its own `id` and `guid`.
#[derive(Serialize)]
struct ResourceDirectory {
/// the directory name of the described resource.
name: String,
/// the files of the described resource directory.
files: Vec<ResourceFile>,
/// the directories that are children of the described resource directory.
directories: Vec<ResourceDirectory>,
}
impl ResourceDirectory {
/// Adds a file to this directory descriptor.
fn add_file(&mut self, file: ResourceFile) {
self.files.push(file);
}
/// Generates the wix XML string to bundle this directory resources recursively
fn get_wix_data(self) -> crate::Result<(String, Vec<String>)> {
let mut files = String::from("");
let mut file_ids = Vec::new();
for file in self.files {
file_ids.push(file.id.clone());
files.push_str(
format!(
r#"<Component Id="{id}" Guid="{guid}" Win64="$(var.Win64)" KeyPath="yes"><File Id="PathFile_{id}" Source="{path}" /></Component>"#,
id = file.id,
guid = file.guid,
path = file.path
).as_str()
);
}
let mut directories = String::from("");
for directory in self.directories {
let (wix_string, ids) = directory.get_wix_data()?;
for id in ids {
file_ids.push(id)
}
directories.push_str(wix_string.as_str());
}
let wix_string = format!(
r#"<Directory Id="{name}" Name="{name}">{contents}</Directory>"#,
name = self.name,
contents = format!("{}{}", files, directories)
);
Ok((wix_string, file_ids))
}
}
/// Copies the icons to the binary path, under the `resources` folder,
/// and returns the path to that directory.
fn copy_icons(settings: &Settings) -> crate::Result<PathBuf> {
let base_dir = settings.project_out_directory();
let resource_dir = base_dir.join("resources");
let mut image_path = PathBuf::from(settings.project_out_directory());
// pop off till in tauri_src dir
image_path.pop();
image_path.pop();
// get icon dir and icon file.
let image_path = image_path.join("icons");
let opts = super::path_utils::Options::default();
copy(
image_path,
&resource_dir,
&Options {
copy_files: true,
overwrite: true,
..opts
},
)?;
Ok(resource_dir)
}
/// Function used to download Wix and VC_REDIST. Checks SHA256 to verify the download.
fn download_and_verify(url: &str, hash: &str) -> crate::Result<Vec<u8>> {
common::print_info(format!("Downloading {}", url).as_str())?;
let response = attohttpc::get(url).send()?;
let data: Vec<u8> = response.bytes()?;
common::print_info("validating hash")?;
let mut hasher = sha2::Sha256::new();
hasher.update(&data);
let url_hash = hasher.finalize().to_vec();
let expected_hash = hex::decode(hash)?;
if expected_hash == url_hash {
Ok(data)
} else {
Err(crate::Error::HashError)
}
}
/// The installer directory of the app.
fn app_installer_dir(settings: &Settings) -> crate::Result<PathBuf> {
let arch = match settings.binary_arch() {
"x86" => "x86",
"x86_64" => "x64",
target => {
return Err(crate::Error::ArchError(format!(
"Unsupported architecture: {}",
target
)))
}
};
let package_base_name = format!(
"{}_{}_{}",
settings.main_binary_name().replace(".exe", ""),
settings.version_string(),
arch
);
Ok(
settings
.project_out_directory()
.to_path_buf()
.join(format!("bundle/msi/{}.msi", package_base_name)),
)
}
/// Extracts the zips from Wix and VC_REDIST into a useable path.
fn extract_zip(data: &Vec<u8>, path: &Path) -> crate::Result<()> {
let cursor = Cursor::new(data);
let mut zipa = ZipArchive::new(cursor)?;
for i in 0..zipa.len() {
let mut file = zipa.by_index(i)?;
let dest_path = path.join(file.name());
let parent = dest_path.parent().expect("Failed to get parent");
if!parent.exists() {
create_dir_all(parent)?;
}
let mut buff: Vec<u8> = Vec::new();
file.read_to_end(&mut buff)?;
let mut fileout = File::create(dest_path).expect("Failed to open file");
fileout.write_all(&buff)?;
}
Ok(())
}
/// Generates the UUID for the Wix template.
fn generate_package_guid(settings: &Settings) -> Uuid {
generate_guid(settings.bundle_identifier().as_bytes())
}
/// Generates a GUID.
fn generate_guid(key: &[u8]) -> Uuid {
let namespace = Uuid::from_bytes(UUID_NAMESPACE);
Uuid::new_v5(&namespace, key)
}
// Specifically goes and gets Wix and verifies the download via Sha256
pub fn get_and_extract_wix(path: &Path) -> crate::Result<()> {
common::print_info("Verifying wix package")?;
let data = download_and_verify(WIX_URL, WIX_SHA256)?;
common::print_info("extracting WIX")?;
extract_zip(&data, path)
}
// For if bundler needs DLL files.
// fn run_heat_exe(
// wix_toolset_path: &Path,
// build_path: &Path,
// harvest_dir: &Path,
// platform: &str,
// ) -> Result<(), String> {
// let mut args = vec!["dir"];
// let harvest_str = harvest_dir.display().to_string();
// args.push(&harvest_str);
// args.push("-platform");
// args.push(platform);
// args.push("-cg");
// args.push("AppFiles");
// args.push("-dr");
// args.push("APPLICATIONFOLDER");
// args.push("-gg");
// args.push("-srd");
// args.push("-out");
// args.push("appdir.wxs");
// args.push("-var");
// args.push("var.SourceDir");
// let heat_exe = wix_toolset_path.join("heat.exe");
// let mut cmd = Command::new(&heat_exe)
// .args(&args)
// .stdout(Stdio::piped())
// .current_dir(build_path)
// .spawn()
// .expect("error running heat.exe");
// {
// let stdout = cmd.stdout.as_mut().unwrap();
// let reader = BufReader::new(stdout);
// for line in reader.lines() {
// info!(logger, "{}", line.unwrap());
// }
// }
// let status = cmd.wait().unwrap();
// if status.success() {
// Ok(())
// } else {
// Err("error running heat.exe".to_string())
// }
// }
/// Runs the Candle.exe executable for Wix. Candle parses the wxs file and generates the code for building the installer.
fn run_candle(
settings: &Settings,
wix_toolset_path: &Path,
build_path: &Path,
wxs_file_name: &str,
) -> crate::Result<()> {
let arch = match settings.binary_arch() {
"x86_64" => "x64",
"x86" => "x86",
target => {
return Err(crate::Error::ArchError(format!(
"unsupported target: {}",
target
)))
}
};
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
let args = vec![
"-arch".to_string(),
arch.to_string(),
wxs_file_name.to_string(),
format!(
"-dSourceDir={}",
settings.binary_path(main_binary).display()
),
];
let candle_exe = wix_toolset_path.join("candle.exe");
common::print_info(format!("running candle for {}", wxs_file_name).as_str())?;
let mut cmd = Command::new(&candle_exe);
cmd
.args(&args)
.stdout(Stdio::piped())
.current_dir(build_path);
common::print_info("running candle.exe")?;
common::execute_with_output(&mut cmd).map_err(|_| crate::Error::CandleError)
}
/// Runs the Light.exe file. Light takes the generated code from Candle and produces an MSI Installer.
fn run_light(
wix_toolset_path: &Path,
build_path: &Path,
wixobjs: &[&str],
output_path: &Path,
) -> crate::Result<PathBuf> {
let light_exe = wix_toolset_path.join("light.exe");
let mut args: Vec<String> = vec![
"-ext".to_string(),
"WixUIExtension".to_string(),
"-o".to_string(),
output_path.display().to_string(),
];
for p in wixobjs {
args.push(p.to_string());
}
let mut cmd = Command::new(&light_exe);
cmd
.args(&args)
.stdout(Stdio::piped())
.current_dir(build_path);
common::print_info(format!("running light to produce {}", output_path.display()).as_str())?;
common::execute_with_output(&mut cmd)
.map(|_| output_path.to_path_buf())
.map_err(|_| crate::Error::LightError)
}
// fn get_icon_data() -> crate::Result<()> {
// Ok(())
// }
// Entry point for bundling and creating the MSI installer. For now the only supported platform is Windows x64.
pub fn build_wix_app_installer(
settings: &Settings,
wix_toolset_path: &Path,
) -> crate::Result<PathBuf> {
let arch = match settings.binary_arch() {
"x86_64" => "x64",
"x86" => "x86",
target => {
return Err(crate::Error::ArchError(format!(
"unsupported target: {}",
target
)))
}
};
// target only supports x64.
common::print_info(format!("Target: {}", arch).as_str())?;
let output_path = settings
.project_out_directory()
.join("bundle/msi")
.join(arch);
let mut data = BTreeMap::new();
if let Ok(tauri_config) = crate::bundle::tauri_config::get() {
data.insert(
"embedded_server",
to_json(tauri_config.tauri.embedded_server.active),
);
}
data.insert("product_name", to_json(settings.bundle_name()));
data.insert("version", to_json(settings.version_string()));
let manufacturer = settings.bundle_identifier().to_string();
data.insert("manufacturer", to_json(manufacturer.as_str()));
let upgrade_code = Uuid::new_v5(
&Uuid::NAMESPACE_DNS,
format!("{}.app.x64", &settings.main_binary_name()).as_bytes(),
)
.to_string();
data.insert("upgrade_code", to_json(&upgrade_code.as_str()));
let path_guid = generate_package_guid(settings).to_string();
data.insert("path_component_guid", to_json(&path_guid.as_str()));
let shortcut_guid = generate_package_guid(settings).to_string();
data.insert("shortcut_guid", to_json(&shortcut_guid.as_str()));
let app_exe_name = settings.main_binary_name().to_string();
data.insert("app_exe_name", to_json(&app_exe_name));
let binaries = generate_binaries_data(&settings)?;
let binaries_json = to_json(&binaries);
data.insert("binaries", binaries_json);
let resources = generate_resource_data(&settings)?;
let mut resources_wix_string = String::from("");
let mut files_ids = Vec::new();
for (_, dir) in resources {
let (wix_string, ids) = dir.get_wix_data()?;
resources_wix_string.push_str(wix_string.as_str());
for id in ids {
files_ids.push(id);
}
}
data.insert("resources", to_json(resources_wix_string));
data.insert("resource_file_ids", to_json(files_ids));
let main_binary = settings
.binaries()
.iter()
.find(|bin| bin.main())
.ok_or_else(|| anyhow::anyhow!("Failed to get main binary"))?;
let app_exe_source = settings.binary_path(main_binary).display().to_string();
data.insert("app_exe_source", to_json(&app_exe_source));
// copy icons from icons folder to resource folder near msi
let image_path = copy_icons(&settings)?;
let path = image_path.join("icon.ico").display().to_string();
data.insert("icon_path", to_json(path.as_str()));
let temp = HANDLEBARS.render("main.wxs", &data)?;
if output_path.exists() {
remove_dir_all(&output_path).or_else(|e| Err(e))?;
}
create_dir_all(&output_path).or_else(|e| Err(e))?;
let main_wxs_path = output_path.join("main.wxs");
write(&main_wxs_path, temp).or_else(|e| Err(e))?;
let input_basenames = vec!["main"];
for basename in &input_basenames {
let wxs = format!("{}.wxs", basename);
run_candle(settings, &wix_toolset_path, &output_path, &wxs)?;
}
let wixobjs = vec!["main.wixobj"];
let target = run_light(
&wix_toolset_path,
&output_path,
&wixobjs,
&app_installer_dir(settings)?,
)?;
Ok(target)
}
/// Generates the data required for the external binaries and extra binaries bundling.
fn generate_binaries_data(settings: &Settings) -> crate::Result<Vec<Binary>> {
let mut binaries = Vec::new();
let regex = Regex::new(r"[^\w\d\.]")?;
let cwd = std::env::current_dir()?;
for src in settings.external_binaries() {
let src = src?;
let filename = src
.file_name()
.expect("failed to extract external binary filename")
.to_os_string()
.into_string()
.expect("failed to convert external binary filename to string");
let guid = generate_guid(filename.as_bytes()).to_string();
binaries.push(Binary {
guid,
path: cwd
.join(src)
.into_os_string()
.into_string()
.expect("failed to read external binary path"),
id: regex.replace_all(&filename, "").to_string(),
});
}
for bin in settings.binaries() {
let filename = bin.name();
let guid = generate_guid(filename.as_bytes()).to_string();
if!bin.main() {
binaries.push(Binary {
guid,
path: settings
.binary_path(bin)
.into_os_string()
.into_string()
.expect("failed to read binary path"),
id: regex.replace_all(&filename, "").to_string(),
})
}
}
Ok(binaries)
}
/// Generates the data required for the resource bundling on wix
fn generate_resource_data(settings: &Settings) -> crate::Result<ResourceMap> {
let mut resources = ResourceMap::new();
let regex = Regex::new(r"[^\w\d\.]")?;
let cwd = std::env::current_dir()?;
for src in settings.resource_files() {
let src = src?;
let filename = src
.file_name()
.expect("failed to extract resource filename")
.to_os_string()
.into_string()
.expect("failed to convert resource filename to string");
let resource_path = cwd
.join(src.clone())
.into_os_string()
.into_string()
.expect("failed to read resource path");
let resource_entry = ResourceFile {
guid: generate_guid(filename.as_bytes()).to_string(),
path: resource_path,
id: regex.replace_all(&filename, "").to_string(),
};
// split the resource path directories
let mut directories = src
.components()
.filter(|component| {
let comp = component.as_os_str();
comp!= "." && comp!= ".."
})
.collect::<Vec<_>>();
directories.truncate(directories.len() - 1);
// transform the directory structure to a chained vec structure
for directory in directories {
let directory_name = directory
.as_os_str()
.to_os_string()
.into_string()
.expect("failed to read resource folder name");
// if the directory is already on the map
if resources.contains_key(&directory_name) {
let directory_entry = &mut resources
.get_mut(&directory_name)
.expect("Unable to handle resources");
if directory_entry.name == directory_name {
// the directory entry is the root of the chain
directory_entry.add_file(resource_entry.clone());
} else {
let index = directory_entry | .directories
.iter()
.position(|f| f.name == directory_name);
if index.is_some() {
// the directory entry is already a part of the chain
let dir = directory_entry
.directories
.get_mut(index.expect("Unable to get index"))
.expect("Unable to get directory");
dir.add_file(resource_entry.clone());
} else {
// push it to the chain
directory_entry.directories.push(ResourceDirectory {
name: directory_name.clone(),
directories: vec![],
files: vec![resource_entry.clone()],
});
}
}
} else {
resources.insert(
directory_name.clone(),
ResourceDirectory {
name: directory_name.clone(),
directories: vec![],
files: vec![resource_entry.clone()],
},
);
}
}
}
Ok(resources)
} | random_line_split |
|
cursor.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
use crate::io::prelude::*;
use crate::alloc::Allocator;
use crate::cmp;
use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, SeekFrom};
/// A `Cursor` wraps an in-memory buffer and provides it with a
/// [`Seek`] implementation.
///
/// `Cursor`s are used with in-memory buffers, anything implementing
/// <code>[AsRef]<\[u8]></code>, to allow them to implement [`Read`] and/or [`Write`],
/// allowing these buffers to be used anywhere you might use a reader or writer
/// that does actual I/O.
///
/// The standard library implements some I/O traits on various types which
/// are commonly used as a buffer, like <code>Cursor<[Vec]\<u8>></code> and
/// <code>Cursor<[&\[u8\]][bytes]></code>.
///
/// # Examples
///
/// We may want to write bytes to a [`File`] in our production
/// code, but use an in-memory buffer in our tests. We can do this with
/// `Cursor`:
///
/// [bytes]: crate::slice "slice"
/// [`File`]: crate::fs::File
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::{self, SeekFrom};
/// use std::fs::File;
///
/// // a library function we've written
/// fn write_ten_bytes_at_end<W: Write + Seek>(writer: &mut W) -> io::Result<()> {
/// writer.seek(SeekFrom::End(-10))?;
///
/// for i in 0..10 {
/// writer.write(&[i])?;
/// }
///
/// // all went well
/// Ok(())
/// }
///
/// # fn foo() -> io::Result<()> {
/// // Here's some code that uses this library function.
/// //
/// // We might want to use a BufReader here for efficiency, but let's
/// // keep this example focused.
/// let mut file = File::create("foo.txt")?;
///
/// write_ten_bytes_at_end(&mut file)?;
/// # Ok(())
/// # }
///
/// // now let's write a test
/// #[test]
/// fn test_writes_bytes() {
/// // setting up a real File is much slower than an in-memory buffer,
/// // let's use a cursor instead
/// use std::io::Cursor;
/// let mut buff = Cursor::new(vec![0; 15]);
///
/// write_ten_bytes_at_end(&mut buff).unwrap();
///
/// assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// }
/// ```
#[derive(Debug, Default, Eq, PartialEq)]
pub struct Cursor<T> {
inner: T,
pos: u64,
}
impl<T> Cursor<T> {
/// Creates a new cursor wrapping the provided underlying in-memory buffer.
///
/// Cursor initial position is `0` even if underlying buffer (e.g., [`Vec`])
/// is not empty. So writing to cursor starts with overwriting [`Vec`]
/// content, not with appending to it.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
/// ```
pub const fn new(inner: T) -> Cursor<T> {
Cursor { pos: 0, inner }
}
/// Consumes this cursor, returning the underlying value.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let vec = buff.into_inner();
/// ```
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying value in this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_ref();
/// ```
pub const fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying value in this cursor.
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying value as it may corrupt this cursor's position.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_mut();
/// ```
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Returns the current position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
/// use std::io::prelude::*;
/// use std::io::SeekFrom;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.seek(SeekFrom::Current(2)).unwrap();
/// assert_eq!(buff.position(), 2);
///
/// buff.seek(SeekFrom::Current(-1)).unwrap();
/// assert_eq!(buff.position(), 1);
/// ```
pub const fn position(&self) -> u64 {
self.pos
}
/// Sets the position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.set_position(2);
/// assert_eq!(buff.position(), 2);
///
/// buff.set_position(4);
/// assert_eq!(buff.position(), 4);
/// ```
pub fn set_position(&mut self, pos: u64) {
self.pos = pos;
}
}
impl<T> Cursor<T>
where
T: AsRef<[u8]>,
{
/// Returns the remaining slice.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.remaining_slice(), &[1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert_eq!(buff.remaining_slice(), &[3, 4, 5]);
///
/// buff.set_position(4);
/// assert_eq!(buff.remaining_slice(), &[5]);
///
/// buff.set_position(6);
/// assert_eq!(buff.remaining_slice(), &[]);
/// ```
pub fn remaining_slice(&self) -> &[u8] {
let len = self.pos.min(self.inner.as_ref().len() as u64);
&self.inner.as_ref()[(len as usize)..]
}
/// Returns `true` if the remaining slice is empty.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert!(!buff.is_empty());
///
/// buff.set_position(5);
/// assert!(buff.is_empty());
///
/// buff.set_position(10);
/// assert!(buff.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.pos >= self.inner.as_ref().len() as u64
}
}
impl<T> Clone for Cursor<T> | T: Clone,
{
#[inline]
fn clone(&self) -> Self {
Cursor { inner: self.inner.clone(), pos: self.pos }
}
#[inline]
fn clone_from(&mut self, other: &Self) {
self.inner.clone_from(&other.inner);
self.pos = other.pos;
}
}
impl<T> io::Seek for Cursor<T>
where
T: AsRef<[u8]>,
{
fn seek(&mut self, style: SeekFrom) -> io::Result<u64> {
let (base_pos, offset) = match style {
SeekFrom::Start(n) => {
self.pos = n;
return Ok(n);
}
SeekFrom::End(n) => (self.inner.as_ref().len() as u64, n),
SeekFrom::Current(n) => (self.pos, n),
};
match base_pos.checked_add_signed(offset) {
Some(n) => {
self.pos = n;
Ok(self.pos)
}
None => Err(io::const_io_error!(
ErrorKind::InvalidInput,
"invalid seek to a negative or overflowing position",
)),
}
}
fn stream_len(&mut self) -> io::Result<u64> {
Ok(self.inner.as_ref().len() as u64)
}
fn stream_position(&mut self) -> io::Result<u64> {
Ok(self.pos)
}
}
impl<T> Read for Cursor<T>
where
T: AsRef<[u8]>,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let n = Read::read(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(n)
}
fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
let prev_written = cursor.written();
Read::read_buf(&mut self.fill_buf()?, cursor.reborrow())?;
self.pos += (cursor.written() - prev_written) as u64;
Ok(())
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let mut nread = 0;
for buf in bufs {
let n = self.read(buf)?;
nread += n;
if n < buf.len() {
break;
}
}
Ok(nread)
}
fn is_read_vectored(&self) -> bool {
true
}
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
let n = buf.len();
Read::read_exact(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(())
}
}
impl<T> BufRead for Cursor<T>
where
T: AsRef<[u8]>,
{
fn fill_buf(&mut self) -> io::Result<&[u8]> {
Ok(self.remaining_slice())
}
fn consume(&mut self, amt: usize) {
self.pos += amt as u64;
}
}
// Non-resizing write implementation
#[inline]
fn slice_write(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<usize> {
let pos = cmp::min(*pos_mut, slice.len() as u64);
let amt = (&mut slice[(pos as usize)..]).write(buf)?;
*pos_mut += amt as u64;
Ok(amt)
}
#[inline]
fn slice_write_vectored(
pos_mut: &mut u64,
slice: &mut [u8],
bufs: &[IoSlice<'_>],
) -> io::Result<usize> {
let mut nwritten = 0;
for buf in bufs {
let n = slice_write(pos_mut, slice, buf)?;
nwritten += n;
if n < buf.len() {
break;
}
}
Ok(nwritten)
}
/// Reserves the required space, and pads the vec with 0s if necessary.
fn reserve_and_pad<A: Allocator>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
buf_len: usize,
) -> io::Result<usize> {
let pos: usize = (*pos_mut).try_into().map_err(|_| {
io::const_io_error!(
ErrorKind::InvalidInput,
"cursor position exceeds maximum possible vector length",
)
})?;
// For safety reasons, we don't want these numbers to overflow
// otherwise our allocation won't be enough
let desired_cap = pos.saturating_add(buf_len);
if desired_cap > vec.capacity() {
// We want our vec's total capacity
// to have room for (pos+buf_len) bytes. Reserve allocates
// based on additional elements from the length, so we need to
// reserve the difference
vec.reserve(desired_cap - vec.len());
}
// Pad if pos is above the current len.
if pos > vec.len() {
let diff = pos - vec.len();
// Unfortunately, `resize()` would suffice but the optimiser does not
// realise the `reserve` it does can be eliminated. So we do it manually
// to eliminate that extra branch
let spare = vec.spare_capacity_mut();
debug_assert!(spare.len() >= diff);
// Safety: we have allocated enough capacity for this.
// And we are only writing, not reading
unsafe {
spare.get_unchecked_mut(..diff).fill(core::mem::MaybeUninit::new(0));
vec.set_len(pos);
}
}
Ok(pos)
}
/// Writes the slice to the vec without allocating
/// # Safety: vec must have buf.len() spare capacity
unsafe fn vec_write_unchecked<A>(pos: usize, vec: &mut Vec<u8, A>, buf: &[u8]) -> usize
where
A: Allocator,
{
debug_assert!(vec.capacity() >= pos + buf.len());
vec.as_mut_ptr().add(pos).copy_from(buf.as_ptr(), buf.len());
pos + buf.len()
}
/// Resizing write implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
where
A: Allocator,
{
let buf_len = buf.len();
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to pos
unsafe {
pos = vec_write_unchecked(pos, vec, buf);
if pos > vec.len() {
vec.set_len(pos);
}
};
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
/// Resizing write_vectored implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write_vectored<A>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
bufs: &[IoSlice<'_>],
) -> io::Result<usize>
where
A: Allocator,
{
// For safety reasons, we don't want this sum to overflow ever.
// If this saturates, the reserve should panic to avoid any unsound writing.
let buf_len = bufs.iter().fold(0usize, |a, b| a.saturating_add(b.len()));
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to the last pos
unsafe {
for buf in bufs {
pos = vec_write_unchecked(pos, vec, buf);
}
if pos > vec.len() {
vec.set_len(pos);
}
}
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
impl Write for Cursor<&mut [u8]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<&mut Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, &mut self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Box<[u8], A>>
where
A: Allocator,
{
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<const N: usize> Write for Cursor<[u8; N]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
} | where | random_line_split |
cursor.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
use crate::io::prelude::*;
use crate::alloc::Allocator;
use crate::cmp;
use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, SeekFrom};
/// A `Cursor` wraps an in-memory buffer and provides it with a
/// [`Seek`] implementation.
///
/// `Cursor`s are used with in-memory buffers, anything implementing
/// <code>[AsRef]<\[u8]></code>, to allow them to implement [`Read`] and/or [`Write`],
/// allowing these buffers to be used anywhere you might use a reader or writer
/// that does actual I/O.
///
/// The standard library implements some I/O traits on various types which
/// are commonly used as a buffer, like <code>Cursor<[Vec]\<u8>></code> and
/// <code>Cursor<[&\[u8\]][bytes]></code>.
///
/// # Examples
///
/// We may want to write bytes to a [`File`] in our production
/// code, but use an in-memory buffer in our tests. We can do this with
/// `Cursor`:
///
/// [bytes]: crate::slice "slice"
/// [`File`]: crate::fs::File
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::{self, SeekFrom};
/// use std::fs::File;
///
/// // a library function we've written
/// fn write_ten_bytes_at_end<W: Write + Seek>(writer: &mut W) -> io::Result<()> {
/// writer.seek(SeekFrom::End(-10))?;
///
/// for i in 0..10 {
/// writer.write(&[i])?;
/// }
///
/// // all went well
/// Ok(())
/// }
///
/// # fn foo() -> io::Result<()> {
/// // Here's some code that uses this library function.
/// //
/// // We might want to use a BufReader here for efficiency, but let's
/// // keep this example focused.
/// let mut file = File::create("foo.txt")?;
///
/// write_ten_bytes_at_end(&mut file)?;
/// # Ok(())
/// # }
///
/// // now let's write a test
/// #[test]
/// fn test_writes_bytes() {
/// // setting up a real File is much slower than an in-memory buffer,
/// // let's use a cursor instead
/// use std::io::Cursor;
/// let mut buff = Cursor::new(vec![0; 15]);
///
/// write_ten_bytes_at_end(&mut buff).unwrap();
///
/// assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// }
/// ```
#[derive(Debug, Default, Eq, PartialEq)]
pub struct Cursor<T> {
inner: T,
pos: u64,
}
impl<T> Cursor<T> {
/// Creates a new cursor wrapping the provided underlying in-memory buffer.
///
/// Cursor initial position is `0` even if underlying buffer (e.g., [`Vec`])
/// is not empty. So writing to cursor starts with overwriting [`Vec`]
/// content, not with appending to it.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
/// ```
pub const fn new(inner: T) -> Cursor<T> {
Cursor { pos: 0, inner }
}
/// Consumes this cursor, returning the underlying value.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let vec = buff.into_inner();
/// ```
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying value in this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_ref();
/// ```
pub const fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying value in this cursor.
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying value as it may corrupt this cursor's position.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_mut();
/// ```
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Returns the current position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
/// use std::io::prelude::*;
/// use std::io::SeekFrom;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.seek(SeekFrom::Current(2)).unwrap();
/// assert_eq!(buff.position(), 2);
///
/// buff.seek(SeekFrom::Current(-1)).unwrap();
/// assert_eq!(buff.position(), 1);
/// ```
pub const fn position(&self) -> u64 {
self.pos
}
/// Sets the position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.set_position(2);
/// assert_eq!(buff.position(), 2);
///
/// buff.set_position(4);
/// assert_eq!(buff.position(), 4);
/// ```
pub fn set_position(&mut self, pos: u64) {
self.pos = pos;
}
}
impl<T> Cursor<T>
where
T: AsRef<[u8]>,
{
/// Returns the remaining slice.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.remaining_slice(), &[1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert_eq!(buff.remaining_slice(), &[3, 4, 5]);
///
/// buff.set_position(4);
/// assert_eq!(buff.remaining_slice(), &[5]);
///
/// buff.set_position(6);
/// assert_eq!(buff.remaining_slice(), &[]);
/// ```
pub fn remaining_slice(&self) -> &[u8] {
let len = self.pos.min(self.inner.as_ref().len() as u64);
&self.inner.as_ref()[(len as usize)..]
}
/// Returns `true` if the remaining slice is empty.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert!(!buff.is_empty());
///
/// buff.set_position(5);
/// assert!(buff.is_empty());
///
/// buff.set_position(10);
/// assert!(buff.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.pos >= self.inner.as_ref().len() as u64
}
}
impl<T> Clone for Cursor<T>
where
T: Clone,
{
#[inline]
fn clone(&self) -> Self {
Cursor { inner: self.inner.clone(), pos: self.pos }
}
#[inline]
fn clone_from(&mut self, other: &Self) {
self.inner.clone_from(&other.inner);
self.pos = other.pos;
}
}
impl<T> io::Seek for Cursor<T>
where
T: AsRef<[u8]>,
{
fn seek(&mut self, style: SeekFrom) -> io::Result<u64> {
let (base_pos, offset) = match style {
SeekFrom::Start(n) => {
self.pos = n;
return Ok(n);
}
SeekFrom::End(n) => (self.inner.as_ref().len() as u64, n),
SeekFrom::Current(n) => (self.pos, n),
};
match base_pos.checked_add_signed(offset) {
Some(n) => {
self.pos = n;
Ok(self.pos)
}
None => Err(io::const_io_error!(
ErrorKind::InvalidInput,
"invalid seek to a negative or overflowing position",
)),
}
}
fn stream_len(&mut self) -> io::Result<u64> {
Ok(self.inner.as_ref().len() as u64)
}
fn stream_position(&mut self) -> io::Result<u64> {
Ok(self.pos)
}
}
impl<T> Read for Cursor<T>
where
T: AsRef<[u8]>,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let n = Read::read(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(n)
}
fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
let prev_written = cursor.written();
Read::read_buf(&mut self.fill_buf()?, cursor.reborrow())?;
self.pos += (cursor.written() - prev_written) as u64;
Ok(())
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let mut nread = 0;
for buf in bufs {
let n = self.read(buf)?;
nread += n;
if n < buf.len() {
break;
}
}
Ok(nread)
}
fn is_read_vectored(&self) -> bool {
true
}
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
let n = buf.len();
Read::read_exact(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(())
}
}
impl<T> BufRead for Cursor<T>
where
T: AsRef<[u8]>,
{
fn fill_buf(&mut self) -> io::Result<&[u8]> {
Ok(self.remaining_slice())
}
fn consume(&mut self, amt: usize) {
self.pos += amt as u64;
}
}
// Non-resizing write implementation
#[inline]
fn slice_write(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<usize> {
let pos = cmp::min(*pos_mut, slice.len() as u64);
let amt = (&mut slice[(pos as usize)..]).write(buf)?;
*pos_mut += amt as u64;
Ok(amt)
}
#[inline]
fn slice_write_vectored(
pos_mut: &mut u64,
slice: &mut [u8],
bufs: &[IoSlice<'_>],
) -> io::Result<usize> {
let mut nwritten = 0;
for buf in bufs {
let n = slice_write(pos_mut, slice, buf)?;
nwritten += n;
if n < buf.len() {
break;
}
}
Ok(nwritten)
}
/// Reserves the required space, and pads the vec with 0s if necessary.
fn reserve_and_pad<A: Allocator>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
buf_len: usize,
) -> io::Result<usize> {
let pos: usize = (*pos_mut).try_into().map_err(|_| {
io::const_io_error!(
ErrorKind::InvalidInput,
"cursor position exceeds maximum possible vector length",
)
})?;
// For safety reasons, we don't want these numbers to overflow
// otherwise our allocation won't be enough
let desired_cap = pos.saturating_add(buf_len);
if desired_cap > vec.capacity() {
// We want our vec's total capacity
// to have room for (pos+buf_len) bytes. Reserve allocates
// based on additional elements from the length, so we need to
// reserve the difference
vec.reserve(desired_cap - vec.len());
}
// Pad if pos is above the current len.
if pos > vec.len() {
let diff = pos - vec.len();
// Unfortunately, `resize()` would suffice but the optimiser does not
// realise the `reserve` it does can be eliminated. So we do it manually
// to eliminate that extra branch
let spare = vec.spare_capacity_mut();
debug_assert!(spare.len() >= diff);
// Safety: we have allocated enough capacity for this.
// And we are only writing, not reading
unsafe {
spare.get_unchecked_mut(..diff).fill(core::mem::MaybeUninit::new(0));
vec.set_len(pos);
}
}
Ok(pos)
}
/// Writes the slice to the vec without allocating
/// # Safety: vec must have buf.len() spare capacity
unsafe fn vec_write_unchecked<A>(pos: usize, vec: &mut Vec<u8, A>, buf: &[u8]) -> usize
where
A: Allocator,
{
debug_assert!(vec.capacity() >= pos + buf.len());
vec.as_mut_ptr().add(pos).copy_from(buf.as_ptr(), buf.len());
pos + buf.len()
}
/// Resizing write implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
where
A: Allocator,
{
let buf_len = buf.len();
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to pos
unsafe {
pos = vec_write_unchecked(pos, vec, buf);
if pos > vec.len() {
vec.set_len(pos);
}
};
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
/// Resizing write_vectored implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write_vectored<A>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
bufs: &[IoSlice<'_>],
) -> io::Result<usize>
where
A: Allocator,
{
// For safety reasons, we don't want this sum to overflow ever.
// If this saturates, the reserve should panic to avoid any unsound writing.
let buf_len = bufs.iter().fold(0usize, |a, b| a.saturating_add(b.len()));
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to the last pos
unsafe {
for buf in bufs {
pos = vec_write_unchecked(pos, vec, buf);
}
if pos > vec.len() {
vec.set_len(pos);
}
}
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
impl Write for Cursor<&mut [u8]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<&mut Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn | (&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, &mut self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Box<[u8], A>>
where
A: Allocator,
{
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<const N: usize> Write for Cursor<[u8; N]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
| flush | identifier_name |
cursor.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
use crate::io::prelude::*;
use crate::alloc::Allocator;
use crate::cmp;
use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, SeekFrom};
/// A `Cursor` wraps an in-memory buffer and provides it with a
/// [`Seek`] implementation.
///
/// `Cursor`s are used with in-memory buffers, anything implementing
/// <code>[AsRef]<\[u8]></code>, to allow them to implement [`Read`] and/or [`Write`],
/// allowing these buffers to be used anywhere you might use a reader or writer
/// that does actual I/O.
///
/// The standard library implements some I/O traits on various types which
/// are commonly used as a buffer, like <code>Cursor<[Vec]\<u8>></code> and
/// <code>Cursor<[&\[u8\]][bytes]></code>.
///
/// # Examples
///
/// We may want to write bytes to a [`File`] in our production
/// code, but use an in-memory buffer in our tests. We can do this with
/// `Cursor`:
///
/// [bytes]: crate::slice "slice"
/// [`File`]: crate::fs::File
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::{self, SeekFrom};
/// use std::fs::File;
///
/// // a library function we've written
/// fn write_ten_bytes_at_end<W: Write + Seek>(writer: &mut W) -> io::Result<()> {
/// writer.seek(SeekFrom::End(-10))?;
///
/// for i in 0..10 {
/// writer.write(&[i])?;
/// }
///
/// // all went well
/// Ok(())
/// }
///
/// # fn foo() -> io::Result<()> {
/// // Here's some code that uses this library function.
/// //
/// // We might want to use a BufReader here for efficiency, but let's
/// // keep this example focused.
/// let mut file = File::create("foo.txt")?;
///
/// write_ten_bytes_at_end(&mut file)?;
/// # Ok(())
/// # }
///
/// // now let's write a test
/// #[test]
/// fn test_writes_bytes() {
/// // setting up a real File is much slower than an in-memory buffer,
/// // let's use a cursor instead
/// use std::io::Cursor;
/// let mut buff = Cursor::new(vec![0; 15]);
///
/// write_ten_bytes_at_end(&mut buff).unwrap();
///
/// assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// }
/// ```
#[derive(Debug, Default, Eq, PartialEq)]
pub struct Cursor<T> {
inner: T,
pos: u64,
}
impl<T> Cursor<T> {
/// Creates a new cursor wrapping the provided underlying in-memory buffer.
///
/// Cursor initial position is `0` even if underlying buffer (e.g., [`Vec`])
/// is not empty. So writing to cursor starts with overwriting [`Vec`]
/// content, not with appending to it.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
/// ```
pub const fn new(inner: T) -> Cursor<T> {
Cursor { pos: 0, inner }
}
/// Consumes this cursor, returning the underlying value.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let vec = buff.into_inner();
/// ```
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying value in this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_ref();
/// ```
pub const fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying value in this cursor.
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying value as it may corrupt this cursor's position.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_mut();
/// ```
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Returns the current position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
/// use std::io::prelude::*;
/// use std::io::SeekFrom;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.seek(SeekFrom::Current(2)).unwrap();
/// assert_eq!(buff.position(), 2);
///
/// buff.seek(SeekFrom::Current(-1)).unwrap();
/// assert_eq!(buff.position(), 1);
/// ```
pub const fn position(&self) -> u64 {
self.pos
}
/// Sets the position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.set_position(2);
/// assert_eq!(buff.position(), 2);
///
/// buff.set_position(4);
/// assert_eq!(buff.position(), 4);
/// ```
pub fn set_position(&mut self, pos: u64) {
self.pos = pos;
}
}
impl<T> Cursor<T>
where
T: AsRef<[u8]>,
{
/// Returns the remaining slice.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.remaining_slice(), &[1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert_eq!(buff.remaining_slice(), &[3, 4, 5]);
///
/// buff.set_position(4);
/// assert_eq!(buff.remaining_slice(), &[5]);
///
/// buff.set_position(6);
/// assert_eq!(buff.remaining_slice(), &[]);
/// ```
pub fn remaining_slice(&self) -> &[u8] {
let len = self.pos.min(self.inner.as_ref().len() as u64);
&self.inner.as_ref()[(len as usize)..]
}
/// Returns `true` if the remaining slice is empty.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert!(!buff.is_empty());
///
/// buff.set_position(5);
/// assert!(buff.is_empty());
///
/// buff.set_position(10);
/// assert!(buff.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.pos >= self.inner.as_ref().len() as u64
}
}
impl<T> Clone for Cursor<T>
where
T: Clone,
{
#[inline]
fn clone(&self) -> Self {
Cursor { inner: self.inner.clone(), pos: self.pos }
}
#[inline]
fn clone_from(&mut self, other: &Self) {
self.inner.clone_from(&other.inner);
self.pos = other.pos;
}
}
impl<T> io::Seek for Cursor<T>
where
T: AsRef<[u8]>,
{
fn seek(&mut self, style: SeekFrom) -> io::Result<u64> {
let (base_pos, offset) = match style {
SeekFrom::Start(n) => {
self.pos = n;
return Ok(n);
}
SeekFrom::End(n) => (self.inner.as_ref().len() as u64, n),
SeekFrom::Current(n) => (self.pos, n),
};
match base_pos.checked_add_signed(offset) {
Some(n) => {
self.pos = n;
Ok(self.pos)
}
None => Err(io::const_io_error!(
ErrorKind::InvalidInput,
"invalid seek to a negative or overflowing position",
)),
}
}
fn stream_len(&mut self) -> io::Result<u64> {
Ok(self.inner.as_ref().len() as u64)
}
fn stream_position(&mut self) -> io::Result<u64> {
Ok(self.pos)
}
}
impl<T> Read for Cursor<T>
where
T: AsRef<[u8]>,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let n = Read::read(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(n)
}
fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
let prev_written = cursor.written();
Read::read_buf(&mut self.fill_buf()?, cursor.reborrow())?;
self.pos += (cursor.written() - prev_written) as u64;
Ok(())
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let mut nread = 0;
for buf in bufs {
let n = self.read(buf)?;
nread += n;
if n < buf.len() {
break;
}
}
Ok(nread)
}
fn is_read_vectored(&self) -> bool {
true
}
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
let n = buf.len();
Read::read_exact(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(())
}
}
impl<T> BufRead for Cursor<T>
where
T: AsRef<[u8]>,
{
fn fill_buf(&mut self) -> io::Result<&[u8]> {
Ok(self.remaining_slice())
}
fn consume(&mut self, amt: usize) {
self.pos += amt as u64;
}
}
// Non-resizing write implementation
#[inline]
fn slice_write(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<usize> {
let pos = cmp::min(*pos_mut, slice.len() as u64);
let amt = (&mut slice[(pos as usize)..]).write(buf)?;
*pos_mut += amt as u64;
Ok(amt)
}
#[inline]
fn slice_write_vectored(
pos_mut: &mut u64,
slice: &mut [u8],
bufs: &[IoSlice<'_>],
) -> io::Result<usize> {
let mut nwritten = 0;
for buf in bufs {
let n = slice_write(pos_mut, slice, buf)?;
nwritten += n;
if n < buf.len() {
break;
}
}
Ok(nwritten)
}
/// Reserves the required space, and pads the vec with 0s if necessary.
fn reserve_and_pad<A: Allocator>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
buf_len: usize,
) -> io::Result<usize> {
let pos: usize = (*pos_mut).try_into().map_err(|_| {
io::const_io_error!(
ErrorKind::InvalidInput,
"cursor position exceeds maximum possible vector length",
)
})?;
// For safety reasons, we don't want these numbers to overflow
// otherwise our allocation won't be enough
let desired_cap = pos.saturating_add(buf_len);
if desired_cap > vec.capacity() |
// Pad if pos is above the current len.
if pos > vec.len() {
let diff = pos - vec.len();
// Unfortunately, `resize()` would suffice but the optimiser does not
// realise the `reserve` it does can be eliminated. So we do it manually
// to eliminate that extra branch
let spare = vec.spare_capacity_mut();
debug_assert!(spare.len() >= diff);
// Safety: we have allocated enough capacity for this.
// And we are only writing, not reading
unsafe {
spare.get_unchecked_mut(..diff).fill(core::mem::MaybeUninit::new(0));
vec.set_len(pos);
}
}
Ok(pos)
}
/// Writes the slice to the vec without allocating
/// # Safety: vec must have buf.len() spare capacity
unsafe fn vec_write_unchecked<A>(pos: usize, vec: &mut Vec<u8, A>, buf: &[u8]) -> usize
where
A: Allocator,
{
debug_assert!(vec.capacity() >= pos + buf.len());
vec.as_mut_ptr().add(pos).copy_from(buf.as_ptr(), buf.len());
pos + buf.len()
}
/// Resizing write implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
where
A: Allocator,
{
let buf_len = buf.len();
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to pos
unsafe {
pos = vec_write_unchecked(pos, vec, buf);
if pos > vec.len() {
vec.set_len(pos);
}
};
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
/// Resizing write_vectored implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write_vectored<A>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
bufs: &[IoSlice<'_>],
) -> io::Result<usize>
where
A: Allocator,
{
// For safety reasons, we don't want this sum to overflow ever.
// If this saturates, the reserve should panic to avoid any unsound writing.
let buf_len = bufs.iter().fold(0usize, |a, b| a.saturating_add(b.len()));
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to the last pos
unsafe {
for buf in bufs {
pos = vec_write_unchecked(pos, vec, buf);
}
if pos > vec.len() {
vec.set_len(pos);
}
}
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
impl Write for Cursor<&mut [u8]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<&mut Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, &mut self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Box<[u8], A>>
where
A: Allocator,
{
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<const N: usize> Write for Cursor<[u8; N]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
| {
// We want our vec's total capacity
// to have room for (pos+buf_len) bytes. Reserve allocates
// based on additional elements from the length, so we need to
// reserve the difference
vec.reserve(desired_cap - vec.len());
} | conditional_block |
cursor.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
use crate::io::prelude::*;
use crate::alloc::Allocator;
use crate::cmp;
use crate::io::{self, BorrowedCursor, ErrorKind, IoSlice, IoSliceMut, SeekFrom};
/// A `Cursor` wraps an in-memory buffer and provides it with a
/// [`Seek`] implementation.
///
/// `Cursor`s are used with in-memory buffers, anything implementing
/// <code>[AsRef]<\[u8]></code>, to allow them to implement [`Read`] and/or [`Write`],
/// allowing these buffers to be used anywhere you might use a reader or writer
/// that does actual I/O.
///
/// The standard library implements some I/O traits on various types which
/// are commonly used as a buffer, like <code>Cursor<[Vec]\<u8>></code> and
/// <code>Cursor<[&\[u8\]][bytes]></code>.
///
/// # Examples
///
/// We may want to write bytes to a [`File`] in our production
/// code, but use an in-memory buffer in our tests. We can do this with
/// `Cursor`:
///
/// [bytes]: crate::slice "slice"
/// [`File`]: crate::fs::File
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::{self, SeekFrom};
/// use std::fs::File;
///
/// // a library function we've written
/// fn write_ten_bytes_at_end<W: Write + Seek>(writer: &mut W) -> io::Result<()> {
/// writer.seek(SeekFrom::End(-10))?;
///
/// for i in 0..10 {
/// writer.write(&[i])?;
/// }
///
/// // all went well
/// Ok(())
/// }
///
/// # fn foo() -> io::Result<()> {
/// // Here's some code that uses this library function.
/// //
/// // We might want to use a BufReader here for efficiency, but let's
/// // keep this example focused.
/// let mut file = File::create("foo.txt")?;
///
/// write_ten_bytes_at_end(&mut file)?;
/// # Ok(())
/// # }
///
/// // now let's write a test
/// #[test]
/// fn test_writes_bytes() {
/// // setting up a real File is much slower than an in-memory buffer,
/// // let's use a cursor instead
/// use std::io::Cursor;
/// let mut buff = Cursor::new(vec![0; 15]);
///
/// write_ten_bytes_at_end(&mut buff).unwrap();
///
/// assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
/// }
/// ```
#[derive(Debug, Default, Eq, PartialEq)]
pub struct Cursor<T> {
inner: T,
pos: u64,
}
impl<T> Cursor<T> {
/// Creates a new cursor wrapping the provided underlying in-memory buffer.
///
/// Cursor initial position is `0` even if underlying buffer (e.g., [`Vec`])
/// is not empty. So writing to cursor starts with overwriting [`Vec`]
/// content, not with appending to it.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
/// ```
pub const fn new(inner: T) -> Cursor<T> {
Cursor { pos: 0, inner }
}
/// Consumes this cursor, returning the underlying value.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let vec = buff.into_inner();
/// ```
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying value in this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_ref();
/// ```
pub const fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying value in this cursor.
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying value as it may corrupt this cursor's position.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(Vec::new());
/// # fn force_inference(_: &Cursor<Vec<u8>>) {}
/// # force_inference(&buff);
///
/// let reference = buff.get_mut();
/// ```
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Returns the current position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
/// use std::io::prelude::*;
/// use std::io::SeekFrom;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.seek(SeekFrom::Current(2)).unwrap();
/// assert_eq!(buff.position(), 2);
///
/// buff.seek(SeekFrom::Current(-1)).unwrap();
/// assert_eq!(buff.position(), 1);
/// ```
pub const fn position(&self) -> u64 {
self.pos
}
/// Sets the position of this cursor.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.position(), 0);
///
/// buff.set_position(2);
/// assert_eq!(buff.position(), 2);
///
/// buff.set_position(4);
/// assert_eq!(buff.position(), 4);
/// ```
pub fn set_position(&mut self, pos: u64) {
self.pos = pos;
}
}
impl<T> Cursor<T>
where
T: AsRef<[u8]>,
{
/// Returns the remaining slice.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// assert_eq!(buff.remaining_slice(), &[1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert_eq!(buff.remaining_slice(), &[3, 4, 5]);
///
/// buff.set_position(4);
/// assert_eq!(buff.remaining_slice(), &[5]);
///
/// buff.set_position(6);
/// assert_eq!(buff.remaining_slice(), &[]);
/// ```
pub fn remaining_slice(&self) -> &[u8] {
let len = self.pos.min(self.inner.as_ref().len() as u64);
&self.inner.as_ref()[(len as usize)..]
}
/// Returns `true` if the remaining slice is empty.
///
/// # Examples
///
/// ```
/// #![feature(cursor_remaining)]
/// use std::io::Cursor;
///
/// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]);
///
/// buff.set_position(2);
/// assert!(!buff.is_empty());
///
/// buff.set_position(5);
/// assert!(buff.is_empty());
///
/// buff.set_position(10);
/// assert!(buff.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.pos >= self.inner.as_ref().len() as u64
}
}
impl<T> Clone for Cursor<T>
where
T: Clone,
{
#[inline]
fn clone(&self) -> Self {
Cursor { inner: self.inner.clone(), pos: self.pos }
}
#[inline]
fn clone_from(&mut self, other: &Self) {
self.inner.clone_from(&other.inner);
self.pos = other.pos;
}
}
impl<T> io::Seek for Cursor<T>
where
T: AsRef<[u8]>,
{
fn seek(&mut self, style: SeekFrom) -> io::Result<u64> {
let (base_pos, offset) = match style {
SeekFrom::Start(n) => {
self.pos = n;
return Ok(n);
}
SeekFrom::End(n) => (self.inner.as_ref().len() as u64, n),
SeekFrom::Current(n) => (self.pos, n),
};
match base_pos.checked_add_signed(offset) {
Some(n) => {
self.pos = n;
Ok(self.pos)
}
None => Err(io::const_io_error!(
ErrorKind::InvalidInput,
"invalid seek to a negative or overflowing position",
)),
}
}
fn stream_len(&mut self) -> io::Result<u64> {
Ok(self.inner.as_ref().len() as u64)
}
fn stream_position(&mut self) -> io::Result<u64> {
Ok(self.pos)
}
}
impl<T> Read for Cursor<T>
where
T: AsRef<[u8]>,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let n = Read::read(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(n)
}
fn read_buf(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
let prev_written = cursor.written();
Read::read_buf(&mut self.fill_buf()?, cursor.reborrow())?;
self.pos += (cursor.written() - prev_written) as u64;
Ok(())
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let mut nread = 0;
for buf in bufs {
let n = self.read(buf)?;
nread += n;
if n < buf.len() {
break;
}
}
Ok(nread)
}
fn is_read_vectored(&self) -> bool {
true
}
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
let n = buf.len();
Read::read_exact(&mut self.remaining_slice(), buf)?;
self.pos += n as u64;
Ok(())
}
}
impl<T> BufRead for Cursor<T>
where
T: AsRef<[u8]>,
{
fn fill_buf(&mut self) -> io::Result<&[u8]> {
Ok(self.remaining_slice())
}
fn consume(&mut self, amt: usize) {
self.pos += amt as u64;
}
}
// Non-resizing write implementation
#[inline]
fn slice_write(pos_mut: &mut u64, slice: &mut [u8], buf: &[u8]) -> io::Result<usize> {
let pos = cmp::min(*pos_mut, slice.len() as u64);
let amt = (&mut slice[(pos as usize)..]).write(buf)?;
*pos_mut += amt as u64;
Ok(amt)
}
#[inline]
fn slice_write_vectored(
pos_mut: &mut u64,
slice: &mut [u8],
bufs: &[IoSlice<'_>],
) -> io::Result<usize> {
let mut nwritten = 0;
for buf in bufs {
let n = slice_write(pos_mut, slice, buf)?;
nwritten += n;
if n < buf.len() {
break;
}
}
Ok(nwritten)
}
/// Reserves the required space, and pads the vec with 0s if necessary.
fn reserve_and_pad<A: Allocator>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
buf_len: usize,
) -> io::Result<usize> {
let pos: usize = (*pos_mut).try_into().map_err(|_| {
io::const_io_error!(
ErrorKind::InvalidInput,
"cursor position exceeds maximum possible vector length",
)
})?;
// For safety reasons, we don't want these numbers to overflow
// otherwise our allocation won't be enough
let desired_cap = pos.saturating_add(buf_len);
if desired_cap > vec.capacity() {
// We want our vec's total capacity
// to have room for (pos+buf_len) bytes. Reserve allocates
// based on additional elements from the length, so we need to
// reserve the difference
vec.reserve(desired_cap - vec.len());
}
// Pad if pos is above the current len.
if pos > vec.len() {
let diff = pos - vec.len();
// Unfortunately, `resize()` would suffice but the optimiser does not
// realise the `reserve` it does can be eliminated. So we do it manually
// to eliminate that extra branch
let spare = vec.spare_capacity_mut();
debug_assert!(spare.len() >= diff);
// Safety: we have allocated enough capacity for this.
// And we are only writing, not reading
unsafe {
spare.get_unchecked_mut(..diff).fill(core::mem::MaybeUninit::new(0));
vec.set_len(pos);
}
}
Ok(pos)
}
/// Writes the slice to the vec without allocating
/// # Safety: vec must have buf.len() spare capacity
unsafe fn vec_write_unchecked<A>(pos: usize, vec: &mut Vec<u8, A>, buf: &[u8]) -> usize
where
A: Allocator,
{
debug_assert!(vec.capacity() >= pos + buf.len());
vec.as_mut_ptr().add(pos).copy_from(buf.as_ptr(), buf.len());
pos + buf.len()
}
/// Resizing write implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
where
A: Allocator,
{
let buf_len = buf.len();
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to pos
unsafe {
pos = vec_write_unchecked(pos, vec, buf);
if pos > vec.len() {
vec.set_len(pos);
}
};
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
/// Resizing write_vectored implementation for [`Cursor`]
///
/// Cursor is allowed to have a pre-allocated and initialised
/// vector body, but with a position of 0. This means the [`Write`]
/// will overwrite the contents of the vec.
///
/// This also allows for the vec body to be empty, but with a position of N.
/// This means that [`Write`] will pad the vec with 0 initially,
/// before writing anything from that point
fn vec_write_vectored<A>(
pos_mut: &mut u64,
vec: &mut Vec<u8, A>,
bufs: &[IoSlice<'_>],
) -> io::Result<usize>
where
A: Allocator,
{
// For safety reasons, we don't want this sum to overflow ever.
// If this saturates, the reserve should panic to avoid any unsound writing.
let buf_len = bufs.iter().fold(0usize, |a, b| a.saturating_add(b.len()));
let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
// Write the buf then progress the vec forward if necessary
// Safety: we have ensured that the capacity is available
// and that all bytes get written up to the last pos
unsafe {
for buf in bufs {
pos = vec_write_unchecked(pos, vec, buf);
}
if pos > vec.len() {
vec.set_len(pos);
}
}
// Bump us forward
*pos_mut += buf_len as u64;
Ok(buf_len)
}
impl Write for Cursor<&mut [u8]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<&mut Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> |
}
impl<A> Write for Cursor<Vec<u8, A>>
where
A: Allocator,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, &mut self.inner, buf)
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
vec_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<A> Write for Cursor<Box<[u8], A>>
where
A: Allocator,
{
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<const N: usize> Write for Cursor<[u8; N]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
}
#[inline]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
true
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
| {
Ok(())
} | identifier_body |
lib.rs | #![deny(
absolute_paths_not_starting_with_crate,
ambiguous_associated_items,
ambiguous_glob_reexports,
anonymous_parameters,
arithmetic_overflow,
array_into_iter,
asm_sub_register,
bad_asm_style,
bindings_with_variant_name,
break_with_label_and_loop,
byte_slice_in_packed_struct_with_derive,
cenum_impl_drop_cast,
clashing_extern_declarations,
coherence_leak_check,
conflicting_repr_hints,
confusable_idents,
const_evaluatable_unchecked,
const_item_mutation,
dead_code,
deprecated,
deprecated_cfg_attr_crate_type_name,
deprecated_in_future,
deprecated_where_clause_location,
deref_into_dyn_supertrait,
deref_nullptr,
drop_bounds,
dropping_copy_types,
dropping_references,
duplicate_macro_attributes,
dyn_drop,
ellipsis_inclusive_range_patterns,
enum_intrinsics_non_enums,
explicit_outlives_requirements,
exported_private_dependencies,
ffi_unwind_calls,
for_loops_over_fallibles,
forbidden_lint_groups,
forgetting_copy_types,
forgetting_references,
function_item_references,
ill_formed_attribute_input,
illegal_floating_point_literal_pattern,
implied_bounds_entailment,
improper_ctypes,
improper_ctypes_definitions,
incomplete_features,
incomplete_include,
indirect_structural_match,
ineffective_unstable_trait_impl,
inline_no_sanitize,
invalid_alignment,
invalid_atomic_ordering,
invalid_doc_attributes,
invalid_macro_export_arguments,
invalid_type_param_default,
invalid_value,
irrefutable_let_patterns,
keyword_idents,
large_assignments,
late_bound_lifetime_arguments,
legacy_derive_helpers,
let_underscore_drop,
let_underscore_lock,
macro_expanded_macro_exports_accessed_by_absolute_paths,
map_unit_fn,
meta_variable_misuse,
missing_abi,
missing_copy_implementations,
missing_docs,
missing_fragment_specifier,
mixed_script_confusables,
mutable_transmutes,
named_arguments_used_positionally,
named_asm_labels,
no_mangle_const_items,
no_mangle_generic_items,
non_ascii_idents,
non_camel_case_types,
non_fmt_panics,
non_shorthand_field_patterns,
non_snake_case,
non_upper_case_globals,
nontrivial_structural_match,
noop_method_call,
opaque_hidden_inferred_bound,
order_dependent_trait_objects,
overflowing_literals,
overlapping_range_endpoints,
path_statements,
patterns_in_fns_without_body,
pointer_structural_match,
private_in_public,
proc_macro_back_compat,
proc_macro_derive_resolution_fallback,
pub_use_of_private_extern_crate,
redundant_semicolons,
repr_transparent_external_private_fields,
rust_2021_incompatible_closure_captures,
rust_2021_incompatible_or_patterns,
rust_2021_prefixes_incompatible_syntax,
rust_2021_prelude_collisions,
semicolon_in_expressions_from_macros,
soft_unstable,
special_module_name,
stable_features,
suspicious_auto_trait_impls,
suspicious_double_ref_op,
temporary_cstring_as_ptr,
text_direction_codepoint_in_comment,
text_direction_codepoint_in_literal,
trivial_bounds,
trivial_casts,
trivial_numeric_casts,
type_alias_bounds,
tyvar_behind_raw_pointer,
uncommon_codepoints,
unconditional_panic,
unconditional_recursion,
undefined_naked_function_abi,
unexpected_cfgs,
ungated_async_fn_track_caller,
uninhabited_static,
unknown_crate_types,
unnameable_test_items,
unreachable_code,
unreachable_patterns,
unreachable_pub,
unsafe_code,
unsafe_op_in_unsafe_fn,
unstable_features,
unstable_name_collisions,
unstable_syntax_pre_expansion,
unsupported_calling_conventions,
unused_allocation,
unused_assignments,
unused_assignments,
unused_attributes,
unused_braces,
unused_comparisons,
unused_crate_dependencies,
unused_doc_comments,
unused_extern_crates,
unused_features,
unused_import_braces,
unused_imports,
unused_labels,
unused_lifetimes,
unused_macro_rules,
unused_macros,
unused_must_use,
unused_mut,
unused_parens,
unused_qualifications,
unused_tuple_struct_fields,
unused_unsafe,
unused_variables,
useless_deprecated,
where_clauses_object_safety,
while_true
)]
#![warn(macro_use_extern_crate, unknown_lints)]
#![allow(
bare_trait_objects,
box_pointers,
elided_lifetimes_in_paths,
missing_debug_implementations,
single_use_lifetimes,
unused_results,
variant_size_differences,
warnings,
renamed_and_removed_lints
)]
//! # simple_redis
//!
//! Simple and resilient [redis](https://redis.io/) client based on [redis-rs](https://crates.io/crates/redis) with
//! internal connection and subscription handling.
//!
//! This library provides a very basic, simple API for the most common redis operations.<br>
//! While not as comprehensive or flexiable as [redis-rs](https://crates.io/crates/redis),
//! it does provide a simpler api for most common use cases and operations as well as automatic and resilient internal
//! connection and subscription (pubsub) handling.<br>
//! In addition, the entire API is accessible via redis client and there is no need to manage connection or pubsub
//! instances in parallel.<br>
//!
//! ## Connection Resiliency
//!
//! Connection resiliency is managed by verifying the internally managed connection before every operation against the
//! redis server.<br>
//! In case of any connection issue, a new connection will be allocated to ensure the operation is invoked on a valid
//! connection only.<br>
//! However, this comes at a small performance cost of PING operation to the redis server.<br>
//!
//! ## Subscription Resiliency
//!
//! Subscription resiliency is ensured by recreating the internal pubsub and issuing new subscription requests
//! automatically in case of any error while fetching a message from the subscribed channels.
//!
//! # Examples
//!
//! ## Initialization and Simple Operations
//!
//! ```
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! match client.set("my_key", "my_value") {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.get_string("my_key") {
//! Ok(value) => println!("Read value from Redis: {}", value),
//! Err(error) => println!("Unable to get value from Redis: {}", error)
//! };
//!
//! match client.set("my_numeric_key", 255.5) {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.get::<f32>("my_numeric_key") {
//! Ok(value) => println!("Read value from Redis: {}", value),
//! Err(error) => println!("Unable to get value from Redis: {}", error)
//! };
//!
//! match client.hgetall("my_map") {
//! Ok(map) => {
//! match map.get("my_field") {
//! Some(value) => println!("Got field value from map: {}", value),
//! None => println!("Map field is emtpy"),
//! }
//! },
//! Err(error) => println!("Unable to read map from Redis: {}", error),
//! };
//!
//! /// run some command that is not built in the library
//! match client.run_command::<String>("ECHO", vec!["testing"]) {
//! Ok(value) => assert_eq!(value, "testing"),
//! _ => panic!("test error"),
//! };
//!
//! /// publish messages
//! let result = client.publish("news_channel", "test message");
//! assert!(result.is_ok());
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! ## Subscription Flow
//!
//! ```rust,no_run
//! use simple_redis::{Interrupts, Message};
//!
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! let mut result = client.subscribe("important_notifications");
//! assert!(result.is_ok());
//! result = client.psubscribe("*_notifications");
//! assert!(result.is_ok());
//!
//! // fetch messages from all subscriptions
//! client.fetch_messages(
//! &mut |message: Message| -> bool {
//! let payload : String = message.get_payload().unwrap();
//! println!("Got message: {}", payload);
//!
//! // continue fetching
//! false
//! },
//! &mut || -> Interrupts { Interrupts::new() },
//! ).unwrap();
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! ## Closing Connection
//!
//! ```rust
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! match client.set("my_key", "my_value") {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.quit() {
//! Err(error) => println!("Error: {}", error),
//! _ => println!("Connection Closed.")
//! }
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! # Installation
//! In order to use this library, just add it as a dependency:
//!
//! ```ini
//! [dependencies]
//! simple_redis = "*"
//! ```
//!
//! # Contributing
//! See [contributing guide](https://github.com/sagiegurari/simple_redis/blob/master/.github/CONTRIBUTING.md)
//!
//! # License
//! Developed by Sagie Gur-Ari and licensed under the
//! [Apache 2](https://github.com/sagiegurari/simple_redis/blob/master/LICENSE) open source license.
//!
#[cfg(test)]
#[path = "./lib_test.rs"]
mod lib_test;
#[cfg(doctest)]
doc_comment::doctest!("../README.md");
pub mod client;
mod commands;
mod connection;
mod subscriber;
pub mod types;
/// Error Type
pub type RedisError = types::RedisError;
/// PubSub message
pub type Message = types::Message;
/// Blocking operations interrupts
pub type Interrupts = types::Interrupts;
/// Redis result which either holds a value or a Redis error
pub type RedisResult<T> = types::RedisResult<T>;
/// Constructs a new redis client.<br>
/// The redis connection string must be in the following format: `redis://[:<passwd>@]<hostname>[:port][/<db>]`
///
/// # Arguments
///
/// * `connection_string` - The connection string in the format of: `redis://[:<passwd>@]<hostname>[:port][/<db>]`
///
/// # Example
///
/// ```
/// extern crate simple_redis;
///
/// fn main() {
/// match simple_redis::create("redis://127.0.0.1:6379/") {
/// Ok(client) => println!("Created Redis Client"),
/// Err(error) => println!("Unable to create Redis client: {}", error)
/// }
/// }
/// ```
pub fn create(connection_string: &str) -> Result<client::Client, RedisError> | {
client::create(connection_string)
} | identifier_body |
|
lib.rs | #![deny(
absolute_paths_not_starting_with_crate,
ambiguous_associated_items,
ambiguous_glob_reexports,
anonymous_parameters,
arithmetic_overflow,
array_into_iter,
asm_sub_register,
bad_asm_style,
bindings_with_variant_name,
break_with_label_and_loop,
byte_slice_in_packed_struct_with_derive,
cenum_impl_drop_cast,
clashing_extern_declarations,
coherence_leak_check,
conflicting_repr_hints,
confusable_idents,
const_evaluatable_unchecked,
const_item_mutation,
dead_code,
deprecated,
deprecated_cfg_attr_crate_type_name,
deprecated_in_future,
deprecated_where_clause_location,
deref_into_dyn_supertrait,
deref_nullptr,
drop_bounds,
dropping_copy_types,
dropping_references,
duplicate_macro_attributes,
dyn_drop,
ellipsis_inclusive_range_patterns,
enum_intrinsics_non_enums,
explicit_outlives_requirements,
exported_private_dependencies,
ffi_unwind_calls,
for_loops_over_fallibles,
forbidden_lint_groups,
forgetting_copy_types,
forgetting_references,
function_item_references,
ill_formed_attribute_input,
illegal_floating_point_literal_pattern,
implied_bounds_entailment,
improper_ctypes,
improper_ctypes_definitions,
incomplete_features,
incomplete_include,
indirect_structural_match,
ineffective_unstable_trait_impl,
inline_no_sanitize,
invalid_alignment,
invalid_atomic_ordering,
invalid_doc_attributes,
invalid_macro_export_arguments,
invalid_type_param_default,
invalid_value,
irrefutable_let_patterns,
keyword_idents,
large_assignments,
late_bound_lifetime_arguments,
legacy_derive_helpers,
let_underscore_drop,
let_underscore_lock,
macro_expanded_macro_exports_accessed_by_absolute_paths,
map_unit_fn,
meta_variable_misuse,
missing_abi,
missing_copy_implementations,
missing_docs,
missing_fragment_specifier,
mixed_script_confusables,
mutable_transmutes,
named_arguments_used_positionally,
named_asm_labels,
no_mangle_const_items,
no_mangle_generic_items,
non_ascii_idents,
non_camel_case_types,
non_fmt_panics,
non_shorthand_field_patterns,
non_snake_case,
non_upper_case_globals,
nontrivial_structural_match,
noop_method_call,
opaque_hidden_inferred_bound,
order_dependent_trait_objects,
overflowing_literals,
overlapping_range_endpoints,
path_statements,
patterns_in_fns_without_body,
pointer_structural_match,
private_in_public,
proc_macro_back_compat,
proc_macro_derive_resolution_fallback,
pub_use_of_private_extern_crate,
redundant_semicolons,
repr_transparent_external_private_fields,
rust_2021_incompatible_closure_captures,
rust_2021_incompatible_or_patterns,
rust_2021_prefixes_incompatible_syntax,
rust_2021_prelude_collisions,
semicolon_in_expressions_from_macros,
soft_unstable,
special_module_name,
stable_features,
suspicious_auto_trait_impls,
suspicious_double_ref_op,
temporary_cstring_as_ptr,
text_direction_codepoint_in_comment,
text_direction_codepoint_in_literal,
trivial_bounds,
trivial_casts,
trivial_numeric_casts,
type_alias_bounds,
tyvar_behind_raw_pointer,
uncommon_codepoints,
unconditional_panic,
unconditional_recursion,
undefined_naked_function_abi,
unexpected_cfgs,
ungated_async_fn_track_caller,
uninhabited_static,
unknown_crate_types,
unnameable_test_items,
unreachable_code,
unreachable_patterns,
unreachable_pub,
unsafe_code,
unsafe_op_in_unsafe_fn,
unstable_features,
unstable_name_collisions,
unstable_syntax_pre_expansion,
unsupported_calling_conventions,
unused_allocation,
unused_assignments,
unused_assignments,
unused_attributes,
unused_braces,
unused_comparisons,
unused_crate_dependencies,
unused_doc_comments,
unused_extern_crates,
unused_features,
unused_import_braces,
unused_imports,
unused_labels,
unused_lifetimes,
unused_macro_rules,
unused_macros,
unused_must_use,
unused_mut,
unused_parens,
unused_qualifications,
unused_tuple_struct_fields,
unused_unsafe,
unused_variables,
useless_deprecated,
where_clauses_object_safety,
while_true
)]
#![warn(macro_use_extern_crate, unknown_lints)]
#![allow(
bare_trait_objects,
box_pointers,
elided_lifetimes_in_paths,
missing_debug_implementations,
single_use_lifetimes,
unused_results,
variant_size_differences,
warnings,
renamed_and_removed_lints
)]
//! # simple_redis
//!
//! Simple and resilient [redis](https://redis.io/) client based on [redis-rs](https://crates.io/crates/redis) with
//! internal connection and subscription handling.
//!
//! This library provides a very basic, simple API for the most common redis operations.<br>
//! While not as comprehensive or flexiable as [redis-rs](https://crates.io/crates/redis),
//! it does provide a simpler api for most common use cases and operations as well as automatic and resilient internal
//! connection and subscription (pubsub) handling.<br>
//! In addition, the entire API is accessible via redis client and there is no need to manage connection or pubsub
//! instances in parallel.<br>
//!
//! ## Connection Resiliency
//!
//! Connection resiliency is managed by verifying the internally managed connection before every operation against the
//! redis server.<br>
//! In case of any connection issue, a new connection will be allocated to ensure the operation is invoked on a valid
//! connection only.<br>
//! However, this comes at a small performance cost of PING operation to the redis server.<br>
//!
//! ## Subscription Resiliency
//!
//! Subscription resiliency is ensured by recreating the internal pubsub and issuing new subscription requests
//! automatically in case of any error while fetching a message from the subscribed channels.
//!
//! # Examples
//!
//! ## Initialization and Simple Operations
//!
//! ```
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! match client.set("my_key", "my_value") {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.get_string("my_key") {
//! Ok(value) => println!("Read value from Redis: {}", value),
//! Err(error) => println!("Unable to get value from Redis: {}", error)
//! };
//!
//! match client.set("my_numeric_key", 255.5) {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.get::<f32>("my_numeric_key") {
//! Ok(value) => println!("Read value from Redis: {}", value),
//! Err(error) => println!("Unable to get value from Redis: {}", error)
//! };
//!
//! match client.hgetall("my_map") {
//! Ok(map) => {
//! match map.get("my_field") {
//! Some(value) => println!("Got field value from map: {}", value),
//! None => println!("Map field is emtpy"),
//! }
//! },
//! Err(error) => println!("Unable to read map from Redis: {}", error),
//! };
//!
//! /// run some command that is not built in the library
//! match client.run_command::<String>("ECHO", vec!["testing"]) {
//! Ok(value) => assert_eq!(value, "testing"),
//! _ => panic!("test error"),
//! };
//!
//! /// publish messages
//! let result = client.publish("news_channel", "test message");
//! assert!(result.is_ok());
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! ## Subscription Flow
//!
//! ```rust,no_run
//! use simple_redis::{Interrupts, Message};
//!
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! let mut result = client.subscribe("important_notifications");
//! assert!(result.is_ok());
//! result = client.psubscribe("*_notifications");
//! assert!(result.is_ok());
//!
//! // fetch messages from all subscriptions
//! client.fetch_messages(
//! &mut |message: Message| -> bool {
//! let payload : String = message.get_payload().unwrap();
//! println!("Got message: {}", payload);
//!
//! // continue fetching
//! false
//! },
//! &mut || -> Interrupts { Interrupts::new() },
//! ).unwrap();
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! ## Closing Connection
//!
//! ```rust
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! match client.set("my_key", "my_value") {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.quit() {
//! Err(error) => println!("Error: {}", error),
//! _ => println!("Connection Closed.")
//! }
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! # Installation
//! In order to use this library, just add it as a dependency:
//!
//! ```ini
//! [dependencies]
//! simple_redis = "*"
//! ```
//!
//! # Contributing
//! See [contributing guide](https://github.com/sagiegurari/simple_redis/blob/master/.github/CONTRIBUTING.md)
//!
//! # License
//! Developed by Sagie Gur-Ari and licensed under the
//! [Apache 2](https://github.com/sagiegurari/simple_redis/blob/master/LICENSE) open source license.
//!
#[cfg(test)]
#[path = "./lib_test.rs"]
mod lib_test;
#[cfg(doctest)]
doc_comment::doctest!("../README.md");
pub mod client;
mod commands;
mod connection;
mod subscriber;
pub mod types;
/// Error Type
pub type RedisError = types::RedisError;
/// PubSub message
pub type Message = types::Message;
/// Blocking operations interrupts
pub type Interrupts = types::Interrupts;
/// Redis result which either holds a value or a Redis error
pub type RedisResult<T> = types::RedisResult<T>;
/// Constructs a new redis client.<br>
/// The redis connection string must be in the following format: `redis://[:<passwd>@]<hostname>[:port][/<db>]`
///
/// # Arguments
///
/// * `connection_string` - The connection string in the format of: `redis://[:<passwd>@]<hostname>[:port][/<db>]`
///
/// # Example
///
/// ```
/// extern crate simple_redis;
///
/// fn main() {
/// match simple_redis::create("redis://127.0.0.1:6379/") {
/// Ok(client) => println!("Created Redis Client"),
/// Err(error) => println!("Unable to create Redis client: {}", error)
/// }
/// }
/// ```
pub fn | (connection_string: &str) -> Result<client::Client, RedisError> {
client::create(connection_string)
}
| create | identifier_name |
lib.rs | #![deny(
absolute_paths_not_starting_with_crate,
ambiguous_associated_items,
ambiguous_glob_reexports,
anonymous_parameters,
arithmetic_overflow,
array_into_iter,
asm_sub_register,
bad_asm_style,
bindings_with_variant_name,
break_with_label_and_loop,
byte_slice_in_packed_struct_with_derive,
cenum_impl_drop_cast,
clashing_extern_declarations,
coherence_leak_check,
conflicting_repr_hints,
confusable_idents,
const_evaluatable_unchecked,
const_item_mutation,
dead_code,
deprecated,
deprecated_cfg_attr_crate_type_name,
deprecated_in_future,
deprecated_where_clause_location,
deref_into_dyn_supertrait,
deref_nullptr,
drop_bounds,
dropping_copy_types,
dropping_references,
duplicate_macro_attributes,
dyn_drop,
ellipsis_inclusive_range_patterns,
enum_intrinsics_non_enums,
explicit_outlives_requirements,
exported_private_dependencies,
ffi_unwind_calls,
for_loops_over_fallibles,
forbidden_lint_groups,
forgetting_copy_types,
forgetting_references,
function_item_references,
ill_formed_attribute_input,
illegal_floating_point_literal_pattern,
implied_bounds_entailment,
improper_ctypes,
improper_ctypes_definitions,
incomplete_features,
incomplete_include,
indirect_structural_match,
ineffective_unstable_trait_impl,
inline_no_sanitize,
invalid_alignment,
invalid_atomic_ordering,
invalid_doc_attributes,
invalid_macro_export_arguments,
invalid_type_param_default,
invalid_value,
irrefutable_let_patterns,
keyword_idents,
large_assignments,
late_bound_lifetime_arguments,
legacy_derive_helpers,
let_underscore_drop,
let_underscore_lock,
macro_expanded_macro_exports_accessed_by_absolute_paths,
map_unit_fn,
meta_variable_misuse,
missing_abi,
missing_copy_implementations,
missing_docs,
missing_fragment_specifier,
mixed_script_confusables,
mutable_transmutes,
named_arguments_used_positionally,
named_asm_labels,
no_mangle_const_items,
no_mangle_generic_items,
non_ascii_idents,
non_camel_case_types,
non_fmt_panics,
non_shorthand_field_patterns,
non_snake_case,
non_upper_case_globals,
nontrivial_structural_match,
noop_method_call,
opaque_hidden_inferred_bound,
order_dependent_trait_objects,
overflowing_literals,
overlapping_range_endpoints,
path_statements,
patterns_in_fns_without_body,
pointer_structural_match,
private_in_public,
proc_macro_back_compat,
proc_macro_derive_resolution_fallback,
pub_use_of_private_extern_crate,
redundant_semicolons,
repr_transparent_external_private_fields,
rust_2021_incompatible_closure_captures,
rust_2021_incompatible_or_patterns,
rust_2021_prefixes_incompatible_syntax,
rust_2021_prelude_collisions,
semicolon_in_expressions_from_macros,
soft_unstable,
special_module_name,
stable_features,
suspicious_auto_trait_impls,
suspicious_double_ref_op,
temporary_cstring_as_ptr,
text_direction_codepoint_in_comment,
text_direction_codepoint_in_literal,
trivial_bounds,
trivial_casts,
trivial_numeric_casts,
type_alias_bounds,
tyvar_behind_raw_pointer,
uncommon_codepoints,
unconditional_panic,
unconditional_recursion,
undefined_naked_function_abi,
unexpected_cfgs,
ungated_async_fn_track_caller,
uninhabited_static,
unknown_crate_types,
unnameable_test_items,
unreachable_code,
unreachable_patterns,
unreachable_pub,
unsafe_code,
unsafe_op_in_unsafe_fn,
unstable_features,
unstable_name_collisions,
unstable_syntax_pre_expansion,
unsupported_calling_conventions,
unused_allocation,
unused_assignments,
unused_assignments,
unused_attributes,
unused_braces,
unused_comparisons,
unused_crate_dependencies,
unused_doc_comments,
unused_extern_crates,
unused_features,
unused_import_braces,
unused_imports,
unused_labels,
unused_lifetimes,
unused_macro_rules,
unused_macros,
unused_must_use,
unused_mut,
unused_parens,
unused_qualifications,
unused_tuple_struct_fields,
unused_unsafe,
unused_variables,
useless_deprecated,
where_clauses_object_safety,
while_true
)]
#![warn(macro_use_extern_crate, unknown_lints)]
#![allow(
bare_trait_objects,
box_pointers,
elided_lifetimes_in_paths,
missing_debug_implementations,
single_use_lifetimes,
unused_results,
variant_size_differences,
warnings,
renamed_and_removed_lints
)]
//! # simple_redis
//!
//! Simple and resilient [redis](https://redis.io/) client based on [redis-rs](https://crates.io/crates/redis) with
//! internal connection and subscription handling.
//!
//! This library provides a very basic, simple API for the most common redis operations.<br>
//! While not as comprehensive or flexible as [redis-rs](https://crates.io/crates/redis),
//! it does provide a simpler api for most common use cases and operations as well as automatic and resilient internal
//! connection and subscription (pubsub) handling.<br>
//! In addition, the entire API is accessible via redis client and there is no need to manage connection or pubsub
//! instances in parallel.<br>
//!
//! ## Connection Resiliency
//!
//! Connection resiliency is managed by verifying the internally managed connection before every operation against the
//! redis server.<br>
//! In case of any connection issue, a new connection will be allocated to ensure the operation is invoked on a valid
//! connection only.<br>
//! However, this comes at a small performance cost of PING operation to the redis server.<br>
//!
//! ## Subscription Resiliency
//!
//! Subscription resiliency is ensured by recreating the internal pubsub and issuing new subscription requests
//! automatically in case of any error while fetching a message from the subscribed channels.
//!
//! # Examples
//!
//! ## Initialization and Simple Operations
//!
//! ```
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! match client.set("my_key", "my_value") {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.get_string("my_key") {
//! Ok(value) => println!("Read value from Redis: {}", value),
//! Err(error) => println!("Unable to get value from Redis: {}", error)
//! };
//!
//! match client.set("my_numeric_key", 255.5) {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.get::<f32>("my_numeric_key") {
//! Ok(value) => println!("Read value from Redis: {}", value),
//! Err(error) => println!("Unable to get value from Redis: {}", error)
//! };
//!
//! match client.hgetall("my_map") {
//! Ok(map) => {
//! match map.get("my_field") {
//! Some(value) => println!("Got field value from map: {}", value),
//!                 None => println!("Map field is empty"),
//! }
//! },
//! Err(error) => println!("Unable to read map from Redis: {}", error),
//! };
//!
//! /// run some command that is not built in the library
//! match client.run_command::<String>("ECHO", vec!["testing"]) {
//! Ok(value) => assert_eq!(value, "testing"),
//! _ => panic!("test error"),
//! };
//!
//! /// publish messages
//! let result = client.publish("news_channel", "test message");
//! assert!(result.is_ok());
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! ## Subscription Flow
//!
//! ```rust,no_run
//! use simple_redis::{Interrupts, Message};
//!
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! let mut result = client.subscribe("important_notifications");
//! assert!(result.is_ok());
//! result = client.psubscribe("*_notifications");
//! assert!(result.is_ok());
//!
//! // fetch messages from all subscriptions
//! client.fetch_messages(
//! &mut |message: Message| -> bool {
//! let payload : String = message.get_payload().unwrap();
//! println!("Got message: {}", payload);
//!
//! // continue fetching
//! false
//! },
//! &mut || -> Interrupts { Interrupts::new() },
//! ).unwrap();
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! ## Closing Connection
//!
//! ```rust
//! fn main() {
//! match simple_redis::create("redis://127.0.0.1:6379/") {
//! Ok(mut client) => {
//! println!("Created Redis Client");
//!
//! match client.set("my_key", "my_value") {
//! Err(error) => println!("Unable to set value in Redis: {}", error),
//! _ => println!("Value set in Redis")
//! };
//!
//! match client.quit() {
//! Err(error) => println!("Error: {}", error),
//! _ => println!("Connection Closed.")
//! }
//! },
//! Err(error) => println!("Unable to create Redis client: {}", error)
//! }
//! }
//! ```
//!
//! # Installation
//! In order to use this library, just add it as a dependency:
//!
//! ```ini
//! [dependencies]
//! simple_redis = "*"
//! ```
//!
//! # Contributing
//! See [contributing guide](https://github.com/sagiegurari/simple_redis/blob/master/.github/CONTRIBUTING.md)
//!
//! # License
//! Developed by Sagie Gur-Ari and licensed under the
//! [Apache 2](https://github.com/sagiegurari/simple_redis/blob/master/LICENSE) open source license.
//!
#[cfg(test)]
#[path = "./lib_test.rs"]
mod lib_test;
#[cfg(doctest)]
doc_comment::doctest!("../README.md");
pub mod client;
mod commands;
mod connection;
mod subscriber;
pub mod types;
/// Error Type
pub type RedisError = types::RedisError;
/// PubSub message
pub type Message = types::Message;
/// Blocking operations interrupts
pub type Interrupts = types::Interrupts;
/// Redis result which either holds a value or a Redis error
pub type RedisResult<T> = types::RedisResult<T>;
/// Constructs a new redis client.<br>
/// The redis connection string must be in the following format: `redis://[:<passwd>@]<hostname>[:port][/<db>]`
///
/// # Arguments
///
/// * `connection_string` - The connection string in the format of: `redis://[:<passwd>@]<hostname>[:port][/<db>]`
///
/// # Example | /// extern crate simple_redis;
///
/// fn main() {
/// match simple_redis::create("redis://127.0.0.1:6379/") {
/// Ok(client) => println!("Created Redis Client"),
/// Err(error) => println!("Unable to create Redis client: {}", error)
/// }
/// }
/// ```
pub fn create(connection_string: &str) -> Result<client::Client, RedisError> {
client::create(connection_string)
} | ///
/// ``` | random_line_split |
structs.rs | use super::*;
use mio::{
*,
event::Evented,
};
use ::std::{
io,
io::{
Read,
Write,
ErrorKind,
},
time,
};
#[derive(Debug)]
pub struct Middleman {
    // The underlying non-blocking Mio TCP stream all reads/writes go through.
    stream: mio::net::TcpStream,
    // Receive buffer holding raw bytes accumulated from the stream.
    buf: Vec<u8>,
    // Number of bytes at the front of `buf` that hold real data
    // (bytes beyond this index are resize padding).
    buf_occupancy: usize,
    // Payload length decoded from the current message's 4-byte preamble,
    // or None if the preamble has not been parsed yet.
    payload_bytes: Option<u32>,
}
impl Middleman { fn check_payload(&mut self) {
if self.payload_bytes.is_none() && self.buf_occupancy >= 4 {
self.payload_bytes = Some(
bincode::deserialize(&self.buf[..4])
.unwrap()
)
}
}
/// Create a new Middleman structure to wrap the given Mio TcpStream.
/// The Middleman implements `mio::Evented`, but delegates its functions to this given stream
/// As such, registering the Middleman and registering the TcpStream are anaologous.
pub fn new(stream: mio::net::TcpStream) -> Middleman {
Self {
stream: stream,
buf: Vec::with_capacity(128),
buf_occupancy: 0,
payload_bytes: None,
}
}
fn read_in(&mut self) -> Result<usize, io::Error> {
let mut total = 0;
loop {
let limit = (self.buf_occupancy + 64) + (self.buf_occupancy);
if self.buf.len() < limit {
self.buf.resize(limit, 0u8);
}
match self.stream.read(&mut self.buf[self.buf_occupancy..]) {
Ok(0) => return Ok(total),
Ok(bytes) => {
self.buf_occupancy += bytes;
total += bytes;
},
Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
return Ok(total);
},
Err(e) => return Err(e),
};
}
}
/// Write the given message directly into the TcpStream. Returns `Err` variant if there
/// is a problem serializing or writing the message. The call returns Ok(()) once the bytes
/// are entirely written to the stream.
pub fn send<M: Message>(&mut self, m: &M) -> Result<(), SendError> {
self.send_packed(
& PackedMessage::new(m)?
)?;
Ok(())
}
/// See `send`. This variant can be useful to
/// avoid the overhead of repeatedly packing a message for whatever reason, eg: sending
/// the same message using multiple Middleman structs.
///
/// Note that this function does NOT check for internal consistency of the packed message.
/// So, if this message was constructed by a means other than `Packed::new`, then the
/// results may be unpredictable.
pub fn send_packed(&mut self, msg: & PackedMessage) -> Result<(), io::Error> {
self.stream.write_all(&msg.0)
}
/// Conume an iterator over some Message structs, sending them all in the order traversed (see `send`).
/// Returns (a,b) where a gives the total number of messages sent successfully and where b is Ok if
/// nothing goes wrong and an error otherwise. In the event of the first error, no more messages will be sent.
pub fn send_all<'m, I, M>(&'m mut self, msg_iter: I) -> (usize, Result<(), SendError>)
where
M: Message +'m,
I: Iterator<Item = &'m M>,
{
let mut total = 0;
for msg in msg_iter {
match self.send(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// See `send_all` and `send_packed`. This uses the message iterator from the former and
/// the packed messages from the latter.
pub fn send_all_packed<'m, I>(&'m mut self, packed_msg_iter: I) -> (usize, Result<(), io::Error>)
where
I: Iterator<Item = &'m PackedMessage>,
{
let mut total = 0;
for msg in packed_msg_iter {
match self.send_packed(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// Attempt to dedserialize some data in the receiving buffer into a single
/// complete structure with the given type M. If there is insufficient data
/// at the moment, Ok(None) is returned.
///
/// As the type is provided by the reader, it is possible for the sent
/// message to be misinterpreted as a different type. At best, this is detected
/// by a failure in deserialization. If an error occurs, the data is not consumed
/// from the Middleman. Subsequent reads will operate on the same data.
///
/// NOTE: The correctness of this call depends on the sender sending an _internally consistent_
/// `PackedMessage`. If you (or the sender) are manually manipulating the internal state of
/// sent messages this may cause errors for the receiver. If you are sticking to the Middleman API
/// and treating each `PackedMessage` as a black box, everything should be fine.
pub fn recv<M: Message>(&mut self) -> Result<Option<M>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let decoded: M = bincode::deserialize(
&self.buf[4..buf_end]
)?;
self.payload_bytes = None;
self.buf.drain(0..buf_end);
self.buf_occupancy -= buf_end;
return Ok(Some(decoded))
}
}
Ok(None)
}
/// See `recv`. Will repeatedly call recv() until the next message is not yet ready.
/// Recevied messages are placed into the buffer `dest_vector`. The return result is (a,b)
/// where a is the total number of message successfully received and where b is OK(()) if all goes well
/// and some Err otherwise. In the event of the first error, the call will return and not receive any further.
pub fn recv_all_into<M: Message>(&mut self, dest_vector: &mut Vec<M>) -> (usize, Result<(), RecvError>) {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { dest_vector.push(msg); total += 1; },
Err(e) => return (total, Err(e)),
};
}
}
/// Hijack the mio event loop, reading and writing to the socket as polling allows.
/// Events not related to the recv() of this middleman (determined from the provided mio::Token)
/// are pushed into the provided extra_events vector. Returns Ok(Some(_)) if a
/// message was successfully received. May return Ok(None) if the user provides as timeout some
// non-none Duration. Returns Err(_) if something goes wrong with reading from the socket or
/// deserializing the message. See try_recv for more information.
/// WARNING: The user should take care to iterate over these events also, as without them all the
/// Evented objects registered with the provided poll object might experience lost wakeups.
/// It is suggested that in the event of any recv_blocking calls in your loop, you extend the event
/// loop with a drain() on the same vector passed here as extra_events (using the iterator chain function, for example.)
pub fn recv_blocking<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
my_tok: Token,
extra_events: &mut Vec<Event>,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
let mut res = None;
loop {
for event in events.iter() {
let tok = event.token();
if res.is_none() && tok == my_tok {
if! event.readiness().is_readable() {
continue;
}
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => {
// got a message!
res = Some(msg);
},
Ok(None) => (),
Err(e) => return Err(e),
}
} else {
extra_events.push(event);
}
}
if let Some(msg) = res { | return Ok(Some(msg));
} else {
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
}
/// See `recv_blocking`. This function is intended as an alternative for use
/// for cases where it is _certain_ that this Middleman is the only registered `mio::Evented`
/// for the provided `Poll` and `Events` objects. Thus, the call _WILL NOT CHECK_ the token at all,
/// presuming that all events are associated with this middleman.
pub fn recv_blocking_solo<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
loop {
for event in events.iter() {
if event.readiness().is_readable(){
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => return Ok(Some(msg)),
Ok(None) => (),
Err(e) => return Err(e),
}
}
}
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking_solo()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
/// Similar to `recv_all_into`, but rather than storing each received message'm',
/// the provided function is called with arguments (self, m) where self is `&mut self`.
/// This allows for ergonomic utility of the received messages using a closure.
pub fn recv_all_map<F,M>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where M: Message, F: FnMut(&mut Self, M) + Sized {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { total += 1; func(self, msg) },
Err(e) => return (total, Err(e)),
};
}
}
/// Combination of `recv_all_map` and `recv_packed`.
pub fn recv_all_packed_map<F>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where F: FnMut(&mut Self, PackedMessage) + Sized {
let mut total = 0;
loop {
match self.recv_packed() {
Ok(None) => return (total, Ok(())),
Ok(Some(packed)) => { total += 1; func(self, packed) },
Err(e) => return (total, Err(e)),
};
}
}
/// Similar to `recv`, except builds (instead of some M: Message), a `PackedMessage` object.
/// These packed messages can be deserialized later, sent on the line without knowledge of the
/// message type etc.
pub fn recv_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let mut vec = self.buf.drain(0..buf_end)
.collect::<Vec<_>>();
self.payload_bytes = None;
self.buf_occupancy -= buf_end;
return Ok(Some(PackedMessage(vec)))
}
}
Ok(None)
}
/// Similar to `recv_packed`, but the potentially-read bytes are not actually removed
/// from the stream. The message will _still be there_.
pub fn peek_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
return Ok(
Some(
PackedMessage::from_raw(
self.buf[4..buf_end].to_vec()
)
)
)
}
}
Ok(None)
}
}
/// This structure represents the serialized form of some `Message`-implementing structure.
/// Dealing with a PackedMessage may be suitable when:
/// (1) You need to send/store/receive a message but don't need to actually _use_ it yourself.
/// (2) You want to serialize a message once, and send it multiple times.
/// (3) You want to read and discard a message whose type is unknown.
///
/// NOTE: The packed message maps 1:1 with the bytes that travel over the TcpStream. As such,
/// packed messages also contain the 4-byte length preamble. The user is discouraged from
/// manipulating the contents of a packed message. The `recv` statement relies on consistency
/// of packed messages
pub struct PackedMessage(Vec<u8>);
impl PackedMessage {
    /// Create a new PackedMessage from the given `Message`-implementing struct.
    pub fn new<M: Message>(m: &M) -> Result<Self, PackingError> {
        let payload_len = bincode::serialized_size(&m)? as usize;
        if payload_len > ::std::u32::MAX as usize {
            // The 4-byte preamble cannot express a length this large.
            return Err(PackingError::TooBigToRepresent);
        }
        let total_len = payload_len + 4;
        let mut bytes = vec![0u8; total_len];
        // First the u32 length preamble, then the serialized payload.
        bincode::serialize_into(&mut bytes[0..4], &(payload_len as u32))?;
        bincode::serialize_into(&mut bytes[4..total_len], m)?;
        Ok(PackedMessage(bytes))
    }

    /// Attempt to unpack this PackedMessage given a type hint. This may fail if the
    /// PackedMessage isn't internally consistent or the type doesn't match that
    /// of the type used for serialization.
    pub fn unpack<M: Message>(&self) -> Result<M, Box<bincode::ErrorKind>> {
        // Skip the 4-byte length preamble; the remainder is the payload.
        bincode::deserialize(&self.0[4..])
    }

    /// Unwrap the byte buffer comprising this PackedMessage.
    #[inline]
    pub fn into_raw(self) -> Vec<u8> {
        self.0
    }

    /// Accept the given byte buffer as the basis for a PackedMessage.
    ///
    /// WARNING: Use this at your own risk! The `recv` functions and their variants rely on
    /// the correct contents of messages to work correctly.
    ///
    /// NOTE: The first 4 bytes of the buffer are used to store the length of the payload.
    #[inline]
    pub fn from_raw(v: Vec<u8>) -> Self {
        PackedMessage(v)
    }

    /// Return the number of bytes this packed message contains. Maps 1:1 with
    /// the bit complexity of the message sent over the network.
    #[inline]
    pub fn byte_len(&self) -> usize {
        self.0.len()
    }

    /// Acquire an immutable reference to the internal buffer of the packed message.
    #[inline]
    pub fn get_raw(&self) -> &Vec<u8> {
        &self.0
    }

    /// Acquire a mutable reference to the internal buffer of the packed message.
    ///
    /// WARNING: The contents of a PackedMessage represent a delicate internal state. Sending an
    /// internally inconsistent PackedMessage will compromise the connection. Use at your own risk!
    #[inline]
    pub fn get_mut_raw(&mut self) -> &mut Vec<u8> {
        &mut self.0
    }
}
impl Evented for Middleman {
fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.register(poll, token, interest, opts)
}
fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &Poll) -> io::Result<()> {
self.stream.deregister(poll)
}
} | // message ready to go. Exiting loop | random_line_split |
structs.rs | use super::*;
use mio::{
*,
event::Evented,
};
use ::std::{
io,
io::{
Read,
Write,
ErrorKind,
},
time,
};
#[derive(Debug)]
pub struct Middleman {
    // The underlying non-blocking Mio TCP stream all reads/writes go through.
    stream: mio::net::TcpStream,
    // Receive buffer holding raw bytes accumulated from the stream.
    buf: Vec<u8>,
    // Number of bytes at the front of `buf` that hold real data
    // (bytes beyond this index are resize padding).
    buf_occupancy: usize,
    // Payload length decoded from the current message's 4-byte preamble,
    // or None if the preamble has not been parsed yet.
    payload_bytes: Option<u32>,
}
impl Middleman { fn check_payload(&mut self) {
if self.payload_bytes.is_none() && self.buf_occupancy >= 4 {
self.payload_bytes = Some(
bincode::deserialize(&self.buf[..4])
.unwrap()
)
}
}
/// Create a new Middleman structure to wrap the given Mio TcpStream.
/// The Middleman implements `mio::Evented`, but delegates its functions to this given stream
/// As such, registering the Middleman and registering the TcpStream are anaologous.
pub fn new(stream: mio::net::TcpStream) -> Middleman {
Self {
stream: stream,
buf: Vec::with_capacity(128),
buf_occupancy: 0,
payload_bytes: None,
}
}
fn read_in(&mut self) -> Result<usize, io::Error> {
let mut total = 0;
loop {
let limit = (self.buf_occupancy + 64) + (self.buf_occupancy);
if self.buf.len() < limit {
self.buf.resize(limit, 0u8);
}
match self.stream.read(&mut self.buf[self.buf_occupancy..]) {
Ok(0) => return Ok(total),
Ok(bytes) => {
self.buf_occupancy += bytes;
total += bytes;
},
Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
return Ok(total);
},
Err(e) => return Err(e),
};
}
}
/// Write the given message directly into the TcpStream. Returns `Err` variant if there
/// is a problem serializing or writing the message. The call returns Ok(()) once the bytes
/// are entirely written to the stream.
pub fn send<M: Message>(&mut self, m: &M) -> Result<(), SendError> {
self.send_packed(
& PackedMessage::new(m)?
)?;
Ok(())
}
/// See `send`. This variant can be useful to
/// avoid the overhead of repeatedly packing a message for whatever reason, eg: sending
/// the same message using multiple Middleman structs.
///
/// Note that this function does NOT check for internal consistency of the packed message.
/// So, if this message was constructed by a means other than `Packed::new`, then the
/// results may be unpredictable.
pub fn send_packed(&mut self, msg: & PackedMessage) -> Result<(), io::Error> {
self.stream.write_all(&msg.0)
}
/// Conume an iterator over some Message structs, sending them all in the order traversed (see `send`).
/// Returns (a,b) where a gives the total number of messages sent successfully and where b is Ok if
/// nothing goes wrong and an error otherwise. In the event of the first error, no more messages will be sent.
pub fn send_all<'m, I, M>(&'m mut self, msg_iter: I) -> (usize, Result<(), SendError>)
where
M: Message +'m,
I: Iterator<Item = &'m M>,
{
let mut total = 0;
for msg in msg_iter {
match self.send(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// See `send_all` and `send_packed`. This uses the message iterator from the former and
/// the packed messages from the latter.
pub fn send_all_packed<'m, I>(&'m mut self, packed_msg_iter: I) -> (usize, Result<(), io::Error>)
where
I: Iterator<Item = &'m PackedMessage>,
{
let mut total = 0;
for msg in packed_msg_iter {
match self.send_packed(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// Attempt to dedserialize some data in the receiving buffer into a single
/// complete structure with the given type M. If there is insufficient data
/// at the moment, Ok(None) is returned.
///
/// As the type is provided by the reader, it is possible for the sent
/// message to be misinterpreted as a different type. At best, this is detected
/// by a failure in deserialization. If an error occurs, the data is not consumed
/// from the Middleman. Subsequent reads will operate on the same data.
///
/// NOTE: The correctness of this call depends on the sender sending an _internally consistent_
/// `PackedMessage`. If you (or the sender) are manually manipulating the internal state of
/// sent messages this may cause errors for the receiver. If you are sticking to the Middleman API
/// and treating each `PackedMessage` as a black box, everything should be fine.
pub fn recv<M: Message>(&mut self) -> Result<Option<M>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let decoded: M = bincode::deserialize(
&self.buf[4..buf_end]
)?;
self.payload_bytes = None;
self.buf.drain(0..buf_end);
self.buf_occupancy -= buf_end;
return Ok(Some(decoded))
}
}
Ok(None)
}
/// See `recv`. Will repeatedly call recv() until the next message is not yet ready.
/// Recevied messages are placed into the buffer `dest_vector`. The return result is (a,b)
/// where a is the total number of message successfully received and where b is OK(()) if all goes well
/// and some Err otherwise. In the event of the first error, the call will return and not receive any further.
pub fn recv_all_into<M: Message>(&mut self, dest_vector: &mut Vec<M>) -> (usize, Result<(), RecvError>) {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { dest_vector.push(msg); total += 1; },
Err(e) => return (total, Err(e)),
};
}
}
/// Hijack the mio event loop, reading and writing to the socket as polling allows.
/// Events not related to the recv() of this middleman (determined from the provided mio::Token)
/// are pushed into the provided extra_events vector. Returns Ok(Some(_)) if a
/// message was successfully received. May return Ok(None) if the user provides as timeout some
// non-none Duration. Returns Err(_) if something goes wrong with reading from the socket or
/// deserializing the message. See try_recv for more information.
/// WARNING: The user should take care to iterate over these events also, as without them all the
/// Evented objects registered with the provided poll object might experience lost wakeups.
/// It is suggested that in the event of any recv_blocking calls in your loop, you extend the event
/// loop with a drain() on the same vector passed here as extra_events (using the iterator chain function, for example.)
pub fn recv_blocking<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
my_tok: Token,
extra_events: &mut Vec<Event>,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? |
let started_at = time::Instant::now();
let mut res = None;
loop {
for event in events.iter() {
let tok = event.token();
if res.is_none() && tok == my_tok {
if! event.readiness().is_readable() {
continue;
}
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => {
// got a message!
res = Some(msg);
},
Ok(None) => (),
Err(e) => return Err(e),
}
} else {
extra_events.push(event);
}
}
if let Some(msg) = res {
// message ready to go. Exiting loop
return Ok(Some(msg));
} else {
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
}
/// See `recv_blocking`. This function is intended as an alternative for use
/// for cases where it is _certain_ that this Middleman is the only registered `mio::Evented`
/// for the provided `Poll` and `Events` objects. Thus, the call _WILL NOT CHECK_ the token at all,
/// presuming that all events are associated with this middleman.
pub fn recv_blocking_solo<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
loop {
for event in events.iter() {
if event.readiness().is_readable(){
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => return Ok(Some(msg)),
Ok(None) => (),
Err(e) => return Err(e),
}
}
}
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking_solo()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
/// Similar to `recv_all_into`, but rather than storing each received message'm',
/// the provided function is called with arguments (self, m) where self is `&mut self`.
/// This allows for ergonomic utility of the received messages using a closure.
pub fn recv_all_map<F,M>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where M: Message, F: FnMut(&mut Self, M) + Sized {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { total += 1; func(self, msg) },
Err(e) => return (total, Err(e)),
};
}
}
/// Combination of `recv_all_map` and `recv_packed`.
pub fn recv_all_packed_map<F>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where F: FnMut(&mut Self, PackedMessage) + Sized {
let mut total = 0;
loop {
match self.recv_packed() {
Ok(None) => return (total, Ok(())),
Ok(Some(packed)) => { total += 1; func(self, packed) },
Err(e) => return (total, Err(e)),
};
}
}
/// Similar to `recv`, except builds (instead of some M: Message), a `PackedMessage` object.
/// These packed messages can be deserialized later, sent on the line without knowledge of the
/// message type etc.
pub fn recv_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let mut vec = self.buf.drain(0..buf_end)
.collect::<Vec<_>>();
self.payload_bytes = None;
self.buf_occupancy -= buf_end;
return Ok(Some(PackedMessage(vec)))
}
}
Ok(None)
}
/// Similar to `recv_packed`, but the potentially-read bytes are not actually removed
/// from the stream. The message will _still be there_.
pub fn peek_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
return Ok(
Some(
PackedMessage::from_raw(
self.buf[4..buf_end].to_vec()
)
)
)
}
}
Ok(None)
}
}
/// This structure represents the serialized form of some `Message`-implementing structure.
/// Dealing with a PackedMessage may be suitable when:
/// (1) You need to send/store/receive a message but don't need to actually _use_ it yourself.
/// (2) You want to serialize a message once, and send it multiple times.
/// (3) You want to read and discard a message whose type is unknown.
///
/// NOTE: The packed message maps 1:1 with the bytes that travel over the TcpStream. As such,
/// packed messages also contain the 4-byte length preable. The user is discocuraged from
/// manipulating the contents of a packed message. The `recv` statement relies on consistency
/// of packed messages
pub struct PackedMessage(Vec<u8>);
impl PackedMessage {
/// Create a new PakcedMessage from the given `Message`-implementing struct
pub fn new<M: Message>(m: &M) -> Result<Self, PackingError> {
let m_len: usize = bincode::serialized_size(&m)? as usize;
if m_len > ::std::u32::MAX as usize {
return Err(PackingError::TooBigToRepresent);
}
let tot_len = m_len+4;
let mut vec = Vec::with_capacity(tot_len);
vec.resize(tot_len, 0u8);
bincode::serialize_into(&mut vec[0..4], &(m_len as u32))?;
bincode::serialize_into(&mut vec[4..tot_len], m)?;
Ok(PackedMessage(vec))
}
/// Attempt to unpack this Packedmessage given a type hint. This may fail if the
/// PackedMessage isn't internally consistent or the type doesn't match that
/// of the type used for serialization.
pub fn unpack<M: Message>(&self) -> Result<M, Box<bincode::ErrorKind>> {
bincode::deserialize(&self.0[4..])
}
/// Unwrap the byte buffer comprising this PackedMessage
#[inline] pub fn into_raw(self) -> Vec<u8> { self.0 }
/// Accept the given byte buffer as the basis for a PackedMessage
///
/// WARNING: Use this at your own risk! The `recv` functions and their variants rely on
/// the correct contents of messages to work correcty.
///
/// NOTE: The first 4 bytes of a the buffer are used to store the length of the payload.
#[inline] pub fn from_raw(v: Vec<u8>) -> Self { PackedMessage(v) }
/// Return the number of bytes this packed message contains. Maps 1:1 with
/// the bit complexity of the message sent over the network.
#[inline] pub fn byte_len(&self) -> usize { self.0.len() }
/// Acquire an immutable reference to the internal buffer of the packed message.
#[inline] pub fn get_raw(&self) -> &Vec<u8> { &self.0 }
/// Acquire a mutable reference to the internal buffer of the packed message.
///
/// WARNING: Contents of a PackedMessage represent a delicata internal state. Sending an
/// internally inconsistent PackedMessage will compromise the connection. Use at your own risk!
#[inline] pub fn get_mut_raw(&mut self) -> &mut Vec<u8> { &mut self.0 }
}
impl Evented for Middleman {
fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.register(poll, token, interest, opts)
}
fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &Poll) -> io::Result<()> {
self.stream.deregister(poll)
}
} | {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
} | conditional_block |
structs.rs | use super::*;
use mio::{
*,
event::Evented,
};
use ::std::{
io,
io::{
Read,
Write,
ErrorKind,
},
time,
};
#[derive(Debug)]
pub struct Middleman {
stream: mio::net::TcpStream,
buf: Vec<u8>,
buf_occupancy: usize,
payload_bytes: Option<u32>,
}
impl Middleman { fn check_payload(&mut self) {
if self.payload_bytes.is_none() && self.buf_occupancy >= 4 {
self.payload_bytes = Some(
bincode::deserialize(&self.buf[..4])
.unwrap()
)
}
}
/// Create a new Middleman structure to wrap the given Mio TcpStream.
/// The Middleman implements `mio::Evented`, but delegates its functions to this given stream
/// As such, registering the Middleman and registering the TcpStream are anaologous.
pub fn new(stream: mio::net::TcpStream) -> Middleman {
Self {
stream: stream,
buf: Vec::with_capacity(128),
buf_occupancy: 0,
payload_bytes: None,
}
}
fn read_in(&mut self) -> Result<usize, io::Error> {
let mut total = 0;
loop {
let limit = (self.buf_occupancy + 64) + (self.buf_occupancy);
if self.buf.len() < limit {
self.buf.resize(limit, 0u8);
}
match self.stream.read(&mut self.buf[self.buf_occupancy..]) {
Ok(0) => return Ok(total),
Ok(bytes) => {
self.buf_occupancy += bytes;
total += bytes;
},
Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
return Ok(total);
},
Err(e) => return Err(e),
};
}
}
/// Write the given message directly into the TcpStream. Returns `Err` variant if there
/// is a problem serializing or writing the message. The call returns Ok(()) once the bytes
/// are entirely written to the stream.
pub fn send<M: Message>(&mut self, m: &M) -> Result<(), SendError> {
self.send_packed(
& PackedMessage::new(m)?
)?;
Ok(())
}
/// See `send`. This variant can be useful to
/// avoid the overhead of repeatedly packing a message for whatever reason, eg: sending
/// the same message using multiple Middleman structs.
///
/// Note that this function does NOT check for internal consistency of the packed message.
/// So, if this message was constructed by a means other than `Packed::new`, then the
/// results may be unpredictable.
pub fn send_packed(&mut self, msg: & PackedMessage) -> Result<(), io::Error> {
self.stream.write_all(&msg.0)
}
/// Conume an iterator over some Message structs, sending them all in the order traversed (see `send`).
/// Returns (a,b) where a gives the total number of messages sent successfully and where b is Ok if
/// nothing goes wrong and an error otherwise. In the event of the first error, no more messages will be sent.
pub fn send_all<'m, I, M>(&'m mut self, msg_iter: I) -> (usize, Result<(), SendError>)
where
M: Message +'m,
I: Iterator<Item = &'m M>,
{
let mut total = 0;
for msg in msg_iter {
match self.send(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// See `send_all` and `send_packed`. This uses the message iterator from the former and
/// the packed messages from the latter.
pub fn send_all_packed<'m, I>(&'m mut self, packed_msg_iter: I) -> (usize, Result<(), io::Error>)
where
I: Iterator<Item = &'m PackedMessage>,
{
let mut total = 0;
for msg in packed_msg_iter {
match self.send_packed(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// Attempt to dedserialize some data in the receiving buffer into a single
/// complete structure with the given type M. If there is insufficient data
/// at the moment, Ok(None) is returned.
///
/// As the type is provided by the reader, it is possible for the sent
/// message to be misinterpreted as a different type. At best, this is detected
/// by a failure in deserialization. If an error occurs, the data is not consumed
/// from the Middleman. Subsequent reads will operate on the same data.
///
/// NOTE: The correctness of this call depends on the sender sending an _internally consistent_
/// `PackedMessage`. If you (or the sender) are manually manipulating the internal state of
/// sent messages this may cause errors for the receiver. If you are sticking to the Middleman API
/// and treating each `PackedMessage` as a black box, everything should be fine.
pub fn recv<M: Message>(&mut self) -> Result<Option<M>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let decoded: M = bincode::deserialize(
&self.buf[4..buf_end]
)?;
self.payload_bytes = None;
self.buf.drain(0..buf_end);
self.buf_occupancy -= buf_end;
return Ok(Some(decoded))
}
}
Ok(None)
}
/// See `recv`. Will repeatedly call recv() until the next message is not yet ready.
/// Recevied messages are placed into the buffer `dest_vector`. The return result is (a,b)
/// where a is the total number of message successfully received and where b is OK(()) if all goes well
/// and some Err otherwise. In the event of the first error, the call will return and not receive any further.
pub fn recv_all_into<M: Message>(&mut self, dest_vector: &mut Vec<M>) -> (usize, Result<(), RecvError>) {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { dest_vector.push(msg); total += 1; },
Err(e) => return (total, Err(e)),
};
}
}
/// Hijack the mio event loop, reading and writing to the socket as polling allows.
/// Events not related to the recv() of this middleman (determined from the provided mio::Token)
/// are pushed into the provided extra_events vector. Returns Ok(Some(_)) if a
/// message was successfully received. May return Ok(None) if the user provides as timeout some
// non-none Duration. Returns Err(_) if something goes wrong with reading from the socket or
/// deserializing the message. See try_recv for more information.
/// WARNING: The user should take care to iterate over these events also, as without them all the
/// Evented objects registered with the provided poll object might experience lost wakeups.
/// It is suggested that in the event of any recv_blocking calls in your loop, you extend the event
/// loop with a drain() on the same vector passed here as extra_events (using the iterator chain function, for example.)
pub fn recv_blocking<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
my_tok: Token,
extra_events: &mut Vec<Event>,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
let mut res = None;
loop {
for event in events.iter() {
let tok = event.token();
if res.is_none() && tok == my_tok {
if! event.readiness().is_readable() {
continue;
}
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => {
// got a message!
res = Some(msg);
},
Ok(None) => (),
Err(e) => return Err(e),
}
} else {
extra_events.push(event);
}
}
if let Some(msg) = res {
// message ready to go. Exiting loop
return Ok(Some(msg));
} else {
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
}
/// See `recv_blocking`. This function is intended as an alternative for use
/// for cases where it is _certain_ that this Middleman is the only registered `mio::Evented`
/// for the provided `Poll` and `Events` objects. Thus, the call _WILL NOT CHECK_ the token at all,
/// presuming that all events are associated with this middleman.
pub fn recv_blocking_solo<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
loop {
for event in events.iter() {
if event.readiness().is_readable(){
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => return Ok(Some(msg)),
Ok(None) => (),
Err(e) => return Err(e),
}
}
}
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking_solo()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
/// Similar to `recv_all_into`, but rather than storing each received message'm',
/// the provided function is called with arguments (self, m) where self is `&mut self`.
/// This allows for ergonomic utility of the received messages using a closure.
pub fn recv_all_map<F,M>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where M: Message, F: FnMut(&mut Self, M) + Sized {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { total += 1; func(self, msg) },
Err(e) => return (total, Err(e)),
};
}
}
/// Combination of `recv_all_map` and `recv_packed`.
pub fn recv_all_packed_map<F>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where F: FnMut(&mut Self, PackedMessage) + Sized {
let mut total = 0;
loop {
match self.recv_packed() {
Ok(None) => return (total, Ok(())),
Ok(Some(packed)) => { total += 1; func(self, packed) },
Err(e) => return (total, Err(e)),
};
}
}
/// Similar to `recv`, except builds (instead of some M: Message), a `PackedMessage` object.
/// These packed messages can be deserialized later, sent on the line without knowledge of the
/// message type etc.
pub fn recv_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let mut vec = self.buf.drain(0..buf_end)
.collect::<Vec<_>>();
self.payload_bytes = None;
self.buf_occupancy -= buf_end;
return Ok(Some(PackedMessage(vec)))
}
}
Ok(None)
}
/// Similar to `recv_packed`, but the potentially-read bytes are not actually removed
/// from the stream. The message will _still be there_.
pub fn peek_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
return Ok(
Some(
PackedMessage::from_raw(
self.buf[4..buf_end].to_vec()
)
)
)
}
}
Ok(None)
}
}
/// This structure represents the serialized form of some `Message`-implementing structure.
/// Dealing with a PackedMessage may be suitable when:
/// (1) You need to send/store/receive a message but don't need to actually _use_ it yourself.
/// (2) You want to serialize a message once, and send it multiple times.
/// (3) You want to read and discard a message whose type is unknown.
///
/// NOTE: The packed message maps 1:1 with the bytes that travel over the TcpStream. As such,
/// packed messages also contain the 4-byte length preable. The user is discocuraged from
/// manipulating the contents of a packed message. The `recv` statement relies on consistency
/// of packed messages
pub struct PackedMessage(Vec<u8>);
impl PackedMessage {
/// Create a new PakcedMessage from the given `Message`-implementing struct
pub fn new<M: Message>(m: &M) -> Result<Self, PackingError> {
let m_len: usize = bincode::serialized_size(&m)? as usize;
if m_len > ::std::u32::MAX as usize {
return Err(PackingError::TooBigToRepresent);
}
let tot_len = m_len+4;
let mut vec = Vec::with_capacity(tot_len);
vec.resize(tot_len, 0u8);
bincode::serialize_into(&mut vec[0..4], &(m_len as u32))?;
bincode::serialize_into(&mut vec[4..tot_len], m)?;
Ok(PackedMessage(vec))
}
/// Attempt to unpack this Packedmessage given a type hint. This may fail if the
/// PackedMessage isn't internally consistent or the type doesn't match that
/// of the type used for serialization.
pub fn unpack<M: Message>(&self) -> Result<M, Box<bincode::ErrorKind>> {
bincode::deserialize(&self.0[4..])
}
/// Unwrap the byte buffer comprising this PackedMessage
#[inline] pub fn into_raw(self) -> Vec<u8> { self.0 }
/// Accept the given byte buffer as the basis for a PackedMessage
///
/// WARNING: Use this at your own risk! The `recv` functions and their variants rely on
/// the correct contents of messages to work correcty.
///
/// NOTE: The first 4 bytes of a the buffer are used to store the length of the payload.
#[inline] pub fn from_raw(v: Vec<u8>) -> Self { PackedMessage(v) }
/// Return the number of bytes this packed message contains. Maps 1:1 with
/// the bit complexity of the message sent over the network.
#[inline] pub fn byte_len(&self) -> usize { self.0.len() }
/// Acquire an immutable reference to the internal buffer of the packed message.
#[inline] pub fn get_raw(&self) -> &Vec<u8> { &self.0 }
/// Acquire a mutable reference to the internal buffer of the packed message.
///
/// WARNING: Contents of a PackedMessage represent a delicata internal state. Sending an
/// internally inconsistent PackedMessage will compromise the connection. Use at your own risk!
#[inline] pub fn get_mut_raw(&mut self) -> &mut Vec<u8> { &mut self.0 }
}
impl Evented for Middleman {
fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.register(poll, token, interest, opts)
}
fn | (&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &Poll) -> io::Result<()> {
self.stream.deregister(poll)
}
} | reregister | identifier_name |
structs.rs | use super::*;
use mio::{
*,
event::Evented,
};
use ::std::{
io,
io::{
Read,
Write,
ErrorKind,
},
time,
};
#[derive(Debug)]
pub struct Middleman {
stream: mio::net::TcpStream,
buf: Vec<u8>,
buf_occupancy: usize,
payload_bytes: Option<u32>,
}
impl Middleman { fn check_payload(&mut self) {
if self.payload_bytes.is_none() && self.buf_occupancy >= 4 {
self.payload_bytes = Some(
bincode::deserialize(&self.buf[..4])
.unwrap()
)
}
}
/// Create a new Middleman structure to wrap the given Mio TcpStream.
/// The Middleman implements `mio::Evented`, but delegates its functions to this given stream
/// As such, registering the Middleman and registering the TcpStream are anaologous.
pub fn new(stream: mio::net::TcpStream) -> Middleman {
Self {
stream: stream,
buf: Vec::with_capacity(128),
buf_occupancy: 0,
payload_bytes: None,
}
}
fn read_in(&mut self) -> Result<usize, io::Error> {
let mut total = 0;
loop {
let limit = (self.buf_occupancy + 64) + (self.buf_occupancy);
if self.buf.len() < limit {
self.buf.resize(limit, 0u8);
}
match self.stream.read(&mut self.buf[self.buf_occupancy..]) {
Ok(0) => return Ok(total),
Ok(bytes) => {
self.buf_occupancy += bytes;
total += bytes;
},
Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
return Ok(total);
},
Err(e) => return Err(e),
};
}
}
/// Write the given message directly into the TcpStream. Returns `Err` variant if there
/// is a problem serializing or writing the message. The call returns Ok(()) once the bytes
/// are entirely written to the stream.
pub fn send<M: Message>(&mut self, m: &M) -> Result<(), SendError> {
self.send_packed(
& PackedMessage::new(m)?
)?;
Ok(())
}
/// See `send`. This variant can be useful to
/// avoid the overhead of repeatedly packing a message for whatever reason, eg: sending
/// the same message using multiple Middleman structs.
///
/// Note that this function does NOT check for internal consistency of the packed message.
/// So, if this message was constructed by a means other than `Packed::new`, then the
/// results may be unpredictable.
pub fn send_packed(&mut self, msg: & PackedMessage) -> Result<(), io::Error> {
self.stream.write_all(&msg.0)
}
/// Conume an iterator over some Message structs, sending them all in the order traversed (see `send`).
/// Returns (a,b) where a gives the total number of messages sent successfully and where b is Ok if
/// nothing goes wrong and an error otherwise. In the event of the first error, no more messages will be sent.
pub fn send_all<'m, I, M>(&'m mut self, msg_iter: I) -> (usize, Result<(), SendError>)
where
M: Message +'m,
I: Iterator<Item = &'m M>,
{
let mut total = 0;
for msg in msg_iter {
match self.send(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// See `send_all` and `send_packed`. This uses the message iterator from the former and
/// the packed messages from the latter.
pub fn send_all_packed<'m, I>(&'m mut self, packed_msg_iter: I) -> (usize, Result<(), io::Error>)
where
I: Iterator<Item = &'m PackedMessage>,
{
let mut total = 0;
for msg in packed_msg_iter {
match self.send_packed(msg) {
Ok(_) => total += 1,
Err(e) => return (total, Err(e)),
}
}
(total, Ok(()))
}
/// Attempt to dedserialize some data in the receiving buffer into a single
/// complete structure with the given type M. If there is insufficient data
/// at the moment, Ok(None) is returned.
///
/// As the type is provided by the reader, it is possible for the sent
/// message to be misinterpreted as a different type. At best, this is detected
/// by a failure in deserialization. If an error occurs, the data is not consumed
/// from the Middleman. Subsequent reads will operate on the same data.
///
/// NOTE: The correctness of this call depends on the sender sending an _internally consistent_
/// `PackedMessage`. If you (or the sender) are manually manipulating the internal state of
/// sent messages this may cause errors for the receiver. If you are sticking to the Middleman API
/// and treating each `PackedMessage` as a black box, everything should be fine.
pub fn recv<M: Message>(&mut self) -> Result<Option<M>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let decoded: M = bincode::deserialize(
&self.buf[4..buf_end]
)?;
self.payload_bytes = None;
self.buf.drain(0..buf_end);
self.buf_occupancy -= buf_end;
return Ok(Some(decoded))
}
}
Ok(None)
}
/// See `recv`. Will repeatedly call recv() until the next message is not yet ready.
/// Recevied messages are placed into the buffer `dest_vector`. The return result is (a,b)
/// where a is the total number of message successfully received and where b is OK(()) if all goes well
/// and some Err otherwise. In the event of the first error, the call will return and not receive any further.
pub fn recv_all_into<M: Message>(&mut self, dest_vector: &mut Vec<M>) -> (usize, Result<(), RecvError>) {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { dest_vector.push(msg); total += 1; },
Err(e) => return (total, Err(e)),
};
}
}
/// Hijack the mio event loop, reading and writing to the socket as polling allows.
/// Events not related to the recv() of this middleman (determined from the provided mio::Token)
/// are pushed into the provided extra_events vector. Returns Ok(Some(_)) if a
/// message was successfully received. May return Ok(None) if the user provides as timeout some
// non-none Duration. Returns Err(_) if something goes wrong with reading from the socket or
/// deserializing the message. See try_recv for more information.
/// WARNING: The user should take care to iterate over these events also, as without them all the
/// Evented objects registered with the provided poll object might experience lost wakeups.
/// It is suggested that in the event of any recv_blocking calls in your loop, you extend the event
/// loop with a drain() on the same vector passed here as extra_events (using the iterator chain function, for example.)
pub fn recv_blocking<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
my_tok: Token,
extra_events: &mut Vec<Event>,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
let mut res = None;
loop {
for event in events.iter() {
let tok = event.token();
if res.is_none() && tok == my_tok {
if! event.readiness().is_readable() {
continue;
}
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => {
// got a message!
res = Some(msg);
},
Ok(None) => (),
Err(e) => return Err(e),
}
} else {
extra_events.push(event);
}
}
if let Some(msg) = res {
// message ready to go. Exiting loop
return Ok(Some(msg));
} else {
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
}
/// See `recv_blocking`. This function is intended as an alternative for use
/// for cases where it is _certain_ that this Middleman is the only registered `mio::Evented`
/// for the provided `Poll` and `Events` objects. Thus, the call _WILL NOT CHECK_ the token at all,
/// presuming that all events are associated with this middleman.
pub fn recv_blocking_solo<M: Message>(&mut self,
poll: &Poll,
events: &mut Events,
mut timeout: Option<time::Duration>) -> Result<Option<M>, RecvError> {
if let Some(msg) = self.recv::<M>()? {
// trivial case.
// message was already sitting in the buffer.
return Ok(Some(msg));
}
let started_at = time::Instant::now();
loop {
for event in events.iter() {
if event.readiness().is_readable(){
// event is relevant!
self.read_in()?;
match self.recv::<M>() {
Ok(Some(msg)) => return Ok(Some(msg)),
Ok(None) => (),
Err(e) => return Err(e),
}
}
}
poll.poll(events, timeout).expect("poll() failed inside `recv_blocking_solo()`");
if let Some(t) = timeout {
// update remaining timeout
let since = started_at.elapsed();
if since >= t {
// ran out of time
return Ok(None);
}
timeout = Some(t-since);
}
}
}
/// Similar to `recv_all_into`, but rather than storing each received message'm',
/// the provided function is called with arguments (self, m) where self is `&mut self`.
/// This allows for ergonomic utility of the received messages using a closure.
pub fn recv_all_map<F,M>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where M: Message, F: FnMut(&mut Self, M) + Sized {
let mut total = 0;
loop {
match self.recv::<M>() {
Ok(None) => return (total, Ok(())),
Ok(Some(msg)) => { total += 1; func(self, msg) },
Err(e) => return (total, Err(e)),
};
}
}
/// Combination of `recv_all_map` and `recv_packed`.
pub fn recv_all_packed_map<F>(&mut self, mut func: F) -> (usize, Result<(), RecvError>)
where F: FnMut(&mut Self, PackedMessage) + Sized {
let mut total = 0;
loop {
match self.recv_packed() {
Ok(None) => return (total, Ok(())),
Ok(Some(packed)) => { total += 1; func(self, packed) },
Err(e) => return (total, Err(e)),
};
}
}
/// Similar to `recv`, except builds (instead of some M: Message), a `PackedMessage` object.
/// These packed messages can be deserialized later, sent on the line without knowledge of the
/// message type etc.
pub fn recv_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
self.read_in()?;
self.check_payload();
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
let mut vec = self.buf.drain(0..buf_end)
.collect::<Vec<_>>();
self.payload_bytes = None;
self.buf_occupancy -= buf_end;
return Ok(Some(PackedMessage(vec)))
}
}
Ok(None)
}
/// Similar to `recv_packed`, but the potentially-read bytes are not actually removed
/// from the stream. The message will _still be there_.
pub fn peek_packed(&mut self) -> Result<Option<PackedMessage>, RecvError> {
if let Some(pb) = self.payload_bytes {
let buf_end = pb as usize + 4;
if self.buf_occupancy >= buf_end {
return Ok(
Some(
PackedMessage::from_raw(
self.buf[4..buf_end].to_vec()
)
)
)
}
}
Ok(None)
}
}
/// This structure represents the serialized form of some `Message`-implementing structure.
/// Dealing with a PackedMessage may be suitable when:
/// (1) You need to send/store/receive a message but don't need to actually _use_ it yourself.
/// (2) You want to serialize a message once, and send it multiple times.
/// (3) You want to read and discard a message whose type is unknown.
///
/// NOTE: The packed message maps 1:1 with the bytes that travel over the TcpStream. As such,
/// packed messages also contain the 4-byte length preable. The user is discocuraged from
/// manipulating the contents of a packed message. The `recv` statement relies on consistency
/// of packed messages
pub struct PackedMessage(Vec<u8>);
impl PackedMessage {
/// Create a new PakcedMessage from the given `Message`-implementing struct
pub fn new<M: Message>(m: &M) -> Result<Self, PackingError> {
let m_len: usize = bincode::serialized_size(&m)? as usize;
if m_len > ::std::u32::MAX as usize {
return Err(PackingError::TooBigToRepresent);
}
let tot_len = m_len+4;
let mut vec = Vec::with_capacity(tot_len);
vec.resize(tot_len, 0u8);
bincode::serialize_into(&mut vec[0..4], &(m_len as u32))?;
bincode::serialize_into(&mut vec[4..tot_len], m)?;
Ok(PackedMessage(vec))
}
/// Attempt to unpack this Packedmessage given a type hint. This may fail if the
/// PackedMessage isn't internally consistent or the type doesn't match that
/// of the type used for serialization.
pub fn unpack<M: Message>(&self) -> Result<M, Box<bincode::ErrorKind>> {
bincode::deserialize(&self.0[4..])
}
/// Unwrap the byte buffer comprising this PackedMessage
#[inline] pub fn into_raw(self) -> Vec<u8> { self.0 }
/// Accept the given byte buffer as the basis for a PackedMessage
///
/// WARNING: Use this at your own risk! The `recv` functions and their variants rely on
/// the correct contents of messages to work correcty.
///
/// NOTE: The first 4 bytes of a the buffer are used to store the length of the payload.
#[inline] pub fn from_raw(v: Vec<u8>) -> Self { PackedMessage(v) }
/// Return the number of bytes this packed message contains. Maps 1:1 with
/// the bit complexity of the message sent over the network.
#[inline] pub fn byte_len(&self) -> usize { self.0.len() }
/// Acquire an immutable reference to the internal buffer of the packed message.
#[inline] pub fn get_raw(&self) -> &Vec<u8> { &self.0 }
/// Acquire a mutable reference to the internal buffer of the packed message.
///
/// WARNING: Contents of a PackedMessage represent a delicata internal state. Sending an
/// internally inconsistent PackedMessage will compromise the connection. Use at your own risk!
#[inline] pub fn get_mut_raw(&mut self) -> &mut Vec<u8> |
}
impl Evented for Middleman {
fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.register(poll, token, interest, opts)
}
fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt)
-> io::Result<()> {
self.stream.reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &Poll) -> io::Result<()> {
self.stream.deregister(poll)
}
} | { &mut self.0 } | identifier_body |
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Overnet daemon for Fuchsia
#![deny(missing_docs)]
mod mdns;
use failure::{Error, ResultExt};
use fidl_fuchsia_overnet::{
MeshControllerRequest, MeshControllerRequestStream, OvernetListPeersResponder, OvernetRequest,
OvernetRequestStream, ServiceConsumerListPeersResponder, ServiceConsumerRequest,
ServiceConsumerRequestStream, ServicePublisherRequest, ServicePublisherRequestStream,
};
use fuchsia_async as fasync;
use fuchsia_component::server::ServiceFs;
use fuchsia_zircon as zx;
use futures::future::{abortable, AbortHandle};
use futures::prelude::*;
use overnet_core::{LinkId, Node, NodeId, NodeOptions, NodeRuntime, RouterTime, SendHandle};
use std::cell::RefCell;
use std::collections::HashMap;
use std::net::{SocketAddr, SocketAddrV6};
use std::ops::Deref;
use std::rc::Rc;
use zx::AsHandleRef;
/// Identifier for a link as defined by overnetstack.
#[derive(Clone, Copy, Debug)]
enum AppLinkId {
Udp(SocketAddrV6),
}
/// Adapter of fasync::Time to RouterTime for overnet's core library.
#[derive(PartialEq, PartialOrd, Eq, Ord, Clone, Copy, Debug)]
struct Time(fasync::Time);
impl RouterTime for Time {
type Duration = zx::Duration;
fn now() -> Self {
Time(fasync::Time::now())
}
fn after(time: Self, duration: zx::Duration) -> Self {
Self(time.0 + duration)
}
}
struct AppRuntime;
impl NodeRuntime for AppRuntime {
type Time = Time;
type LinkId = AppLinkId;
const IMPLEMENTATION: fidl_fuchsia_overnet_protocol::Implementation =
fidl_fuchsia_overnet_protocol::Implementation::OvernetStack;
fn handle_type(handle: &zx::Handle) -> Result<SendHandle, Error> {
match handle.basic_info()?.object_type {
zx::ObjectType::CHANNEL => Ok(SendHandle::Channel),
_ => failure::bail!("Handle type not proxyable {:?}", handle.basic_info()?.object_type),
}
}
fn spawn_local<F>(&mut self, future: F)
where
F: Future<Output = ()> +'static,
{
fasync::spawn_local(future)
}
fn at(&mut self, t: Self::Time, f: impl FnOnce() +'static) {
fasync::spawn_local(at(t.0, f))
}
fn router_link_id(&self, id: AppLinkId) -> LinkId<overnet_core::PhysLinkId<AppLinkId>> {
with_app_mut(|app| match id {
AppLinkId::Udp(addr) => {
app.udp_link_ids.get(&addr).copied().unwrap_or(LinkId::invalid())
}
})
}
fn send_on_link(&mut self, id: Self::LinkId, packet: &mut [u8]) -> Result<(), Error> {
match id {
AppLinkId::Udp(addr) => {
println!("UDP_SEND to:{} len:{}", addr, packet.len());
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("no udp socket"))?
.sock
.clone())
})?;
let sock = sock.deref().as_ref();
if let Err(e) = sock.send_to(packet, addr) {
if e.kind() == std::io::ErrorKind::BrokenPipe {
log::warn!("BrokenPipe on UDP socket: let's make a new one");
with_app_mut(|app| {
app.udp_socket.take();
app.udp_socket = Some(UdpSocketHolder::new(app.node_id)?);
Ok(())
})
} else {
Err(e.into())
}
} else {
Ok(())
}
}
}
}
}
struct UdpSocketHolder {
sock: Rc<fasync::net::UdpSocket>,
abort_publisher: AbortHandle,
}
impl UdpSocketHolder {
fn new(node_id: NodeId) -> Result<Self, Error> {
// Must not call with_app_mut here, as this is called from with_app_mut
let sock = std::net::UdpSocket::bind("[::]:0").context("Creating UDP socket")?;
let publisher =
mdns::publish(node_id, sock.local_addr().context("Getting UDP local address")?.port());
let sock = Rc::new(fasync::net::UdpSocket::from_socket(sock)?);
let (publisher, abort_publisher) = abortable(publisher);
fasync::spawn_local(async move {
let _ = publisher.await;
});
Ok(Self { sock, abort_publisher })
}
}
impl Drop for UdpSocketHolder {
fn drop(&mut self) {
self.abort_publisher.abort();
}
}
/// Global state for overnetstack.
struct App {
node_id: NodeId,
node: Node<AppRuntime>,
// TODO(ctiller): This state should be moved out into its own file.
/// Map socket addresses to udp link ids.
udp_link_ids: HashMap<SocketAddrV6, LinkId<overnet_core::PhysLinkId<AppLinkId>>>,
/// UDP socket to communicate over.
udp_socket: Option<UdpSocketHolder>,
}
thread_local! {
// Always access via with_app_mut
static APP: RefCell<App> = RefCell::new(App::new());
}
fn with_app_mut<R>(f: impl FnOnce(&mut App) -> R) -> R {
APP.with(|rcapp| f(&mut rcapp.borrow_mut()))
}
async fn at(when: fasync::Time, f: impl FnOnce()) {
fasync::Timer::new(when).await;
f();
}
impl App {
/// Create a new instance of App
fn | () -> App {
let node = Node::new(
AppRuntime,
NodeOptions::new()
.set_quic_server_key_file(Box::new("/pkg/data/cert.key".to_string()))
.set_quic_server_cert_file(Box::new("/pkg/data/cert.crt".to_string())),
)
.unwrap();
App { node_id: node.id(), node, udp_link_ids: HashMap::new(), udp_socket: None }
}
}
fn normalize_addr(addr: SocketAddr) -> SocketAddrV6 {
match addr {
SocketAddr::V6(a) => a,
SocketAddr::V4(a) => SocketAddrV6::new(a.ip().to_ipv6_mapped(), a.port(), 0, 0),
}
}
/// UDP read inner loop.
async fn read_udp_inner() -> Result<(), Error> {
let mut buf: [u8; 1500] = [0; 1500];
loop {
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("No udp socket to read from"))?
.sock
.clone())
})?;
let (length, sender) = sock.recv_from(&mut buf).await?;
println!("UDP_RECV from:{} len:{}", sender, length);
let sender = normalize_addr(sender);
with_app_mut(|app| -> Result<(), Error> {
if let Some(link_id) = app.udp_link_ids.get(&sender) {
app.node.queue_recv(*link_id, &mut buf[..length]);
} else {
log::warn!("No link for received packet {:?}", sender);
}
Ok(())
})?;
}
}
/// Read UDP socket until closed, logging errors.
async fn read_udp() {
if let Err(e) = read_udp_inner().await {
log::warn!("UDP read loop failed: {:?}", e);
}
}
/// Register a new UDP endpoint for some node_id.
fn register_udp(addr: SocketAddr, node_id: NodeId) -> Result<(), Error> {
with_app_mut(|app| {
app.node.mention_node(node_id);
let addr = normalize_addr(addr);
if app.udp_link_ids.get(&addr).is_none() {
let rtr_id = app.node.new_link(node_id, AppLinkId::Udp(addr))?;
println!("register peer: {} node_id={:?} rtr_id={:?}", addr, node_id, rtr_id);
app.udp_link_ids.insert(addr, rtr_id);
}
Ok(())
})
}
trait ListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error>;
}
impl ListPeersResponder for ServiceConsumerListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
impl ListPeersResponder for OvernetListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
async fn run_list_peers_inner(responder: impl ListPeersResponder) -> Result<(), Error> {
let mut peers = with_app_mut(|app| app.node.clone().list_peers()).await?;
responder.respond(&mut peers.iter_mut())?;
Ok(())
}
async fn run_list_peers(responder: impl ListPeersResponder) {
if let Err(e) = run_list_peers_inner(responder).await {
log::warn!("List peers gets error: {:?}", e);
}
}
async fn run_service_publisher_server(
mut stream: ServicePublisherRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServicePublisherRequest::PublishService { service_name, provider,.. } => {
app.node.register_service(service_name, provider)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e)
}
}
Ok(())
}
async fn run_service_consumer_server(
mut stream: ServiceConsumerRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServiceConsumerRequest::ListPeers { responder,.. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
ServiceConsumerRequest::ConnectToService { node, service_name, chan,.. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_mesh_controller_server(mut stream: MeshControllerRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
MeshControllerRequest::AttachSocketLink { socket, options,.. } => {
app.node.attach_socket_link(options.connection_label, socket)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_legacy_overnet_server(mut stream: OvernetRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
OvernetRequest::PublishService { service_name, provider,.. } => {
app.node.register_service(service_name, provider)
}
OvernetRequest::ListPeers { responder,.. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
OvernetRequest::ConnectToService { node, service_name, chan,.. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
enum IncomingService {
ServiceConsumer(ServiceConsumerRequestStream),
ServicePublisher(ServicePublisherRequestStream),
MeshController(MeshControllerRequestStream),
LegacyOvernet(OvernetRequestStream),
//... more services here
}
#[fasync::run_singlethreaded]
async fn main() -> Result<(), Error> {
fuchsia_syslog::init_with_tags(&["overnet"]).context("initialize logging")?;
let mut fs = ServiceFs::new_local();
let mut svc_dir = fs.dir("svc");
svc_dir.add_fidl_service(IncomingService::ServiceConsumer);
svc_dir.add_fidl_service(IncomingService::ServicePublisher);
svc_dir.add_fidl_service(IncomingService::MeshController);
svc_dir.add_fidl_service(IncomingService::LegacyOvernet);
fs.take_and_serve_directory_handle()?;
with_app_mut(|app| -> Result<(), Error> {
app.udp_socket = Some(UdpSocketHolder::new(app.node.id())?);
fasync::spawn_local(mdns::subscribe());
fasync::spawn_local(read_udp());
Ok(())
})
.context("Initializing UDP & MDNS")?;
const MAX_CONCURRENT: usize = 10_000;
fs.for_each_concurrent(MAX_CONCURRENT, |svcreq| match svcreq {
IncomingService::MeshController(stream) => {
run_mesh_controller_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServicePublisher(stream) => {
run_service_publisher_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServiceConsumer(stream) => {
run_service_consumer_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::LegacyOvernet(stream) => {
run_legacy_overnet_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
})
.await;
Ok(())
}
| new | identifier_name |
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Overnet daemon for Fuchsia
#![deny(missing_docs)]
mod mdns;
use failure::{Error, ResultExt};
use fidl_fuchsia_overnet::{
MeshControllerRequest, MeshControllerRequestStream, OvernetListPeersResponder, OvernetRequest,
OvernetRequestStream, ServiceConsumerListPeersResponder, ServiceConsumerRequest,
ServiceConsumerRequestStream, ServicePublisherRequest, ServicePublisherRequestStream,
};
use fuchsia_async as fasync;
use fuchsia_component::server::ServiceFs;
use fuchsia_zircon as zx;
use futures::future::{abortable, AbortHandle};
use futures::prelude::*;
use overnet_core::{LinkId, Node, NodeId, NodeOptions, NodeRuntime, RouterTime, SendHandle};
use std::cell::RefCell;
use std::collections::HashMap;
use std::net::{SocketAddr, SocketAddrV6};
use std::ops::Deref;
use std::rc::Rc;
use zx::AsHandleRef;
/// Identifier for a link as defined by overnetstack.
#[derive(Clone, Copy, Debug)]
enum AppLinkId {
Udp(SocketAddrV6),
}
/// Adapter of fasync::Time to RouterTime for overnet's core library.
#[derive(PartialEq, PartialOrd, Eq, Ord, Clone, Copy, Debug)]
struct Time(fasync::Time);
impl RouterTime for Time {
type Duration = zx::Duration;
fn now() -> Self {
Time(fasync::Time::now())
}
fn after(time: Self, duration: zx::Duration) -> Self {
Self(time.0 + duration)
}
}
struct AppRuntime;
impl NodeRuntime for AppRuntime {
type Time = Time;
type LinkId = AppLinkId;
const IMPLEMENTATION: fidl_fuchsia_overnet_protocol::Implementation =
fidl_fuchsia_overnet_protocol::Implementation::OvernetStack;
fn handle_type(handle: &zx::Handle) -> Result<SendHandle, Error> {
match handle.basic_info()?.object_type {
zx::ObjectType::CHANNEL => Ok(SendHandle::Channel),
_ => failure::bail!("Handle type not proxyable {:?}", handle.basic_info()?.object_type),
}
}
fn spawn_local<F>(&mut self, future: F)
where
F: Future<Output = ()> +'static,
{
fasync::spawn_local(future)
}
fn at(&mut self, t: Self::Time, f: impl FnOnce() +'static) {
fasync::spawn_local(at(t.0, f))
}
fn router_link_id(&self, id: AppLinkId) -> LinkId<overnet_core::PhysLinkId<AppLinkId>> {
with_app_mut(|app| match id {
AppLinkId::Udp(addr) => {
app.udp_link_ids.get(&addr).copied().unwrap_or(LinkId::invalid())
}
})
}
fn send_on_link(&mut self, id: Self::LinkId, packet: &mut [u8]) -> Result<(), Error> {
match id {
AppLinkId::Udp(addr) => {
println!("UDP_SEND to:{} len:{}", addr, packet.len());
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("no udp socket"))?
.sock
.clone())
})?;
let sock = sock.deref().as_ref();
if let Err(e) = sock.send_to(packet, addr) {
if e.kind() == std::io::ErrorKind::BrokenPipe {
log::warn!("BrokenPipe on UDP socket: let's make a new one");
with_app_mut(|app| {
app.udp_socket.take();
app.udp_socket = Some(UdpSocketHolder::new(app.node_id)?);
Ok(())
})
} else {
Err(e.into())
}
} else {
Ok(())
}
}
}
}
}
struct UdpSocketHolder {
sock: Rc<fasync::net::UdpSocket>,
abort_publisher: AbortHandle,
}
impl UdpSocketHolder {
fn new(node_id: NodeId) -> Result<Self, Error> {
// Must not call with_app_mut here, as this is called from with_app_mut
let sock = std::net::UdpSocket::bind("[::]:0").context("Creating UDP socket")?;
let publisher =
mdns::publish(node_id, sock.local_addr().context("Getting UDP local address")?.port());
let sock = Rc::new(fasync::net::UdpSocket::from_socket(sock)?);
let (publisher, abort_publisher) = abortable(publisher);
fasync::spawn_local(async move {
let _ = publisher.await;
});
Ok(Self { sock, abort_publisher })
}
}
impl Drop for UdpSocketHolder {
fn drop(&mut self) {
self.abort_publisher.abort();
}
}
/// Global state for overnetstack.
struct App {
node_id: NodeId,
node: Node<AppRuntime>,
// TODO(ctiller): This state should be moved out into its own file.
/// Map socket addresses to udp link ids.
udp_link_ids: HashMap<SocketAddrV6, LinkId<overnet_core::PhysLinkId<AppLinkId>>>,
/// UDP socket to communicate over.
udp_socket: Option<UdpSocketHolder>,
}
thread_local! {
// Always access via with_app_mut
static APP: RefCell<App> = RefCell::new(App::new());
}
fn with_app_mut<R>(f: impl FnOnce(&mut App) -> R) -> R |
async fn at(when: fasync::Time, f: impl FnOnce()) {
fasync::Timer::new(when).await;
f();
}
impl App {
/// Create a new instance of App
fn new() -> App {
let node = Node::new(
AppRuntime,
NodeOptions::new()
.set_quic_server_key_file(Box::new("/pkg/data/cert.key".to_string()))
.set_quic_server_cert_file(Box::new("/pkg/data/cert.crt".to_string())),
)
.unwrap();
App { node_id: node.id(), node, udp_link_ids: HashMap::new(), udp_socket: None }
}
}
fn normalize_addr(addr: SocketAddr) -> SocketAddrV6 {
match addr {
SocketAddr::V6(a) => a,
SocketAddr::V4(a) => SocketAddrV6::new(a.ip().to_ipv6_mapped(), a.port(), 0, 0),
}
}
/// UDP read inner loop.
async fn read_udp_inner() -> Result<(), Error> {
let mut buf: [u8; 1500] = [0; 1500];
loop {
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("No udp socket to read from"))?
.sock
.clone())
})?;
let (length, sender) = sock.recv_from(&mut buf).await?;
println!("UDP_RECV from:{} len:{}", sender, length);
let sender = normalize_addr(sender);
with_app_mut(|app| -> Result<(), Error> {
if let Some(link_id) = app.udp_link_ids.get(&sender) {
app.node.queue_recv(*link_id, &mut buf[..length]);
} else {
log::warn!("No link for received packet {:?}", sender);
}
Ok(())
})?;
}
}
/// Read UDP socket until closed, logging errors.
async fn read_udp() {
if let Err(e) = read_udp_inner().await {
log::warn!("UDP read loop failed: {:?}", e);
}
}
/// Register a new UDP endpoint for some node_id.
fn register_udp(addr: SocketAddr, node_id: NodeId) -> Result<(), Error> {
with_app_mut(|app| {
app.node.mention_node(node_id);
let addr = normalize_addr(addr);
if app.udp_link_ids.get(&addr).is_none() {
let rtr_id = app.node.new_link(node_id, AppLinkId::Udp(addr))?;
println!("register peer: {} node_id={:?} rtr_id={:?}", addr, node_id, rtr_id);
app.udp_link_ids.insert(addr, rtr_id);
}
Ok(())
})
}
trait ListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error>;
}
impl ListPeersResponder for ServiceConsumerListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
impl ListPeersResponder for OvernetListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
async fn run_list_peers_inner(responder: impl ListPeersResponder) -> Result<(), Error> {
let mut peers = with_app_mut(|app| app.node.clone().list_peers()).await?;
responder.respond(&mut peers.iter_mut())?;
Ok(())
}
async fn run_list_peers(responder: impl ListPeersResponder) {
if let Err(e) = run_list_peers_inner(responder).await {
log::warn!("List peers gets error: {:?}", e);
}
}
async fn run_service_publisher_server(
mut stream: ServicePublisherRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServicePublisherRequest::PublishService { service_name, provider,.. } => {
app.node.register_service(service_name, provider)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e)
}
}
Ok(())
}
async fn run_service_consumer_server(
mut stream: ServiceConsumerRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServiceConsumerRequest::ListPeers { responder,.. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
ServiceConsumerRequest::ConnectToService { node, service_name, chan,.. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_mesh_controller_server(mut stream: MeshControllerRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
MeshControllerRequest::AttachSocketLink { socket, options,.. } => {
app.node.attach_socket_link(options.connection_label, socket)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_legacy_overnet_server(mut stream: OvernetRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
OvernetRequest::PublishService { service_name, provider,.. } => {
app.node.register_service(service_name, provider)
}
OvernetRequest::ListPeers { responder,.. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
OvernetRequest::ConnectToService { node, service_name, chan,.. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
enum IncomingService {
ServiceConsumer(ServiceConsumerRequestStream),
ServicePublisher(ServicePublisherRequestStream),
MeshController(MeshControllerRequestStream),
LegacyOvernet(OvernetRequestStream),
//... more services here
}
#[fasync::run_singlethreaded]
async fn main() -> Result<(), Error> {
fuchsia_syslog::init_with_tags(&["overnet"]).context("initialize logging")?;
let mut fs = ServiceFs::new_local();
let mut svc_dir = fs.dir("svc");
svc_dir.add_fidl_service(IncomingService::ServiceConsumer);
svc_dir.add_fidl_service(IncomingService::ServicePublisher);
svc_dir.add_fidl_service(IncomingService::MeshController);
svc_dir.add_fidl_service(IncomingService::LegacyOvernet);
fs.take_and_serve_directory_handle()?;
with_app_mut(|app| -> Result<(), Error> {
app.udp_socket = Some(UdpSocketHolder::new(app.node.id())?);
fasync::spawn_local(mdns::subscribe());
fasync::spawn_local(read_udp());
Ok(())
})
.context("Initializing UDP & MDNS")?;
const MAX_CONCURRENT: usize = 10_000;
fs.for_each_concurrent(MAX_CONCURRENT, |svcreq| match svcreq {
IncomingService::MeshController(stream) => {
run_mesh_controller_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServicePublisher(stream) => {
run_service_publisher_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServiceConsumer(stream) => {
run_service_consumer_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::LegacyOvernet(stream) => {
run_legacy_overnet_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
})
.await;
Ok(())
}
| {
APP.with(|rcapp| f(&mut rcapp.borrow_mut()))
} | identifier_body |
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Overnet daemon for Fuchsia
#![deny(missing_docs)]
mod mdns;
use failure::{Error, ResultExt};
use fidl_fuchsia_overnet::{
MeshControllerRequest, MeshControllerRequestStream, OvernetListPeersResponder, OvernetRequest,
OvernetRequestStream, ServiceConsumerListPeersResponder, ServiceConsumerRequest,
ServiceConsumerRequestStream, ServicePublisherRequest, ServicePublisherRequestStream,
};
use fuchsia_async as fasync;
use fuchsia_component::server::ServiceFs;
use fuchsia_zircon as zx;
use futures::future::{abortable, AbortHandle};
use futures::prelude::*;
use overnet_core::{LinkId, Node, NodeId, NodeOptions, NodeRuntime, RouterTime, SendHandle};
use std::cell::RefCell;
use std::collections::HashMap;
use std::net::{SocketAddr, SocketAddrV6};
use std::ops::Deref;
use std::rc::Rc;
use zx::AsHandleRef;
/// Identifier for a link as defined by overnetstack.
#[derive(Clone, Copy, Debug)]
enum AppLinkId {
Udp(SocketAddrV6),
}
/// Adapter of fasync::Time to RouterTime for overnet's core library.
#[derive(PartialEq, PartialOrd, Eq, Ord, Clone, Copy, Debug)]
struct Time(fasync::Time);
impl RouterTime for Time {
type Duration = zx::Duration;
fn now() -> Self {
Time(fasync::Time::now())
}
fn after(time: Self, duration: zx::Duration) -> Self {
Self(time.0 + duration)
}
}
struct AppRuntime;
impl NodeRuntime for AppRuntime {
type Time = Time;
type LinkId = AppLinkId;
const IMPLEMENTATION: fidl_fuchsia_overnet_protocol::Implementation =
fidl_fuchsia_overnet_protocol::Implementation::OvernetStack;
fn handle_type(handle: &zx::Handle) -> Result<SendHandle, Error> {
match handle.basic_info()?.object_type {
zx::ObjectType::CHANNEL => Ok(SendHandle::Channel),
_ => failure::bail!("Handle type not proxyable {:?}", handle.basic_info()?.object_type),
}
}
fn spawn_local<F>(&mut self, future: F)
where
F: Future<Output = ()> +'static,
{
fasync::spawn_local(future)
}
fn at(&mut self, t: Self::Time, f: impl FnOnce() +'static) {
fasync::spawn_local(at(t.0, f))
}
fn router_link_id(&self, id: AppLinkId) -> LinkId<overnet_core::PhysLinkId<AppLinkId>> {
with_app_mut(|app| match id {
AppLinkId::Udp(addr) => {
app.udp_link_ids.get(&addr).copied().unwrap_or(LinkId::invalid())
}
})
}
fn send_on_link(&mut self, id: Self::LinkId, packet: &mut [u8]) -> Result<(), Error> {
match id {
AppLinkId::Udp(addr) => {
println!("UDP_SEND to:{} len:{}", addr, packet.len());
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("no udp socket"))?
.sock
.clone())
})?;
let sock = sock.deref().as_ref();
if let Err(e) = sock.send_to(packet, addr) {
if e.kind() == std::io::ErrorKind::BrokenPipe {
log::warn!("BrokenPipe on UDP socket: let's make a new one");
with_app_mut(|app| {
app.udp_socket.take();
app.udp_socket = Some(UdpSocketHolder::new(app.node_id)?);
Ok(())
})
} else {
Err(e.into())
}
} else {
Ok(())
}
}
}
}
}
struct UdpSocketHolder {
sock: Rc<fasync::net::UdpSocket>,
abort_publisher: AbortHandle,
}
impl UdpSocketHolder {
fn new(node_id: NodeId) -> Result<Self, Error> {
// Must not call with_app_mut here, as this is called from with_app_mut
let sock = std::net::UdpSocket::bind("[::]:0").context("Creating UDP socket")?;
let publisher =
mdns::publish(node_id, sock.local_addr().context("Getting UDP local address")?.port());
let sock = Rc::new(fasync::net::UdpSocket::from_socket(sock)?);
let (publisher, abort_publisher) = abortable(publisher);
fasync::spawn_local(async move {
let _ = publisher.await;
});
Ok(Self { sock, abort_publisher })
}
}
impl Drop for UdpSocketHolder {
fn drop(&mut self) {
self.abort_publisher.abort();
}
}
/// Global state for overnetstack.
struct App {
node_id: NodeId,
node: Node<AppRuntime>,
// TODO(ctiller): This state should be moved out into its own file.
/// Map socket addresses to udp link ids.
udp_link_ids: HashMap<SocketAddrV6, LinkId<overnet_core::PhysLinkId<AppLinkId>>>,
/// UDP socket to communicate over.
udp_socket: Option<UdpSocketHolder>,
}
thread_local! {
// Always access via with_app_mut
static APP: RefCell<App> = RefCell::new(App::new());
}
fn with_app_mut<R>(f: impl FnOnce(&mut App) -> R) -> R {
APP.with(|rcapp| f(&mut rcapp.borrow_mut()))
}
async fn at(when: fasync::Time, f: impl FnOnce()) {
fasync::Timer::new(when).await;
f();
}
impl App {
/// Create a new instance of App
fn new() -> App {
let node = Node::new(
AppRuntime,
NodeOptions::new()
.set_quic_server_key_file(Box::new("/pkg/data/cert.key".to_string()))
.set_quic_server_cert_file(Box::new("/pkg/data/cert.crt".to_string())),
)
.unwrap();
App { node_id: node.id(), node, udp_link_ids: HashMap::new(), udp_socket: None }
}
}
fn normalize_addr(addr: SocketAddr) -> SocketAddrV6 {
match addr {
SocketAddr::V6(a) => a,
SocketAddr::V4(a) => SocketAddrV6::new(a.ip().to_ipv6_mapped(), a.port(), 0, 0),
}
}
/// UDP read inner loop.
async fn read_udp_inner() -> Result<(), Error> {
let mut buf: [u8; 1500] = [0; 1500];
loop {
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("No udp socket to read from"))?
.sock
.clone())
})?;
let (length, sender) = sock.recv_from(&mut buf).await?;
println!("UDP_RECV from:{} len:{}", sender, length);
let sender = normalize_addr(sender);
with_app_mut(|app| -> Result<(), Error> {
if let Some(link_id) = app.udp_link_ids.get(&sender) {
app.node.queue_recv(*link_id, &mut buf[..length]);
} else {
log::warn!("No link for received packet {:?}", sender);
}
Ok(())
})?;
}
}
/// Read UDP socket until closed, logging errors.
async fn read_udp() {
if let Err(e) = read_udp_inner().await {
log::warn!("UDP read loop failed: {:?}", e);
}
}
/// Register a new UDP endpoint for some node_id.
fn register_udp(addr: SocketAddr, node_id: NodeId) -> Result<(), Error> {
with_app_mut(|app| {
app.node.mention_node(node_id);
let addr = normalize_addr(addr);
if app.udp_link_ids.get(&addr).is_none() {
let rtr_id = app.node.new_link(node_id, AppLinkId::Udp(addr))?;
println!("register peer: {} node_id={:?} rtr_id={:?}", addr, node_id, rtr_id);
app.udp_link_ids.insert(addr, rtr_id);
}
Ok(())
})
}
trait ListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error>;
}
impl ListPeersResponder for ServiceConsumerListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
impl ListPeersResponder for OvernetListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
async fn run_list_peers_inner(responder: impl ListPeersResponder) -> Result<(), Error> {
let mut peers = with_app_mut(|app| app.node.clone().list_peers()).await?;
responder.respond(&mut peers.iter_mut())?;
Ok(())
}
async fn run_list_peers(responder: impl ListPeersResponder) {
if let Err(e) = run_list_peers_inner(responder).await {
log::warn!("List peers gets error: {:?}", e);
}
}
async fn run_service_publisher_server(
mut stream: ServicePublisherRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServicePublisherRequest::PublishService { service_name, provider,.. } => {
app.node.register_service(service_name, provider)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e)
}
}
Ok(())
}
async fn run_service_consumer_server(
mut stream: ServiceConsumerRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServiceConsumerRequest::ListPeers { responder,.. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
ServiceConsumerRequest::ConnectToService { node, service_name, chan,.. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_mesh_controller_server(mut stream: MeshControllerRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
MeshControllerRequest::AttachSocketLink { socket, options,.. } => {
app.node.attach_socket_link(options.connection_label, socket)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_legacy_overnet_server(mut stream: OvernetRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
OvernetRequest::PublishService { service_name, provider,.. } => {
app.node.register_service(service_name, provider)
}
OvernetRequest::ListPeers { responder,.. } => |
OvernetRequest::ConnectToService { node, service_name, chan,.. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
enum IncomingService {
ServiceConsumer(ServiceConsumerRequestStream),
ServicePublisher(ServicePublisherRequestStream),
MeshController(MeshControllerRequestStream),
LegacyOvernet(OvernetRequestStream),
//... more services here
}
#[fasync::run_singlethreaded]
async fn main() -> Result<(), Error> {
fuchsia_syslog::init_with_tags(&["overnet"]).context("initialize logging")?;
let mut fs = ServiceFs::new_local();
let mut svc_dir = fs.dir("svc");
svc_dir.add_fidl_service(IncomingService::ServiceConsumer);
svc_dir.add_fidl_service(IncomingService::ServicePublisher);
svc_dir.add_fidl_service(IncomingService::MeshController);
svc_dir.add_fidl_service(IncomingService::LegacyOvernet);
fs.take_and_serve_directory_handle()?;
with_app_mut(|app| -> Result<(), Error> {
app.udp_socket = Some(UdpSocketHolder::new(app.node.id())?);
fasync::spawn_local(mdns::subscribe());
fasync::spawn_local(read_udp());
Ok(())
})
.context("Initializing UDP & MDNS")?;
const MAX_CONCURRENT: usize = 10_000;
fs.for_each_concurrent(MAX_CONCURRENT, |svcreq| match svcreq {
IncomingService::MeshController(stream) => {
run_mesh_controller_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServicePublisher(stream) => {
run_service_publisher_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServiceConsumer(stream) => {
run_service_consumer_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::LegacyOvernet(stream) => {
run_legacy_overnet_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
})
.await;
Ok(())
}
| {
fasync::spawn_local(run_list_peers(responder));
Ok(())
} | conditional_block |
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Overnet daemon for Fuchsia
#![deny(missing_docs)]
mod mdns;
use failure::{Error, ResultExt};
use fidl_fuchsia_overnet::{
MeshControllerRequest, MeshControllerRequestStream, OvernetListPeersResponder, OvernetRequest,
OvernetRequestStream, ServiceConsumerListPeersResponder, ServiceConsumerRequest,
ServiceConsumerRequestStream, ServicePublisherRequest, ServicePublisherRequestStream,
};
use fuchsia_async as fasync;
use fuchsia_component::server::ServiceFs;
use fuchsia_zircon as zx;
use futures::future::{abortable, AbortHandle};
use futures::prelude::*;
use overnet_core::{LinkId, Node, NodeId, NodeOptions, NodeRuntime, RouterTime, SendHandle};
use std::cell::RefCell;
use std::collections::HashMap;
use std::net::{SocketAddr, SocketAddrV6};
use std::ops::Deref;
use std::rc::Rc;
use zx::AsHandleRef;
/// Identifier for a link as defined by overnetstack.
#[derive(Clone, Copy, Debug)]
enum AppLinkId {
Udp(SocketAddrV6),
}
/// Adapter of fasync::Time to RouterTime for overnet's core library.
#[derive(PartialEq, PartialOrd, Eq, Ord, Clone, Copy, Debug)]
struct Time(fasync::Time);
impl RouterTime for Time {
type Duration = zx::Duration;
fn now() -> Self {
Time(fasync::Time::now())
}
fn after(time: Self, duration: zx::Duration) -> Self {
Self(time.0 + duration)
}
}
struct AppRuntime;
impl NodeRuntime for AppRuntime {
type Time = Time;
type LinkId = AppLinkId;
const IMPLEMENTATION: fidl_fuchsia_overnet_protocol::Implementation =
fidl_fuchsia_overnet_protocol::Implementation::OvernetStack;
fn handle_type(handle: &zx::Handle) -> Result<SendHandle, Error> {
match handle.basic_info()?.object_type {
zx::ObjectType::CHANNEL => Ok(SendHandle::Channel),
_ => failure::bail!("Handle type not proxyable {:?}", handle.basic_info()?.object_type),
}
}
fn spawn_local<F>(&mut self, future: F)
where
F: Future<Output = ()> +'static,
{
fasync::spawn_local(future)
}
fn at(&mut self, t: Self::Time, f: impl FnOnce() +'static) {
fasync::spawn_local(at(t.0, f))
}
fn router_link_id(&self, id: AppLinkId) -> LinkId<overnet_core::PhysLinkId<AppLinkId>> {
with_app_mut(|app| match id {
AppLinkId::Udp(addr) => {
app.udp_link_ids.get(&addr).copied().unwrap_or(LinkId::invalid())
}
})
}
fn send_on_link(&mut self, id: Self::LinkId, packet: &mut [u8]) -> Result<(), Error> {
match id {
AppLinkId::Udp(addr) => {
println!("UDP_SEND to:{} len:{}", addr, packet.len());
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("no udp socket"))?
.sock
.clone())
})?;
let sock = sock.deref().as_ref();
if let Err(e) = sock.send_to(packet, addr) {
if e.kind() == std::io::ErrorKind::BrokenPipe {
log::warn!("BrokenPipe on UDP socket: let's make a new one");
with_app_mut(|app| {
app.udp_socket.take();
app.udp_socket = Some(UdpSocketHolder::new(app.node_id)?);
Ok(())
})
} else {
Err(e.into())
}
} else {
Ok(())
}
}
}
}
}
struct UdpSocketHolder {
sock: Rc<fasync::net::UdpSocket>,
abort_publisher: AbortHandle,
}
impl UdpSocketHolder {
fn new(node_id: NodeId) -> Result<Self, Error> {
// Must not call with_app_mut here, as this is called from with_app_mut
let sock = std::net::UdpSocket::bind("[::]:0").context("Creating UDP socket")?;
let publisher =
mdns::publish(node_id, sock.local_addr().context("Getting UDP local address")?.port());
let sock = Rc::new(fasync::net::UdpSocket::from_socket(sock)?);
let (publisher, abort_publisher) = abortable(publisher);
fasync::spawn_local(async move {
let _ = publisher.await;
});
Ok(Self { sock, abort_publisher })
}
}
impl Drop for UdpSocketHolder {
fn drop(&mut self) {
self.abort_publisher.abort();
}
}
/// Global state for overnetstack.
struct App {
node_id: NodeId,
node: Node<AppRuntime>,
// TODO(ctiller): This state should be moved out into its own file.
/// Map socket addresses to udp link ids.
udp_link_ids: HashMap<SocketAddrV6, LinkId<overnet_core::PhysLinkId<AppLinkId>>>,
/// UDP socket to communicate over.
udp_socket: Option<UdpSocketHolder>,
}
thread_local! {
// Always access via with_app_mut
static APP: RefCell<App> = RefCell::new(App::new());
}
fn with_app_mut<R>(f: impl FnOnce(&mut App) -> R) -> R {
APP.with(|rcapp| f(&mut rcapp.borrow_mut()))
}
async fn at(when: fasync::Time, f: impl FnOnce()) {
fasync::Timer::new(when).await;
f();
}
impl App {
/// Create a new instance of App
fn new() -> App {
let node = Node::new(
AppRuntime,
NodeOptions::new()
.set_quic_server_key_file(Box::new("/pkg/data/cert.key".to_string()))
.set_quic_server_cert_file(Box::new("/pkg/data/cert.crt".to_string())),
)
.unwrap();
App { node_id: node.id(), node, udp_link_ids: HashMap::new(), udp_socket: None }
}
}
fn normalize_addr(addr: SocketAddr) -> SocketAddrV6 {
match addr {
SocketAddr::V6(a) => a,
SocketAddr::V4(a) => SocketAddrV6::new(a.ip().to_ipv6_mapped(), a.port(), 0, 0),
}
}
/// UDP read inner loop.
async fn read_udp_inner() -> Result<(), Error> {
let mut buf: [u8; 1500] = [0; 1500];
loop {
let sock = with_app_mut(|app| -> Result<_, Error> {
Ok(app
.udp_socket
.as_ref()
.ok_or_else(|| failure::format_err!("No udp socket to read from"))?
.sock
.clone())
})?;
let (length, sender) = sock.recv_from(&mut buf).await?;
println!("UDP_RECV from:{} len:{}", sender, length);
let sender = normalize_addr(sender);
with_app_mut(|app| -> Result<(), Error> {
if let Some(link_id) = app.udp_link_ids.get(&sender) {
app.node.queue_recv(*link_id, &mut buf[..length]);
} else {
log::warn!("No link for received packet {:?}", sender);
}
Ok(())
})?;
}
}
/// Read UDP socket until closed, logging errors.
async fn read_udp() {
if let Err(e) = read_udp_inner().await {
log::warn!("UDP read loop failed: {:?}", e);
}
}
/// Register a new UDP endpoint for some node_id.
fn register_udp(addr: SocketAddr, node_id: NodeId) -> Result<(), Error> {
with_app_mut(|app| {
app.node.mention_node(node_id);
let addr = normalize_addr(addr);
if app.udp_link_ids.get(&addr).is_none() {
let rtr_id = app.node.new_link(node_id, AppLinkId::Udp(addr))?;
println!("register peer: {} node_id={:?} rtr_id={:?}", addr, node_id, rtr_id);
app.udp_link_ids.insert(addr, rtr_id);
}
Ok(())
}) | trait ListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error>;
}
impl ListPeersResponder for ServiceConsumerListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
impl ListPeersResponder for OvernetListPeersResponder {
fn respond(
self,
peers: &mut dyn ExactSizeIterator<Item = &mut fidl_fuchsia_overnet::Peer>,
) -> Result<(), fidl::Error> {
self.send(peers)
}
}
async fn run_list_peers_inner(responder: impl ListPeersResponder) -> Result<(), Error> {
let mut peers = with_app_mut(|app| app.node.clone().list_peers()).await?;
responder.respond(&mut peers.iter_mut())?;
Ok(())
}
async fn run_list_peers(responder: impl ListPeersResponder) {
if let Err(e) = run_list_peers_inner(responder).await {
log::warn!("List peers gets error: {:?}", e);
}
}
async fn run_service_publisher_server(
mut stream: ServicePublisherRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServicePublisherRequest::PublishService { service_name, provider,.. } => {
app.node.register_service(service_name, provider)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e)
}
}
Ok(())
}
async fn run_service_consumer_server(
mut stream: ServiceConsumerRequestStream,
) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
ServiceConsumerRequest::ListPeers { responder,.. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
ServiceConsumerRequest::ConnectToService { node, service_name, chan,.. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_mesh_controller_server(mut stream: MeshControllerRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
MeshControllerRequest::AttachSocketLink { socket, options,.. } => {
app.node.attach_socket_link(options.connection_label, socket)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
async fn run_legacy_overnet_server(mut stream: OvernetRequestStream) -> Result<(), Error> {
while let Some(request) = stream.try_next().await.context("error running overnet server")? {
let result = with_app_mut(|app| match request {
OvernetRequest::PublishService { service_name, provider,.. } => {
app.node.register_service(service_name, provider)
}
OvernetRequest::ListPeers { responder,.. } => {
fasync::spawn_local(run_list_peers(responder));
Ok(())
}
OvernetRequest::ConnectToService { node, service_name, chan,.. } => {
app.node.connect_to_service(node.id.into(), &service_name, chan)
}
});
if let Err(e) = result {
log::warn!("Error servicing request: {:?}", e);
}
}
Ok(())
}
enum IncomingService {
ServiceConsumer(ServiceConsumerRequestStream),
ServicePublisher(ServicePublisherRequestStream),
MeshController(MeshControllerRequestStream),
LegacyOvernet(OvernetRequestStream),
//... more services here
}
#[fasync::run_singlethreaded]
async fn main() -> Result<(), Error> {
fuchsia_syslog::init_with_tags(&["overnet"]).context("initialize logging")?;
let mut fs = ServiceFs::new_local();
let mut svc_dir = fs.dir("svc");
svc_dir.add_fidl_service(IncomingService::ServiceConsumer);
svc_dir.add_fidl_service(IncomingService::ServicePublisher);
svc_dir.add_fidl_service(IncomingService::MeshController);
svc_dir.add_fidl_service(IncomingService::LegacyOvernet);
fs.take_and_serve_directory_handle()?;
with_app_mut(|app| -> Result<(), Error> {
app.udp_socket = Some(UdpSocketHolder::new(app.node.id())?);
fasync::spawn_local(mdns::subscribe());
fasync::spawn_local(read_udp());
Ok(())
})
.context("Initializing UDP & MDNS")?;
const MAX_CONCURRENT: usize = 10_000;
fs.for_each_concurrent(MAX_CONCURRENT, |svcreq| match svcreq {
IncomingService::MeshController(stream) => {
run_mesh_controller_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServicePublisher(stream) => {
run_service_publisher_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::ServiceConsumer(stream) => {
run_service_consumer_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
IncomingService::LegacyOvernet(stream) => {
run_legacy_overnet_server(stream).unwrap_or_else(|e| log::trace!("{:?}", e)).boxed()
}
})
.await;
Ok(())
} | }
| random_line_split |
lib.rs | //! This crate provides the [`quote!`] macro for turning Rust syntax tree data
//! structures into tokens of source code.
//!
//! [`quote!`]: macro.quote.html
//!
//! Procedural macros in Rust receive a stream of tokens as input, execute
//! arbitrary Rust code to determine how to manipulate those tokens, and produce
//! a stream of tokens to hand back to the compiler to compile into the caller's
//! crate. Quasi-quoting is a solution to one piece of that -- producing tokens
//! to return to the compiler.
//!
//! The idea of quasi-quoting is that we write *code* that we treat as *data*.
//! Within the `quote!` macro, we can write what looks like code to our text
//! editor or IDE. We get all the benefits of the editor's brace matching,
//! syntax highlighting, indentation, and maybe autocompletion. But rather than
//! compiling that as code into the current crate, we can treat it as data, pass
//! it around, mutate it, and eventually hand it back to the compiler as tokens
//! to compile into the macro caller's crate.
//!
//! This crate is motivated by the procedural macro use case, but it is a
//! general-purpose Rust quasi-quoting library and is not specific to procedural
//! macros.
//!
//! # Example
//!
//! The following quasi-quoted block of code is something you might find in [a]
//! procedural macro having to do with data structure serialization. The `#var`
//! syntax performs interpolation of runtime variables into the quoted tokens.
//! Check out the documentation of the [`quote!`] macro for more detail about
//! the syntax. See also the [`quote_spanned!`] macro which is important for
//! implementing hygienic procedural macros.
//!
//! [a]: https://serde.rs/
//! [`quote_spanned!`]: macro.quote_spanned.html
//!
//! ```edition2018
//! # use quote::quote;
//! #
//! # let generics = "";
//! # let where_clause = "";
//! # let field_ty = "";
//! # let item_ty = "";
//! # let path = "";
//! # let value = "";
//! #
//! let tokens = quote! {
//! struct SerializeWith #generics #where_clause {
//! value: &'a #field_ty,
//! phantom: core::marker::PhantomData<#item_ty>,
//! }
//!
//! impl #generics serde::Serialize for SerializeWith #generics #where_clause {
//! fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
//! where
//! S: serde::Serializer,
//! {
//! #path(self.value, serializer)
//! }
//! }
//!
//! SerializeWith {
//! value: #value,
//! phantom: core::marker::PhantomData::<#item_ty>,
//! }
//! };
//! ```
use proc_macro_hack::proc_macro_hack;
mod repeat;
pub use self::repeat::*;
pub use quote::ToTokens;
pub use quote::TokenStreamExt;
/// The whole point.
///
/// Performs variable interpolation against the input and produces it as
/// [`TokenStream`]. For returning tokens to the compiler in a procedural macro, use
/// `into()` to build a `TokenStream`.
///
/// [`TokenStream`]: https://docs.rs/proc-macro2/0/proc_macro2/struct.TokenStream.html
///
/// # Interpolation
///
/// Variable interpolation is done with `#var` (similar to `$var` in
/// `macro_rules!` macros). This grabs the `var` variable that is currently in
/// scope and inserts it in that location in the output tokens. Any type
/// implementing the [`ToTokens`] trait can be interpolated. This includes most
/// Rust primitive types as well as most of the syntax tree types from the [Syn]
/// crate.
///
/// [`ToTokens`]: trait.ToTokens.html
/// [Syn]: https://github.com/dtolnay/syn
///
/// Repetition is done using `#(...)*` or `#(...),*` again similar to
/// `macro_rules!`. This iterates through the elements of any variable
/// interpolated within the repetition and inserts a copy of the repetition
/// body for each one.
///
/// - `#(#var)*` — no separators
/// - `#(#var),*` — the character before the asterisk is used as a separator
/// - `#( struct #var; )*` — the repetition can contain other tokens
/// - `#( #k => println!("{}", #v), )*` — even multiple interpolations
/// - `#(let #var = self.#var;)*` - the same variable can be used more than once
///
/// The [`proc_quote::Repeat`](https://docs.rs/proc-quote/0/proc_quote/trait.Repeat.html)
/// trait defines which types are allowed to be interpolated inside a repition pattern.
///
/// Which types that implement the following traits *do* `Repeat`:
/// - [`Iterator<T>`] consumes the iterator, iterating through every element.
/// - <a href="https://doc.rust-lang.org/std/borrow/trait.Borrow.html">`Borrow<[T]>`</a>
/// (includes [`Vec`], [`array`], and [`slice`]) iterates with the [`slice::iter`] method,
/// thus not consuming the original data.
/// - [`ToTokens`], interpolates the variable in every iteration.
///
/// Which types *do NOT* `Repeat`:
/// - [`IntoIterator`], to avoid ambiguity (Ex. "Which behavior would have been used for [`Vec`],
/// which implements both [`IntoIterator`] and <a href="https://doc.rust-lang.org/std/borrow/trait.Borrow.html">
/// `Borrow<[T]>`</a>?"; "Which behavior would have been used for [`TokenStream`], which implements both
/// [`IntoIterator`] and [`ToTokens`]?"). To use the iterator, you may call [`IntoIterator::into_iter`]
/// explicitly.
/// - Ambiguous types that implement at least two of the `Repeat` traits. In the very unlikely case
/// this happens, disambiguate the type by wrapping it under some structure that only implements the
/// trait you desire to use.
///
/// [`Iterator<T>`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html
/// [`Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
/// [`array`]: https://doc.rust-lang.org/std/primitive.array.html
/// [`slice`]: https://doc.rust-lang.org/std/slice/index.html
/// [`slice::iter`]: https://doc.rust-lang.org/std/primitive.slice.html#method.iter
/// [`ToTokens`]: https://docs.rs/proc-quote/0/proc_quote/trait.ToTokens.html
/// [`IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
/// [`IntoIterator::into_iter`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html#tymethod.into_iter
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote!`
/// invocation are spanned with [`Span::call_site()`].
///
/// [`Span::call_site()`]: https://docs.rs/proc-macro2/0/proc_macro2/struct.Span.html#method.call_site
///
/// A different span can be provided through the [`quote_spanned!`] macro.
///
/// [`quote_spanned!`]: macro.quote_spanned.html
///
/// # Return type
///
/// The macro evaluates to an expression of type `proc_macro2::TokenStream`.
/// Meanwhile Rust procedural macros are expected to return the type
/// `proc_macro::TokenStream`.
///
/// The difference between the two types is that `proc_macro` types are entirely
/// specific to procedural macros and cannot ever exist in code outside of a
/// procedural macro, while `proc_macro2` types may exist anywhere including
/// tests and non-macro code like main.rs and build.rs. This is why even the
/// procedural macro ecosystem is largely built around `proc_macro2`, because
/// that ensures the libraries are unit testable and accessible in non-macro
/// contexts.
///
/// There is a [`From`]-conversion in both directions so returning the output of
/// `quote!` from a procedural macro usually looks like `tokens.into()` or
/// `proc_macro::TokenStream::from(tokens)`.
///
/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
///
/// # Examples
///
/// ## Procedural macro
///
/// The structure of a basic procedural macro is as follows. Refer to the [Syn]
/// crate for further useful guidance on using `quote!` as part of a procedural
/// macro.
///
/// [Syn]: https://github.com/dtolnay/syn
///
/// ```edition2018
/// # #[cfg(any())]
/// extern crate proc_macro;
/// # use proc_macro2 as proc_macro;
///
/// use proc_macro::TokenStream;
/// use quote::quote;
///
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// #[proc_macro_derive(HeapSize)]
/// # };
/// pub fn derive_heap_size(input: TokenStream) -> TokenStream {
/// // Parse the input and figure out what implementation to generate...
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let name = /*... */;
/// let expr = /*... */;
/// # };
/// #
/// # let name = 0;
/// # let expr = 0;
///
/// let expanded = quote! {
/// // The generated impl.
/// impl heapsize::HeapSize for #name {
/// fn heap_size_of_children(&self) -> usize {
/// #expr
/// }
/// }
/// };
///
/// // Hand the output tokens back to the compiler.
/// TokenStream::from(expanded)
/// }
/// ```
///
/// ## Combining quoted fragments
///
/// Usually you don't end up constructing an entire final `TokenStream` in one
/// piece. Different parts may come from different helper functions. The tokens
/// produced by `quote!` themselves implement `ToTokens` and so can be
/// interpolated into later `quote!` invocations to build up a final result.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// let type_definition = quote! {...};
/// let methods = quote! {...};
///
/// let tokens = quote! {
/// #type_definition
/// #methods
/// };
/// ```
///
/// ## Constructing identifiers
///
/// Suppose we have an identifier `ident` which came from somewhere in a macro
/// input and we need to modify it in some way for the macro output. Let's
/// consider prepending the identifier with an underscore.
///
/// Simply interpolating the identifier next to an underscore will not have the
/// behavior of concatenating them. The underscore and the identifier will
/// continue to be two separate tokens as if you had written `_ x`.
///
/// ```edition2018
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// // incorrect
/// quote! {
/// let mut _#ident = 0;
/// }
/// # ;
/// ```
///
/// The solution is to perform token-level manipulations using the APIs provided
/// by Syn and proc-macro2.
///
/// ```edition2018
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// let concatenated = format!("_{}", ident);
/// let varname = syn::Ident::new(&concatenated, ident.span());
/// quote! {
/// let mut #varname = 0;
/// }
/// # ;
/// ```
///
/// ## Making method calls
///
/// Let's say our macro requires some type specified in the macro input to have
/// a constructor called `new`. We have the type in a variable called
/// `field_type` of type `syn::Type` and want to invoke the constructor.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// // incorrect
/// quote! {
/// let value = #field_type::new();
/// }
/// # ;
/// ```
///
/// This works only sometimes. If `field_type` is `String`, the expanded code
/// contains `String::new()` which is fine. But if `field_type` is something
/// like `Vec<i32>` then the expanded code is `Vec<i32>::new()` which is invalid
/// syntax. Ordinarily in handwritten Rust we would write `Vec::<i32>::new()`
/// but for macros often the following is more convenient.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type>::new();
/// }
/// # ;
/// ```
///
/// This expands to `<Vec<i32>>::new()` which behaves correctly.
///
/// A similar pattern is appropriate for trait methods.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type as core::default::Default>::default();
/// }
/// # ;
/// ```
#[proc_macro_hack]
pub use proc_quote_impl::quote;
/// Same as `quote!`, but applies a given span to all tokens originating within
/// the macro invocation.
///
/// # Syntax
///
/// A span expression of type [`Span`], followed by `=>`, followed by the tokens
/// to quote. The span expression should be brief -- use a variable for anything
/// more than a few characters. There should be no space before the `=>` token.
///
/// [`Span`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html
///
/// ```edition2018
/// # use proc_macro2::Span;
/// # use quote::quote_spanned;
/// #
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let span = /*... */;
/// # };
/// # let span = Span::call_site();
/// # let init = 0;
///
/// // On one line, use parentheses.
/// let tokens = quote_spanned!(span=> Box::into_raw(Box::new(#init)));
///
/// // On multiple lines, place the span at the top and use braces.
/// let tokens = quote_spanned! {span=>
/// Box::into_raw(Box::new(#init))
/// };
/// ```
///
/// The lack of space before the `=>` should look jarring to Rust programmers
/// and this is intentional. The formatting is designed to be visibly
/// off-balance and draw the eye a particular way, due to the span expression
/// being evaluated in the context of the procedural macro and the remaining
/// tokens being evaluated in the generated code.
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote_spanned!`
/// invocation are spanned with the given span argument.
///
/// # Example
///
/// The following procedural macro code uses `quote_spanned!` to assert that a
/// particular Rust type implements the [`Sync`] trait so that references can be
/// safely shared between threads.
///
/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
///
/// ```edition2018
/// # use quote::{quote_spanned, TokenStreamExt, ToTokens};
/// # use proc_macro2::{Span, TokenStream};
/// #
/// # struct Type;
/// #
/// # impl Type {
/// # fn span(&self) -> Span {
/// # Span::call_site()
/// # }
/// # }
/// #
/// # impl ToTokens for Type {
/// # fn to_tokens(&self, _tokens: &mut TokenStream) {}
/// # }
/// #
/// # let ty = Type;
/// # let call_site = Span::call_site();
/// #
/// let ty_span = ty.span();
/// let assert_sync = quote_spanned! {ty_span=>
/// struct _AssertSync where #ty: Sync;
/// };
/// ```
///
/// If the assertion fails, the user will see an error like the following. The
/// input span of their type is hightlighted in the error.
///
/// ```text
/// error[E0277]: the trait bound `*const (): std::marker::Sync` is not satisfied
/// --> src/main.rs:10:21
/// |
/// 10 | static ref PTR: *const () = &();
/// | ^^^^^^^^^ `*const ()` cannot be shared between threads safely
/// ```
///
/// In this example it is important for the where-clause to be spanned with the
/// line/column information of the user's input type so that error messages are
/// placed appropriately by the compiler. But it is also incredibly important
/// that `Sync` resolves at the macro definition site and not the macro call
/// site. If we resolve `Sync` at the same span that the user's type is going to
/// be resolved, then they could bypass our check by defining their own trait
/// named `Sync` that is implemented for their type.
#[proc_macro_hack]
pub use proc_quote_impl::quote_spanned;
// Not public API.
#[doc(hidden)]
pub mod __rt {
use super::*;
pub use proc_macro2::*;
pub fn append_ident(stream: &mut TokenStream, ident: &str, span: Span) {
// TODO(blocked on rust-lang/rust#54723)
// https://github.com/rust-lang/rust/issues/54723
// Use `new_raw` once it's stabilized
// stream.append(Ident::new_raw(ident, span));
match syn::parse_str::<Ident>(ident) {
Ok(mut ident) => {
ident.set_span(span);
stream.append(ident);
}
Err(_) => stream.append(Ident::new(ident, span)),
}
}
pub fn append_punct(stream: &mut TokenStream, punct: char, spacing: Spacing, span: Span) {
let mut punct = Punct::new(punct, spacing);
punct.set_span(span);
stream.append(punct);
}
pub fn append_stringified_tokens(stream: &mut TokenStream, s: &str, span: Span) {
| b fn append_to_tokens<T: ToTokens>(stream: &mut TokenStream, to_tokens: &T) {
to_tokens.to_tokens(stream);
}
pub fn append_group(
stream: &mut TokenStream,
inner: TokenStream,
delimiter: Delimiter,
span: Span,
) {
let mut group = Group::new(delimiter, inner);
group.set_span(span);
stream.append(group);
}
}
| let s: TokenStream = s.parse().expect("invalid token stream");
stream.extend(s.into_iter().map(|mut t| {
t.set_span(span);
t
}));
}
pu | identifier_body |
lib.rs | //! This crate provides the [`quote!`] macro for turning Rust syntax tree data
//! structures into tokens of source code.
//!
//! [`quote!`]: macro.quote.html
//!
//! Procedural macros in Rust receive a stream of tokens as input, execute
//! arbitrary Rust code to determine how to manipulate those tokens, and produce
//! a stream of tokens to hand back to the compiler to compile into the caller's
//! crate. Quasi-quoting is a solution to one piece of that -- producing tokens
//! to return to the compiler.
//!
//! The idea of quasi-quoting is that we write *code* that we treat as *data*.
//! Within the `quote!` macro, we can write what looks like code to our text
//! editor or IDE. We get all the benefits of the editor's brace matching,
//! syntax highlighting, indentation, and maybe autocompletion. But rather than
//! compiling that as code into the current crate, we can treat it as data, pass
//! it around, mutate it, and eventually hand it back to the compiler as tokens
//! to compile into the macro caller's crate.
//!
//! This crate is motivated by the procedural macro use case, but it is a
//! general-purpose Rust quasi-quoting library and is not specific to procedural
//! macros.
//!
//! # Example
//!
//! The following quasi-quoted block of code is something you might find in [a]
//! procedural macro having to do with data structure serialization. The `#var`
//! syntax performs interpolation of runtime variables into the quoted tokens.
//! Check out the documentation of the [`quote!`] macro for more detail about
//! the syntax. See also the [`quote_spanned!`] macro which is important for
//! implementing hygienic procedural macros.
//!
//! [a]: https://serde.rs/
//! [`quote_spanned!`]: macro.quote_spanned.html
//!
//! ```edition2018
//! # use quote::quote;
//! #
//! # let generics = "";
//! # let where_clause = "";
//! # let field_ty = "";
//! # let item_ty = "";
//! # let path = "";
//! # let value = "";
//! #
//! let tokens = quote! {
//! struct SerializeWith #generics #where_clause {
//! value: &'a #field_ty,
//! phantom: core::marker::PhantomData<#item_ty>,
//! }
//!
//! impl #generics serde::Serialize for SerializeWith #generics #where_clause {
//! fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
//! where
//! S: serde::Serializer,
//! {
//! #path(self.value, serializer)
//! }
//! }
//!
//! SerializeWith {
//! value: #value,
//! phantom: core::marker::PhantomData::<#item_ty>,
//! }
//! };
//! ```
use proc_macro_hack::proc_macro_hack;
mod repeat;
pub use self::repeat::*;
pub use quote::ToTokens;
pub use quote::TokenStreamExt;
/// The whole point.
///
/// Performs variable interpolation against the input and produces it as
/// [`TokenStream`]. For returning tokens to the compiler in a procedural macro, use
/// `into()` to build a `TokenStream`.
///
/// [`TokenStream`]: https://docs.rs/proc-macro2/0/proc_macro2/struct.TokenStream.html
///
/// # Interpolation
///
/// Variable interpolation is done with `#var` (similar to `$var` in
/// `macro_rules!` macros). This grabs the `var` variable that is currently in
/// scope and inserts it in that location in the output tokens. Any type
/// implementing the [`ToTokens`] trait can be interpolated. This includes most
/// Rust primitive types as well as most of the syntax tree types from the [Syn]
/// crate.
///
/// [`ToTokens`]: trait.ToTokens.html
/// [Syn]: https://github.com/dtolnay/syn
///
/// Repetition is done using `#(...)*` or `#(...),*` again similar to
/// `macro_rules!`. This iterates through the elements of any variable
/// interpolated within the repetition and inserts a copy of the repetition
/// body for each one.
///
/// - `#(#var)*` — no separators
/// - `#(#var),*` — the character before the asterisk is used as a separator
/// - `#( struct #var; )*` — the repetition can contain other tokens
/// - `#( #k => println!("{}", #v), )*` — even multiple interpolations
/// - `#(let #var = self.#var;)*` - the same variable can be used more than once
///
/// The [`proc_quote::Repeat`](https://docs.rs/proc-quote/0/proc_quote/trait.Repeat.html)
/// trait defines which types are allowed to be interpolated inside a repition pattern.
///
/// Which types that implement the following traits *do* `Repeat`:
/// - [`Iterator<T>`] consumes the iterator, iterating through every element.
/// - <a href="https://doc.rust-lang.org/std/borrow/trait.Borrow.html">`Borrow<[T]>`</a>
/// (includes [`Vec`], [`array`], and [`slice`]) iterates with the [`slice::iter`] method,
/// thus not consuming the original data.
/// - [`ToTokens`], interpolates the variable in every iteration.
///
/// Which types *do NOT* `Repeat`:
/// - [`IntoIterator`], to avoid ambiguity (Ex. "Which behavior would have been used for [`Vec`],
/// which implements both [`IntoIterator`] and <a href="https://doc.rust-lang.org/std/borrow/trait.Borrow.html">
/// `Borrow<[T]>`</a>?"; "Which behavior would have been used for [`TokenStream`], which implements both
/// [`IntoIterator`] and [`ToTokens`]?"). To use the iterator, you may call [`IntoIterator::into_iter`]
/// explicitly.
/// - Ambiguous types that implement at least two of the `Repeat` traits. In the very unlikely case
/// this happens, disambiguate the type by wrapping it under some structure that only implements the
/// trait you desire to use.
///
/// [`Iterator<T>`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html
/// [`Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
/// [`array`]: https://doc.rust-lang.org/std/primitive.array.html
/// [`slice`]: https://doc.rust-lang.org/std/slice/index.html
/// [`slice::iter`]: https://doc.rust-lang.org/std/primitive.slice.html#method.iter
/// [`ToTokens`]: https://docs.rs/proc-quote/0/proc_quote/trait.ToTokens.html
/// [`IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
/// [`IntoIterator::into_iter`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html#tymethod.into_iter
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote!`
/// invocation are spanned with [`Span::call_site()`].
///
/// [`Span::call_site()`]: https://docs.rs/proc-macro2/0/proc_macro2/struct.Span.html#method.call_site
///
/// A different span can be provided through the [`quote_spanned!`] macro.
///
/// [`quote_spanned!`]: macro.quote_spanned.html
///
/// # Return type
///
/// The macro evaluates to an expression of type `proc_macro2::TokenStream`.
/// Meanwhile Rust procedural macros are expected to return the type
/// `proc_macro::TokenStream`.
///
/// The difference between the two types is that `proc_macro` types are entirely
/// specific to procedural macros and cannot ever exist in code outside of a
/// procedural macro, while `proc_macro2` types may exist anywhere including
/// tests and non-macro code like main.rs and build.rs. This is why even the
/// procedural macro ecosystem is largely built around `proc_macro2`, because
/// that ensures the libraries are unit testable and accessible in non-macro
/// contexts.
///
/// There is a [`From`]-conversion in both directions so returning the output of
/// `quote!` from a procedural macro usually looks like `tokens.into()` or
/// `proc_macro::TokenStream::from(tokens)`.
///
/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
///
/// # Examples
///
/// ## Procedural macro
///
/// The structure of a basic procedural macro is as follows. Refer to the [Syn]
/// crate for further useful guidance on using `quote!` as part of a procedural
/// macro.
///
/// [Syn]: https://github.com/dtolnay/syn
///
/// ```edition2018
/// # #[cfg(any())]
/// extern crate proc_macro;
/// # use proc_macro2 as proc_macro;
///
/// use proc_macro::TokenStream;
/// use quote::quote;
///
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// #[proc_macro_derive(HeapSize)]
/// # };
/// pub fn derive_heap_size(input: TokenStream) -> TokenStream {
/// // Parse the input and figure out what implementation to generate...
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let name = /*... */;
/// let expr = /*... */;
/// # };
/// #
/// # let name = 0;
/// # let expr = 0;
///
/// let expanded = quote! {
/// // The generated impl.
/// impl heapsize::HeapSize for #name {
/// fn heap_size_of_children(&self) -> usize {
/// #expr
/// }
/// }
/// };
///
/// // Hand the output tokens back to the compiler.
/// TokenStream::from(expanded)
/// }
/// ```
///
/// ## Combining quoted fragments
///
/// Usually you don't end up constructing an entire final `TokenStream` in one
/// piece. Different parts may come from different helper functions. The tokens
/// produced by `quote!` themselves implement `ToTokens` and so can be
/// interpolated into later `quote!` invocations to build up a final result.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// let type_definition = quote! {...};
/// let methods = quote! {...};
///
/// let tokens = quote! {
/// #type_definition
/// #methods
/// };
/// ```
///
/// ## Constructing identifiers
///
/// Suppose we have an identifier `ident` which came from somewhere in a macro
/// input and we need to modify it in some way for the macro output. Let's
/// consider prepending the identifier with an underscore.
///
/// Simply interpolating the identifier next to an underscore will not have the
/// behavior of concatenating them. The underscore and the identifier will
/// continue to be two separate tokens as if you had written `_ x`.
///
/// ```edition2018
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// // incorrect
/// quote! {
/// let mut _#ident = 0;
/// }
/// # ;
/// ```
///
/// The solution is to perform token-level manipulations using the APIs provided
/// by Syn and proc-macro2.
///
/// ```edition2018
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// let concatenated = format!("_{}", ident);
/// let varname = syn::Ident::new(&concatenated, ident.span());
/// quote! {
/// let mut #varname = 0;
/// }
/// # ;
/// ```
///
/// ## Making method calls
///
/// Let's say our macro requires some type specified in the macro input to have
/// a constructor called `new`. We have the type in a variable called
/// `field_type` of type `syn::Type` and want to invoke the constructor.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// // incorrect
/// quote! {
/// let value = #field_type::new();
/// }
/// # ;
/// ```
///
/// This works only sometimes. If `field_type` is `String`, the expanded code
/// contains `String::new()` which is fine. But if `field_type` is something
/// like `Vec<i32>` then the expanded code is `Vec<i32>::new()` which is invalid
/// syntax. Ordinarily in handwritten Rust we would write `Vec::<i32>::new()`
/// but for macros often the following is more convenient.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type>::new();
/// }
/// # ;
/// ```
///
/// This expands to `<Vec<i32>>::new()` which behaves correctly.
///
/// A similar pattern is appropriate for trait methods.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type as core::default::Default>::default();
/// }
/// # ;
/// ```
#[proc_macro_hack]
pub use proc_quote_impl::quote;
/// Same as `quote!`, but applies a given span to all tokens originating within
/// the macro invocation.
///
/// # Syntax
///
/// A span expression of type [`Span`], followed by `=>`, followed by the tokens
/// to quote. The span expression should be brief -- use a variable for anything
/// more than a few characters. There should be no space before the `=>` token.
///
/// [`Span`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html
///
/// ```edition2018
/// # use proc_macro2::Span;
/// # use quote::quote_spanned;
/// #
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let span = /*... */;
/// # };
/// # let span = Span::call_site();
/// # let init = 0;
///
/// // On one line, use parentheses.
/// let tokens = quote_spanned!(span=> Box::into_raw(Box::new(#init)));
///
/// // On multiple lines, place the span at the top and use braces.
/// let tokens = quote_spanned! {span=>
/// Box::into_raw(Box::new(#init))
/// };
/// ```
///
/// The lack of space before the `=>` should look jarring to Rust programmers
/// and this is intentional. The formatting is designed to be visibly
/// off-balance and draw the eye a particular way, due to the span expression
/// being evaluated in the context of the procedural macro and the remaining
/// tokens being evaluated in the generated code.
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote_spanned!`
/// invocation are spanned with the given span argument.
///
/// # Example
///
/// The following procedural macro code uses `quote_spanned!` to assert that a
/// particular Rust type implements the [`Sync`] trait so that references can be
/// safely shared between threads.
///
/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
///
/// ```edition2018
/// # use quote::{quote_spanned, TokenStreamExt, ToTokens};
/// # use proc_macro2::{Span, TokenStream};
/// #
/// # struct Type;
/// #
/// # impl Type {
/// # fn span(&self) -> Span {
/// # Span::call_site()
/// # }
/// # }
/// #
/// # impl ToTokens for Type {
/// # fn to_tokens(&self, _tokens: &mut TokenStream) {}
/// # }
/// #
/// # let ty = Type;
/// # let call_site = Span::call_site();
/// #
/// let ty_span = ty.span();
/// let assert_sync = quote_spanned! {ty_span=>
/// struct _AssertSync where #ty: Sync;
/// };
/// ```
///
/// If the assertion fails, the user will see an error like the following. The
/// input span of their type is hightlighted in the error.
///
/// ```text
/// error[E0277]: the trait bound `*const (): std::marker::Sync` is not satisfied
/// --> src/main.rs:10:21
/// |
/// 10 | static ref PTR: *const () = &();
/// | ^^^^^^^^^ `*const ()` cannot be shared between threads safely
/// ```
///
/// In this example it is important for the where-clause to be spanned with the
/// line/column information of the user's input type so that error messages are
/// placed appropriately by the compiler. But it is also incredibly important
/// that `Sync` resolves at the macro definition site and not the macro call
/// site. If we resolve `Sync` at the same span that the user's type is going to
/// be resolved, then they could bypass our check by defining their own trait
/// named `Sync` that is implemented for their type.
#[proc_macro_hack]
pub use proc_quote_impl::quote_spanned;
// Not public API.
#[doc(hidden)]
pub mod __rt {
use super::*;
pub use proc_macro2::*;
pub fn append_ident(stream: &mut TokenStream, ident: &str, span: Span) {
// TODO(blocked on rust-lang/rust#54723)
// https://github.com/rust-lang/rust/issues/54723
// Use `new_raw` once it's stabilized
// stream.append(Ident::new_raw(ident, span));
match syn::parse_str::<Ident>(ident) {
Ok(mut ident) => {
ident.set_span(span);
stream.append(ident);
}
Err(_) => stream.append(Ident::new(ident, span)),
}
}
pub fn append_p | &mut TokenStream, punct: char, spacing: Spacing, span: Span) {
let mut punct = Punct::new(punct, spacing);
punct.set_span(span);
stream.append(punct);
}
pub fn append_stringified_tokens(stream: &mut TokenStream, s: &str, span: Span) {
let s: TokenStream = s.parse().expect("invalid token stream");
stream.extend(s.into_iter().map(|mut t| {
t.set_span(span);
t
}));
}
pub fn append_to_tokens<T: ToTokens>(stream: &mut TokenStream, to_tokens: &T) {
to_tokens.to_tokens(stream);
}
pub fn append_group(
stream: &mut TokenStream,
inner: TokenStream,
delimiter: Delimiter,
span: Span,
) {
let mut group = Group::new(delimiter, inner);
group.set_span(span);
stream.append(group);
}
}
| unct(stream: | identifier_name |
lib.rs | //! This crate provides the [`quote!`] macro for turning Rust syntax tree data
//! structures into tokens of source code.
//!
//! [`quote!`]: macro.quote.html
//!
//! Procedural macros in Rust receive a stream of tokens as input, execute
//! arbitrary Rust code to determine how to manipulate those tokens, and produce
//! a stream of tokens to hand back to the compiler to compile into the caller's
//! crate. Quasi-quoting is a solution to one piece of that -- producing tokens
//! to return to the compiler.
//!
//! The idea of quasi-quoting is that we write *code* that we treat as *data*.
//! Within the `quote!` macro, we can write what looks like code to our text
//! editor or IDE. We get all the benefits of the editor's brace matching,
//! syntax highlighting, indentation, and maybe autocompletion. But rather than
//! compiling that as code into the current crate, we can treat it as data, pass
//! it around, mutate it, and eventually hand it back to the compiler as tokens
//! to compile into the macro caller's crate.
//!
//! This crate is motivated by the procedural macro use case, but it is a
//! general-purpose Rust quasi-quoting library and is not specific to procedural
//! macros.
//!
//! # Example
//!
//! The following quasi-quoted block of code is something you might find in [a]
//! procedural macro having to do with data structure serialization. The `#var`
//! syntax performs interpolation of runtime variables into the quoted tokens.
//! Check out the documentation of the [`quote!`] macro for more detail about
//! the syntax. See also the [`quote_spanned!`] macro which is important for
//! implementing hygienic procedural macros.
//!
//! [a]: https://serde.rs/
//! [`quote_spanned!`]: macro.quote_spanned.html
//!
//! ```edition2018
//! # use quote::quote;
//! #
//! # let generics = "";
//! # let where_clause = "";
//! # let field_ty = "";
//! # let item_ty = "";
//! # let path = "";
//! # let value = "";
//! #
//! let tokens = quote! {
//! struct SerializeWith #generics #where_clause {
//! value: &'a #field_ty,
//! phantom: core::marker::PhantomData<#item_ty>,
//! }
//!
//! impl #generics serde::Serialize for SerializeWith #generics #where_clause {
//! fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
//! where
//! S: serde::Serializer,
//! {
//! #path(self.value, serializer)
//! }
//! }
//!
//! SerializeWith {
//! value: #value,
//! phantom: core::marker::PhantomData::<#item_ty>,
//! }
//! };
//! ```
use proc_macro_hack::proc_macro_hack;
mod repeat;
pub use self::repeat::*;
pub use quote::ToTokens;
pub use quote::TokenStreamExt;
/// The whole point.
///
/// Performs variable interpolation against the input and produces it as
/// [`TokenStream`]. For returning tokens to the compiler in a procedural macro, use
/// `into()` to build a `TokenStream`.
///
/// [`TokenStream`]: https://docs.rs/proc-macro2/0/proc_macro2/struct.TokenStream.html
///
/// # Interpolation
///
/// Variable interpolation is done with `#var` (similar to `$var` in
/// `macro_rules!` macros). This grabs the `var` variable that is currently in
/// scope and inserts it in that location in the output tokens. Any type
/// implementing the [`ToTokens`] trait can be interpolated. This includes most
/// Rust primitive types as well as most of the syntax tree types from the [Syn]
/// crate.
///
/// [`ToTokens`]: trait.ToTokens.html
/// [Syn]: https://github.com/dtolnay/syn
///
/// Repetition is done using `#(...)*` or `#(...),*` again similar to
/// `macro_rules!`. This iterates through the elements of any variable
/// interpolated within the repetition and inserts a copy of the repetition
/// body for each one.
///
/// - `#(#var)*` — no separators
/// - `#(#var),*` — the character before the asterisk is used as a separator
/// - `#( struct #var; )*` — the repetition can contain other tokens
/// - `#( #k => println!("{}", #v), )*` — even multiple interpolations
/// - `#(let #var = self.#var;)*` - the same variable can be used more than once
///
/// The [`proc_quote::Repeat`](https://docs.rs/proc-quote/0/proc_quote/trait.Repeat.html)
/// trait defines which types are allowed to be interpolated inside a repition pattern.
///
/// Which types that implement the following traits *do* `Repeat`:
/// - [`Iterator<T>`] consumes the iterator, iterating through every element.
/// - <a href="https://doc.rust-lang.org/std/borrow/trait.Borrow.html">`Borrow<[T]>`</a>
/// (includes [`Vec`], [`array`], and [`slice`]) iterates with the [`slice::iter`] method,
/// thus not consuming the original data.
/// - [`ToTokens`], interpolates the variable in every iteration.
///
/// Which types *do NOT* `Repeat`:
/// - [`IntoIterator`], to avoid ambiguity (Ex. "Which behavior would have been used for [`Vec`],
/// which implements both [`IntoIterator`] and <a href="https://doc.rust-lang.org/std/borrow/trait.Borrow.html">
/// `Borrow<[T]>`</a>?"; "Which behavior would have been used for [`TokenStream`], which implements both
/// [`IntoIterator`] and [`ToTokens`]?"). To use the iterator, you may call [`IntoIterator::into_iter`]
/// explicitly.
/// - Ambiguous types that implement at least two of the `Repeat` traits. In the very unlikely case
/// this happens, disambiguate the type by wrapping it under some structure that only implements the
/// trait you desire to use.
///
/// [`Iterator<T>`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html
/// [`Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
/// [`array`]: https://doc.rust-lang.org/std/primitive.array.html
/// [`slice`]: https://doc.rust-lang.org/std/slice/index.html
/// [`slice::iter`]: https://doc.rust-lang.org/std/primitive.slice.html#method.iter
/// [`ToTokens`]: https://docs.rs/proc-quote/0/proc_quote/trait.ToTokens.html
/// [`IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
/// [`IntoIterator::into_iter`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html#tymethod.into_iter
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote!`
/// invocation are spanned with [`Span::call_site()`].
///
/// [`Span::call_site()`]: https://docs.rs/proc-macro2/0/proc_macro2/struct.Span.html#method.call_site
///
/// A different span can be provided through the [`quote_spanned!`] macro.
///
/// [`quote_spanned!`]: macro.quote_spanned.html
///
/// # Return type
///
/// The macro evaluates to an expression of type `proc_macro2::TokenStream`.
/// Meanwhile Rust procedural macros are expected to return the type
/// `proc_macro::TokenStream`.
///
/// The difference between the two types is that `proc_macro` types are entirely
/// specific to procedural macros and cannot ever exist in code outside of a
/// procedural macro, while `proc_macro2` types may exist anywhere including
/// tests and non-macro code like main.rs and build.rs. This is why even the
/// procedural macro ecosystem is largely built around `proc_macro2`, because
/// that ensures the libraries are unit testable and accessible in non-macro
/// contexts.
///
/// There is a [`From`]-conversion in both directions so returning the output of
/// `quote!` from a procedural macro usually looks like `tokens.into()` or
/// `proc_macro::TokenStream::from(tokens)`.
///
/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
///
/// # Examples
///
/// ## Procedural macro
///
/// The structure of a basic procedural macro is as follows. Refer to the [Syn]
/// crate for further useful guidance on using `quote!` as part of a procedural
/// macro.
///
/// [Syn]: https://github.com/dtolnay/syn
///
/// ```edition2018
/// # #[cfg(any())]
/// extern crate proc_macro;
/// # use proc_macro2 as proc_macro;
///
/// use proc_macro::TokenStream;
/// use quote::quote;
///
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// #[proc_macro_derive(HeapSize)]
/// # };
/// pub fn derive_heap_size(input: TokenStream) -> TokenStream {
/// // Parse the input and figure out what implementation to generate...
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let name = /*... */;
/// let expr = /*... */;
/// # };
/// #
/// # let name = 0;
/// # let expr = 0;
///
/// let expanded = quote! {
/// // The generated impl.
/// impl heapsize::HeapSize for #name {
/// fn heap_size_of_children(&self) -> usize {
/// #expr
/// }
/// }
/// };
///
/// // Hand the output tokens back to the compiler.
/// TokenStream::from(expanded)
/// }
/// ```
///
/// ## Combining quoted fragments
///
/// Usually you don't end up constructing an entire final `TokenStream` in one
/// piece. Different parts may come from different helper functions. The tokens
/// produced by `quote!` themselves implement `ToTokens` and so can be
/// interpolated into later `quote!` invocations to build up a final result.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// let type_definition = quote! {...};
/// let methods = quote! {...};
///
/// let tokens = quote! {
/// #type_definition
/// #methods
/// };
/// ```
///
/// ## Constructing identifiers
///
/// Suppose we have an identifier `ident` which came from somewhere in a macro
/// input and we need to modify it in some way for the macro output. Let's
/// consider prepending the identifier with an underscore.
///
/// Simply interpolating the identifier next to an underscore will not have the
/// behavior of concatenating them. The underscore and the identifier will
/// continue to be two separate tokens as if you had written `_ x`.
///
/// ```edition2018
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// // incorrect | /// # ;
/// ```
///
/// The solution is to perform token-level manipulations using the APIs provided
/// by Syn and proc-macro2.
///
/// ```edition2018
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// let concatenated = format!("_{}", ident);
/// let varname = syn::Ident::new(&concatenated, ident.span());
/// quote! {
/// let mut #varname = 0;
/// }
/// # ;
/// ```
///
/// ## Making method calls
///
/// Let's say our macro requires some type specified in the macro input to have
/// a constructor called `new`. We have the type in a variable called
/// `field_type` of type `syn::Type` and want to invoke the constructor.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// // incorrect
/// quote! {
/// let value = #field_type::new();
/// }
/// # ;
/// ```
///
/// This works only sometimes. If `field_type` is `String`, the expanded code
/// contains `String::new()` which is fine. But if `field_type` is something
/// like `Vec<i32>` then the expanded code is `Vec<i32>::new()` which is invalid
/// syntax. Ordinarily in handwritten Rust we would write `Vec::<i32>::new()`
/// but for macros often the following is more convenient.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type>::new();
/// }
/// # ;
/// ```
///
/// This expands to `<Vec<i32>>::new()` which behaves correctly.
///
/// A similar pattern is appropriate for trait methods.
///
/// ```edition2018
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type as core::default::Default>::default();
/// }
/// # ;
/// ```
#[proc_macro_hack]
pub use proc_quote_impl::quote;
/// Same as `quote!`, but applies a given span to all tokens originating within
/// the macro invocation.
///
/// # Syntax
///
/// A span expression of type [`Span`], followed by `=>`, followed by the tokens
/// to quote. The span expression should be brief -- use a variable for anything
/// more than a few characters. There should be no space before the `=>` token.
///
/// [`Span`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html
///
/// ```edition2018
/// # use proc_macro2::Span;
/// # use quote::quote_spanned;
/// #
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let span = /*... */;
/// # };
/// # let span = Span::call_site();
/// # let init = 0;
///
/// // On one line, use parentheses.
/// let tokens = quote_spanned!(span=> Box::into_raw(Box::new(#init)));
///
/// // On multiple lines, place the span at the top and use braces.
/// let tokens = quote_spanned! {span=>
/// Box::into_raw(Box::new(#init))
/// };
/// ```
///
/// The lack of space before the `=>` should look jarring to Rust programmers
/// and this is intentional. The formatting is designed to be visibly
/// off-balance and draw the eye a particular way, due to the span expression
/// being evaluated in the context of the procedural macro and the remaining
/// tokens being evaluated in the generated code.
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote_spanned!`
/// invocation are spanned with the given span argument.
///
/// # Example
///
/// The following procedural macro code uses `quote_spanned!` to assert that a
/// particular Rust type implements the [`Sync`] trait so that references can be
/// safely shared between threads.
///
/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
///
/// ```edition2018
/// # use quote::{quote_spanned, TokenStreamExt, ToTokens};
/// # use proc_macro2::{Span, TokenStream};
/// #
/// # struct Type;
/// #
/// # impl Type {
/// # fn span(&self) -> Span {
/// # Span::call_site()
/// # }
/// # }
/// #
/// # impl ToTokens for Type {
/// # fn to_tokens(&self, _tokens: &mut TokenStream) {}
/// # }
/// #
/// # let ty = Type;
/// # let call_site = Span::call_site();
/// #
/// let ty_span = ty.span();
/// let assert_sync = quote_spanned! {ty_span=>
/// struct _AssertSync where #ty: Sync;
/// };
/// ```
///
/// If the assertion fails, the user will see an error like the following. The
/// input span of their type is hightlighted in the error.
///
/// ```text
/// error[E0277]: the trait bound `*const (): std::marker::Sync` is not satisfied
/// --> src/main.rs:10:21
/// |
/// 10 | static ref PTR: *const () = &();
/// | ^^^^^^^^^ `*const ()` cannot be shared between threads safely
/// ```
///
/// In this example it is important for the where-clause to be spanned with the
/// line/column information of the user's input type so that error messages are
/// placed appropriately by the compiler. But it is also incredibly important
/// that `Sync` resolves at the macro definition site and not the macro call
/// site. If we resolve `Sync` at the same span that the user's type is going to
/// be resolved, then they could bypass our check by defining their own trait
/// named `Sync` that is implemented for their type.
#[proc_macro_hack]
pub use proc_quote_impl::quote_spanned;
// Not public API.
#[doc(hidden)]
pub mod __rt {
use super::*;
pub use proc_macro2::*;
pub fn append_ident(stream: &mut TokenStream, ident: &str, span: Span) {
// TODO(blocked on rust-lang/rust#54723)
// https://github.com/rust-lang/rust/issues/54723
// Use `new_raw` once it's stabilized
// stream.append(Ident::new_raw(ident, span));
match syn::parse_str::<Ident>(ident) {
Ok(mut ident) => {
ident.set_span(span);
stream.append(ident);
}
Err(_) => stream.append(Ident::new(ident, span)),
}
}
pub fn append_punct(stream: &mut TokenStream, punct: char, spacing: Spacing, span: Span) {
let mut punct = Punct::new(punct, spacing);
punct.set_span(span);
stream.append(punct);
}
pub fn append_stringified_tokens(stream: &mut TokenStream, s: &str, span: Span) {
let s: TokenStream = s.parse().expect("invalid token stream");
stream.extend(s.into_iter().map(|mut t| {
t.set_span(span);
t
}));
}
pub fn append_to_tokens<T: ToTokens>(stream: &mut TokenStream, to_tokens: &T) {
to_tokens.to_tokens(stream);
}
pub fn append_group(
stream: &mut TokenStream,
inner: TokenStream,
delimiter: Delimiter,
span: Span,
) {
let mut group = Group::new(delimiter, inner);
group.set_span(span);
stream.append(group);
}
} | /// quote! {
/// let mut _#ident = 0;
/// } | random_line_split |
tpl.rs | //! TeX templating
//!
//! The `tpl` module contains a way of constructing a TeX-document programmatically. It ensures
//! documents are well-formed syntactically, but not semantically (e.g. it is possible to express
//! documents that contain multiple `\documentclass` macro calls inside the document but not a
//! `\begin{foo}` without a matching `\end`).
//!
//! As a result of this deliberate limitation, the API is fairly simple. The core module offers the
//! entire abstraction through the `TexElement` trait, while the `elements` module contains
//! syntactic sugar for building documents quickly.
//!
//! ## "Hello, world" using `TexElement` directly.
//!
//! ```rust
//! use texrender::tpl::{Args, BeginEndBlock, Group, IntoTexElement, MacroCall, OptArgs, RawTex,
//! TexElement, Text};
//!
//! let doctype = MacroCall::new("documentclass",
//! OptArgs::single("12pt"),
//! Args::single("article"));
//! let mut contents: Vec<Box<dyn TexElement>> = Vec::new();
//! contents.push(Box::new(MacroCall::new("section",
//! Default::default(),
//! Args::single("Hello, world"))));
//! contents.push("This is fun & easy.".into_tex_element());
//! let document = BeginEndBlock::new("document", Default::default(), Default::default(), contents);
//! let tex = Group::new(vec![Box::new(doctype) as Box<dyn TexElement>, Box::new(document)]);
//! let output = tex.render().expect("rendering failed");
//! assert_eq!(output,
//! "\\documentclass[12pt]{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! While this form uses no macros, it is rather inconvenient to write. Luckily there is an
//! alternative:
//!
//! ## "Hello, world" using elements and macros.
//!
//! ```rust
//! use texrender::elems;
//! use texrender::tpl::TexElement;
//! use texrender::tpl::elements::{N, doc, document, documentclass, section};
//!
//! let tex = doc(elems!(
//! documentclass(elems!(), "article"),
//! document(elems!(
//! section("Hello, world"),
//! "This is fun & easy."
//! ))
//! ));
//!
//! let output = tex.render().expect("rendering failed");
//!
//! assert_eq!(output,
//! "\\documentclass{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! Element functions like `section` above typically cover most use cases, while not preventing the
//! u ser to drop back to the raw functions above. The `elems` macro conveniently boxes and
//! type-erases children, while `N` can be used for "no arguments" for both args and optargs.
#[macro_use]
pub mod macros;
pub mod elements;
use std::fmt::Debug;
use std::io::Write;
use std::{io, string};
/// Renderable Tex element.
pub trait TexElement: Debug {
/// Type-erases a `TexElement`.
fn boxed(self) -> Box<dyn TexElement>
where
Self: Sized +'static,
{
Box::new(self) as Box<dyn TexElement>
}
/// Renders the element into a string.
///
/// May return an error if a non-utf8 element has been given.
fn render(&self) -> Result<String, string::FromUtf8Error> {
let mut buffer: Vec<u8> = Vec::new();
self.write_tex(&mut buffer)
.expect("should always be able to write to in-memory buffer");
String::from_utf8(buffer)
}
/// Writes a rendering of the element to the given writer.
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()>;
}
/// Conversion trait for various types.
///
/// Used for primitive conversions of various types directly into tex elements. Implementations
/// include:
///
/// * `Box<dyn TexElement>` are passed through unchanged.
/// * Any other `TexElement` will be boxed.
/// * `str` and `String` are converted to escaped `Text` elements.
/// * Any number (`u8`,...) is converted to escaped `Text` using display.
/// * A `Vec<Box<dyn TexElement>>` is converted into a `Group`.
/// * The unit type `()` is converted into an empty element.
pub trait IntoTexElement {
/// Converts the given element into a `TexElement`.
fn into_tex_element(self) -> Box<dyn TexElement>;
}
impl IntoTexElement for Box<dyn TexElement> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self
}
}
impl<'a> IntoTexElement for &'a str {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self.to_owned().into_tex_element()
}
}
impl IntoTexElement for String {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(self))
}
}
impl IntoTexElement for () {
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(RawTex(Vec::new()))
}
}
impl<T: TexElement + Sized +'static> IntoTexElement for T {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(self)
}
}
impl IntoTexElement for Vec<Box<dyn TexElement>> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Group::new(self))
}
}
macro_rules! using_display {
($ty:ty) => {
impl IntoTexElement for $ty {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(format!("{}", self)))
}
}
};
}
using_display!(u8);
using_display!(u16);
using_display!(u32);
using_display!(u64);
using_display!(u128);
using_display!(i8);
using_display!(i16);
using_display!(i32);
using_display!(i64);
using_display!(i128);
using_display!(f32);
using_display!(f64);
/// Writes a list of tex elements to a stream with a separator.
pub fn write_list<'a, I>(writer: &mut dyn Write, separator: &str, iter: I) -> io::Result<()>
where
I: Iterator<Item = &'a Box<dyn TexElement>> + 'a,
{
for (idx, arg) in iter.enumerate() {
if idx!= 0 {
writer.write_all(separator.as_bytes())?;
}
arg.write_tex(writer)?;
}
Ok(())
}
/// A raw, unescaped piece of tex code.
///
/// Tex is not guaranteed to be UTF-8 encoded, thus `RawTex` internally keeps bytes. The value will
/// be inserted into the document without any escaping. The value is unchecked, thus it is possible
/// to create syntactically incorrect invalid documents using this element.
#[derive(Clone, Debug)]
pub struct RawTex(Vec<u8>);
impl RawTex {
/// Crates a new raw tex element from a string.
#[inline]
pub fn new(raw: Vec<u8>) -> Self {
RawTex(raw)
}
}
impl TexElement for RawTex {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(self.0.as_slice())
}
}
/// A text string.
///
/// Text strings will be escaped before insertion.
#[derive(Clone, Debug)]
pub struct Text(String);
impl Text {
/// Creates a new text string.
#[inline]
pub fn new(raw: String) -> Self {
Text(raw)
}
}
impl TexElement for Text {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
crate::tex_escape::write_escaped(writer, &self.0)
}
}
/// A set of optional arguments.
///
/// Optional arguments in LaTeX are typically denoted using square brackets and comma-separated.
#[derive(Debug, Default)]
pub struct OptArgs(Vec<Box<dyn TexElement>>);
impl OptArgs {
/// Creates a new set of optional arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
OptArgs(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self |
}
impl TexElement for OptArgs {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if!self.0.is_empty() {
writer.write_all(b"[")?;
write_list(writer, ",", self.0.iter())?;
writer.write_all(b"]")?;
}
Ok(())
}
}
/// A set of arguments.
///
/// Each argument is enclosed by curly braces when rendered, otherwise arguments are just
/// concatenated.
#[derive(Debug, Default)]
pub struct Args(Vec<Box<dyn TexElement>>);
impl Args {
/// Creates a new set of arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
Args(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self {
Args(vec![elem.into_tex_element()])
}
}
impl TexElement for Args {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if!self.0.is_empty() {
writer.write_all(b"{")?;
write_list(writer, "}{", self.0.iter())?;
writer.write_all(b"}")?;
}
Ok(())
}
}
/// A TeX-macro invocation.
///
/// This is the typical `\macroname[opt1]{arg1}{arg2}` call that is common in latex documents.
#[derive(Debug)]
pub struct MacroCall {
/// Name of the instruction.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Mandatory arguments.
args: Args,
/// Whether or not to append a newline afterwards.
newline: bool,
}
impl MacroCall {
/// Creates a new macro call.
///
/// The resulting call will end with a newline when output.
pub fn new<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args,
args,
newline: true,
}
}
/// Creates a new inline macro call.
///
/// Does not end with a newline.
pub fn new_inline<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args,
args,
newline: false,
}
}
}
impl TexElement for MacroCall {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(br"\")?;
self.ident.write_tex(writer)?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
if self.newline {
writer.write_all(b"\n")?;
}
Ok(())
}
}
/// A block with a begin and end instruction.
///
/// Begin-end blocks usually start with a `\begin{blockname}` and end with `\end{blockname}`.
#[derive(Debug)]
pub struct BeginEndBlock {
/// The identifier for the block.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Actual arguments.
args: Args,
/// Child elements of the block.
children: Vec<Box<dyn TexElement>>,
}
impl BeginEndBlock {
/// Creates a new begin/end block.
pub fn new<T: IntoTexElement>(
ident: T,
opt_args: OptArgs,
args: Args,
children: Vec<Box<dyn TexElement>>,
) -> Self {
BeginEndBlock {
ident: ident.into_tex_element(),
opt_args,
args,
children,
}
}
}
impl TexElement for BeginEndBlock {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"\\begin{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}")?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
writer.write_all(b"\n")?;
for child in &self.children {
child.write_tex(writer)?;
}
writer.write_all(b"\n\\end{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}\n")?;
Ok(())
}
}
/// An anonymous block.
///
/// Anonymous blocks are other elements enclosed in curly braces when output.
#[derive(Debug)]
pub struct AnonymousBlock(Vec<Box<dyn TexElement>>);
impl AnonymousBlock {
/// Creates a new anonymous block.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
AnonymousBlock(elems)
}
}
impl TexElement for AnonymousBlock {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"{")?;
for child in &self.0 {
child.write_tex(writer)?;
}
writer.write_all(b"}")?;
Ok(())
}
}
/// Grouping of elements.
///
/// Groups multiple elements together; when output they are written in order, without any characters
/// added.
#[derive(Debug)]
pub struct Group(Vec<Box<dyn TexElement>>);
impl Group {
/// Creates a new group.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
Group(elems)
}
}
impl TexElement for Group {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
for child in &self.0 {
child.write_tex(writer)?;
}
Ok(())
}
}
/// Table row.
///
/// Multiple elements joined by ` & ` when rendered.
#[derive(Debug)]
pub struct TableRow(Vec<Box<dyn TexElement>>);
impl TableRow {
/// Creates a new table row.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
TableRow(elems)
}
}
impl TexElement for TableRow {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
write_list(writer, " & ", self.0.iter())?;
writer.write_all(b"\\\\\n")
}
}
| {
OptArgs(vec![elem.into_tex_element()])
} | identifier_body |
tpl.rs | //! TeX templating
//!
//! The `tpl` module contains a way of constructing a TeX-document programmatically. It ensures
//! documents are well-formed syntactically, but not semantically (e.g. it is possible to express
//! documents that contain multiple `\documentclass` macro calls inside the document but not a
//! `\begin{foo}` without a matching `\end`).
//!
//! As a result of this deliberate limitation, the API is fairly simple. The core module offers the
//! entire abstraction through the `TexElement` trait, while the `elements` module contains
//! syntactic sugar for building documents quickly.
//!
//! ## "Hello, world" using `TexElement` directly.
//!
//! ```rust
//! use texrender::tpl::{Args, BeginEndBlock, Group, IntoTexElement, MacroCall, OptArgs, RawTex,
//! TexElement, Text};
//!
//! let doctype = MacroCall::new("documentclass",
//! OptArgs::single("12pt"),
//! Args::single("article"));
//! let mut contents: Vec<Box<dyn TexElement>> = Vec::new();
//! contents.push(Box::new(MacroCall::new("section",
//! Default::default(),
//! Args::single("Hello, world"))));
//! contents.push("This is fun & easy.".into_tex_element());
//! let document = BeginEndBlock::new("document", Default::default(), Default::default(), contents);
//! let tex = Group::new(vec![Box::new(doctype) as Box<dyn TexElement>, Box::new(document)]);
//! let output = tex.render().expect("rendering failed");
//! assert_eq!(output,
//! "\\documentclass[12pt]{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! While this form uses no macros, it is rather inconvenient to write. Luckily there is an
//! alternative:
//!
//! ## "Hello, world" using elements and macros.
//!
//! ```rust
//! use texrender::elems;
//! use texrender::tpl::TexElement;
//! use texrender::tpl::elements::{N, doc, document, documentclass, section};
//!
//! let tex = doc(elems!(
//! documentclass(elems!(), "article"),
//! document(elems!(
//! section("Hello, world"),
//! "This is fun & easy."
//! ))
//! ));
//!
//! let output = tex.render().expect("rendering failed");
//!
//! assert_eq!(output,
//! "\\documentclass{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! Element functions like `section` above typically cover most use cases, while not preventing the
//! u ser to drop back to the raw functions above. The `elems` macro conveniently boxes and
//! type-erases children, while `N` can be used for "no arguments" for both args and optargs.
#[macro_use]
pub mod macros;
pub mod elements;
use std::fmt::Debug;
use std::io::Write;
use std::{io, string};
/// Renderable Tex element.
pub trait TexElement: Debug {
/// Type-erases a `TexElement`.
fn boxed(self) -> Box<dyn TexElement>
where
Self: Sized +'static,
{
Box::new(self) as Box<dyn TexElement>
}
/// Renders the element into a string.
///
/// May return an error if a non-utf8 element has been given.
fn render(&self) -> Result<String, string::FromUtf8Error> {
let mut buffer: Vec<u8> = Vec::new();
self.write_tex(&mut buffer)
.expect("should always be able to write to in-memory buffer");
String::from_utf8(buffer)
}
/// Writes a rendering of the element to the given writer.
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()>;
}
/// Conversion trait for various types.
///
/// Used for primitive conversions of various types directly into tex elements. Implementations
/// include:
///
/// * `Box<dyn TexElement>` are passed through unchanged.
/// * Any other `TexElement` will be boxed.
/// * `str` and `String` are converted to escaped `Text` elements.
/// * Any number (`u8`,...) is converted to escaped `Text` using display.
/// * A `Vec<Box<dyn TexElement>>` is converted into a `Group`.
/// * The unit type `()` is converted into an empty element.
pub trait IntoTexElement {
/// Converts the given element into a `TexElement`.
fn into_tex_element(self) -> Box<dyn TexElement>;
}
impl IntoTexElement for Box<dyn TexElement> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self
}
}
impl<'a> IntoTexElement for &'a str {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self.to_owned().into_tex_element()
}
}
impl IntoTexElement for String {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(self))
}
}
impl IntoTexElement for () {
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(RawTex(Vec::new()))
}
}
impl<T: TexElement + Sized +'static> IntoTexElement for T {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(self)
}
}
impl IntoTexElement for Vec<Box<dyn TexElement>> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Group::new(self))
}
}
macro_rules! using_display {
($ty:ty) => {
impl IntoTexElement for $ty {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(format!("{}", self)))
}
}
};
}
using_display!(u8);
using_display!(u16);
using_display!(u32);
using_display!(u64);
using_display!(u128);
using_display!(i8);
using_display!(i16);
using_display!(i32);
using_display!(i64);
using_display!(i128);
using_display!(f32);
using_display!(f64);
/// Writes a list of tex elements to a stream with a separator.
pub fn write_list<'a, I>(writer: &mut dyn Write, separator: &str, iter: I) -> io::Result<()>
where
I: Iterator<Item = &'a Box<dyn TexElement>> + 'a,
{
for (idx, arg) in iter.enumerate() {
if idx!= 0 {
writer.write_all(separator.as_bytes())?;
}
arg.write_tex(writer)?;
}
Ok(())
}
/// A raw, unescaped piece of tex code.
///
/// Tex is not guaranteed to be UTF-8 encoded, thus `RawTex` internally keeps bytes. The value will
/// be inserted into the document without any escaping. The value is unchecked, thus it is possible
/// to create syntactically incorrect invalid documents using this element.
#[derive(Clone, Debug)]
pub struct RawTex(Vec<u8>);
impl RawTex {
/// Crates a new raw tex element from a string.
#[inline]
pub fn new(raw: Vec<u8>) -> Self {
RawTex(raw)
}
}
impl TexElement for RawTex {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(self.0.as_slice())
}
}
/// A text string.
///
/// Text strings will be escaped before insertion.
#[derive(Clone, Debug)]
pub struct Text(String);
impl Text {
/// Creates a new text string.
#[inline]
pub fn new(raw: String) -> Self {
Text(raw)
}
}
impl TexElement for Text {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
crate::tex_escape::write_escaped(writer, &self.0)
}
}
/// A set of optional arguments.
///
/// Optional arguments in LaTeX are typically denoted using square brackets and comma-separated.
#[derive(Debug, Default)]
pub struct OptArgs(Vec<Box<dyn TexElement>>);
impl OptArgs {
/// Creates a new set of optional arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
OptArgs(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self {
OptArgs(vec![elem.into_tex_element()])
}
}
impl TexElement for OptArgs {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if!self.0.is_empty() {
writer.write_all(b"[")?;
write_list(writer, ",", self.0.iter())?;
writer.write_all(b"]")?;
}
Ok(())
}
}
/// A set of arguments.
///
/// Each argument is enclosed by curly braces when rendered, otherwise arguments are just
/// concatenated.
#[derive(Debug, Default)]
pub struct Args(Vec<Box<dyn TexElement>>);
impl Args {
/// Creates a new set of arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
Args(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self {
Args(vec![elem.into_tex_element()])
}
}
impl TexElement for Args {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if!self.0.is_empty() {
writer.write_all(b"{")?;
write_list(writer, "}{", self.0.iter())?;
writer.write_all(b"}")?;
}
Ok(())
}
}
/// A TeX-macro invocation.
///
/// This is the typical `\macroname[opt1]{arg1}{arg2}` call that is common in latex documents.
#[derive(Debug)]
pub struct MacroCall {
/// Name of the instruction.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Mandatory arguments.
args: Args,
/// Whether or not to append a newline afterwards.
newline: bool,
}
impl MacroCall {
/// Creates a new macro call.
///
/// The resulting call will end with a newline when output.
pub fn new<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args,
args,
newline: true,
}
}
/// Creates a new inline macro call.
///
/// Does not end with a newline.
pub fn new_inline<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args,
args,
newline: false,
}
}
}
impl TexElement for MacroCall {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(br"\")?;
self.ident.write_tex(writer)?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
if self.newline {
writer.write_all(b"\n")?;
}
Ok(())
}
}
/// A block with a begin and end instruction.
///
/// Begin-end blocks usually start with a `\begin{blockname}` and end with `\end{blockname}`.
#[derive(Debug)]
pub struct BeginEndBlock {
/// The identifier for the block.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Actual arguments.
args: Args,
/// Child elements of the block.
children: Vec<Box<dyn TexElement>>,
}
impl BeginEndBlock {
/// Creates a new begin/end block.
pub fn new<T: IntoTexElement>(
ident: T,
opt_args: OptArgs,
args: Args,
children: Vec<Box<dyn TexElement>>,
) -> Self {
BeginEndBlock {
ident: ident.into_tex_element(),
opt_args,
args,
children,
}
}
}
impl TexElement for BeginEndBlock {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"\\begin{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}")?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
writer.write_all(b"\n")?;
for child in &self.children {
child.write_tex(writer)?;
}
writer.write_all(b"\n\\end{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}\n")?;
Ok(())
}
}
/// An anonymous block.
///
/// Anonymous blocks are other elements enclosed in curly braces when output.
#[derive(Debug)]
pub struct AnonymousBlock(Vec<Box<dyn TexElement>>);
impl AnonymousBlock {
/// Creates a new anonymous block.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
AnonymousBlock(elems)
}
}
impl TexElement for AnonymousBlock {
fn | (&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"{")?;
for child in &self.0 {
child.write_tex(writer)?;
}
writer.write_all(b"}")?;
Ok(())
}
}
/// Grouping of elements.
///
/// Groups multiple elements together; when output they are written in order, without any characters
/// added.
#[derive(Debug)]
pub struct Group(Vec<Box<dyn TexElement>>);
impl Group {
/// Creates a new group.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
Group(elems)
}
}
impl TexElement for Group {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
for child in &self.0 {
child.write_tex(writer)?;
}
Ok(())
}
}
/// Table row.
///
/// Multiple elements joined by ` & ` when rendered.
#[derive(Debug)]
pub struct TableRow(Vec<Box<dyn TexElement>>);
impl TableRow {
/// Creates a new table row.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
TableRow(elems)
}
}
impl TexElement for TableRow {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
write_list(writer, " & ", self.0.iter())?;
writer.write_all(b"\\\\\n")
}
}
| write_tex | identifier_name |
tpl.rs | //! TeX templating
//!
//! The `tpl` module contains a way of constructing a TeX-document programmatically. It ensures
//! documents are well-formed syntactically, but not semantically (e.g. it is possible to express
//! documents that contain multiple `\documentclass` macro calls inside the document but not a
//! `\begin{foo}` without a matching `\end`).
//!
//! As a result of this deliberate limitation, the API is fairly simple. The core module offers the
//! entire abstraction through the `TexElement` trait, while the `elements` module contains
//! syntactic sugar for building documents quickly.
//!
//! ## "Hello, world" using `TexElement` directly.
//!
//! ```rust
//! use texrender::tpl::{Args, BeginEndBlock, Group, IntoTexElement, MacroCall, OptArgs, RawTex,
//! TexElement, Text};
//!
//! let doctype = MacroCall::new("documentclass",
//! OptArgs::single("12pt"),
//! Args::single("article"));
//! let mut contents: Vec<Box<dyn TexElement>> = Vec::new();
//! contents.push(Box::new(MacroCall::new("section",
//! Default::default(),
//! Args::single("Hello, world"))));
//! contents.push("This is fun & easy.".into_tex_element());
//! let document = BeginEndBlock::new("document", Default::default(), Default::default(), contents);
//! let tex = Group::new(vec![Box::new(doctype) as Box<dyn TexElement>, Box::new(document)]);
//! let output = tex.render().expect("rendering failed");
//! assert_eq!(output,
//! "\\documentclass[12pt]{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! While this form uses no macros, it is rather inconvenient to write. Luckily there is an
//! alternative:
//!
//! ## "Hello, world" using elements and macros.
//!
//! ```rust
//! use texrender::elems;
//! use texrender::tpl::TexElement;
//! use texrender::tpl::elements::{N, doc, document, documentclass, section};
//!
//! let tex = doc(elems!(
//! documentclass(elems!(), "article"),
//! document(elems!(
//! section("Hello, world"),
//! "This is fun & easy."
//! ))
//! ));
//!
//! let output = tex.render().expect("rendering failed");
//!
//! assert_eq!(output,
//! "\\documentclass{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! Element functions like `section` above typically cover most use cases, while not preventing the
//! u ser to drop back to the raw functions above. The `elems` macro conveniently boxes and
//! type-erases children, while `N` can be used for "no arguments" for both args and optargs.
#[macro_use]
pub mod macros;
pub mod elements;
use std::fmt::Debug;
use std::io::Write;
use std::{io, string};
/// Renderable Tex element.
pub trait TexElement: Debug {
/// Type-erases a `TexElement`.
fn boxed(self) -> Box<dyn TexElement>
where
Self: Sized +'static,
{
Box::new(self) as Box<dyn TexElement>
}
/// Renders the element into a string.
///
/// May return an error if a non-utf8 element has been given.
fn render(&self) -> Result<String, string::FromUtf8Error> {
let mut buffer: Vec<u8> = Vec::new();
self.write_tex(&mut buffer)
.expect("should always be able to write to in-memory buffer");
String::from_utf8(buffer)
}
/// Writes a rendering of the element to the given writer.
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()>;
}
/// Conversion trait for various types.
///
/// Used for primitive conversions of various types directly into tex elements. Implementations
/// include:
///
/// * `Box<dyn TexElement>` are passed through unchanged.
/// * Any other `TexElement` will be boxed.
/// * `str` and `String` are converted to escaped `Text` elements.
/// * Any number (`u8`,...) is converted to escaped `Text` using display.
/// * A `Vec<Box<dyn TexElement>>` is converted into a `Group`.
/// * The unit type `()` is converted into an empty element.
pub trait IntoTexElement {
/// Converts the given element into a `TexElement`.
fn into_tex_element(self) -> Box<dyn TexElement>;
}
impl IntoTexElement for Box<dyn TexElement> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self
}
}
impl<'a> IntoTexElement for &'a str {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self.to_owned().into_tex_element()
}
}
impl IntoTexElement for String {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(self))
}
}
impl IntoTexElement for () {
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(RawTex(Vec::new()))
}
}
impl<T: TexElement + Sized +'static> IntoTexElement for T {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(self)
}
}
impl IntoTexElement for Vec<Box<dyn TexElement>> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Group::new(self))
}
}
macro_rules! using_display {
($ty:ty) => {
impl IntoTexElement for $ty {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(format!("{}", self)))
}
}
};
}
using_display!(u8);
using_display!(u16);
using_display!(u32);
using_display!(u64);
using_display!(u128);
using_display!(i8);
using_display!(i16);
using_display!(i32);
using_display!(i64);
using_display!(i128);
using_display!(f32);
using_display!(f64);
/// Writes a list of tex elements to a stream with a separator.
pub fn write_list<'a, I>(writer: &mut dyn Write, separator: &str, iter: I) -> io::Result<()>
where
I: Iterator<Item = &'a Box<dyn TexElement>> + 'a,
{
for (idx, arg) in iter.enumerate() {
if idx!= 0 {
writer.write_all(separator.as_bytes())?;
}
arg.write_tex(writer)?;
}
Ok(())
}
/// A raw, unescaped piece of tex code.
///
/// Tex is not guaranteed to be UTF-8 encoded, thus `RawTex` internally keeps bytes. The value will
/// be inserted into the document without any escaping. The value is unchecked, thus it is possible
/// to create syntactically incorrect invalid documents using this element.
#[derive(Clone, Debug)]
pub struct RawTex(Vec<u8>);
impl RawTex {
/// Crates a new raw tex element from a string.
#[inline]
pub fn new(raw: Vec<u8>) -> Self {
RawTex(raw)
}
}
impl TexElement for RawTex {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(self.0.as_slice())
}
}
/// A text string.
///
/// Text strings will be escaped before insertion.
#[derive(Clone, Debug)]
pub struct Text(String);
impl Text {
/// Creates a new text string.
#[inline]
pub fn new(raw: String) -> Self {
Text(raw)
}
}
impl TexElement for Text {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
crate::tex_escape::write_escaped(writer, &self.0)
}
}
/// A set of optional arguments.
///
/// Optional arguments in LaTeX are typically denoted using square brackets and comma-separated.
#[derive(Debug, Default)]
pub struct OptArgs(Vec<Box<dyn TexElement>>);
impl OptArgs {
/// Creates a new set of optional arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
OptArgs(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self {
OptArgs(vec![elem.into_tex_element()])
}
}
impl TexElement for OptArgs {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if!self.0.is_empty() {
writer.write_all(b"[")?;
write_list(writer, ",", self.0.iter())?;
writer.write_all(b"]")?;
}
Ok(())
}
}
/// A set of arguments.
///
/// Each argument is enclosed by curly braces when rendered, otherwise arguments are just
/// concatenated.
#[derive(Debug, Default)]
pub struct Args(Vec<Box<dyn TexElement>>);
impl Args {
/// Creates a new set of arguments.
#[inline]
pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
Args(elements)
}
/// Creates a new optinal argument from a single value.
#[inline]
pub fn single<T: IntoTexElement>(elem: T) -> Self {
Args(vec![elem.into_tex_element()])
}
}
impl TexElement for Args {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
if!self.0.is_empty() {
writer.write_all(b"{")?;
write_list(writer, "}{", self.0.iter())?;
writer.write_all(b"}")?;
}
Ok(())
}
}
/// A TeX-macro invocation.
///
/// This is the typical `\macroname[opt1]{arg1}{arg2}` call that is common in latex documents.
#[derive(Debug)]
pub struct MacroCall {
/// Name of the instruction.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Mandatory arguments.
args: Args,
/// Whether or not to append a newline afterwards.
newline: bool,
}
impl MacroCall {
/// Creates a new macro call.
///
/// The resulting call will end with a newline when output.
pub fn new<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args,
args,
newline: true,
}
}
/// Creates a new inline macro call.
///
/// Does not end with a newline.
pub fn new_inline<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
MacroCall {
ident: ident.into_tex_element(),
opt_args, | newline: false,
}
}
}
impl TexElement for MacroCall {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(br"\")?;
self.ident.write_tex(writer)?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
if self.newline {
writer.write_all(b"\n")?;
}
Ok(())
}
}
/// A block with a begin and end instruction.
///
/// Begin-end blocks usually start with a `\begin{blockname}` and end with `\end{blockname}`.
#[derive(Debug)]
pub struct BeginEndBlock {
/// The identifier for the block.
ident: Box<dyn TexElement>,
/// Optional arguments.
opt_args: OptArgs,
/// Actual arguments.
args: Args,
/// Child elements of the block.
children: Vec<Box<dyn TexElement>>,
}
impl BeginEndBlock {
/// Creates a new begin/end block.
pub fn new<T: IntoTexElement>(
ident: T,
opt_args: OptArgs,
args: Args,
children: Vec<Box<dyn TexElement>>,
) -> Self {
BeginEndBlock {
ident: ident.into_tex_element(),
opt_args,
args,
children,
}
}
}
impl TexElement for BeginEndBlock {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"\\begin{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}")?;
self.opt_args.write_tex(writer)?;
self.args.write_tex(writer)?;
writer.write_all(b"\n")?;
for child in &self.children {
child.write_tex(writer)?;
}
writer.write_all(b"\n\\end{")?;
self.ident.write_tex(writer)?;
writer.write_all(b"}\n")?;
Ok(())
}
}
/// An anonymous block.
///
/// Anonymous blocks are other elements enclosed in curly braces when output.
#[derive(Debug)]
pub struct AnonymousBlock(Vec<Box<dyn TexElement>>);
impl AnonymousBlock {
/// Creates a new anonymous block.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
AnonymousBlock(elems)
}
}
impl TexElement for AnonymousBlock {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
writer.write_all(b"{")?;
for child in &self.0 {
child.write_tex(writer)?;
}
writer.write_all(b"}")?;
Ok(())
}
}
/// Grouping of elements.
///
/// Groups multiple elements together; when output they are written in order, without any characters
/// added.
#[derive(Debug)]
pub struct Group(Vec<Box<dyn TexElement>>);
impl Group {
/// Creates a new group.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
Group(elems)
}
}
impl TexElement for Group {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
for child in &self.0 {
child.write_tex(writer)?;
}
Ok(())
}
}
/// Table row.
///
/// Multiple elements joined by ` & ` when rendered.
#[derive(Debug)]
pub struct TableRow(Vec<Box<dyn TexElement>>);
impl TableRow {
/// Creates a new table row.
pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
TableRow(elems)
}
}
impl TexElement for TableRow {
fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
write_list(writer, " & ", self.0.iter())?;
writer.write_all(b"\\\\\n")
}
} | args, | random_line_split |
tpl.rs | //! TeX templating
//!
//! The `tpl` module contains a way of constructing a TeX-document programmatically. It ensures
//! documents are well-formed syntactically, but not semantically (e.g. it is possible to express
//! documents that contain multiple `\documentclass` macro calls inside the document but not a
//! `\begin{foo}` without a matching `\end`).
//!
//! As a result of this deliberate limitation, the API is fairly simple. The core module offers the
//! entire abstraction through the `TexElement` trait, while the `elements` module contains
//! syntactic sugar for building documents quickly.
//!
//! ## "Hello, world" using `TexElement` directly.
//!
//! ```rust
//! use texrender::tpl::{Args, BeginEndBlock, Group, IntoTexElement, MacroCall, OptArgs, RawTex,
//! TexElement, Text};
//!
//! let doctype = MacroCall::new("documentclass",
//! OptArgs::single("12pt"),
//! Args::single("article"));
//! let mut contents: Vec<Box<dyn TexElement>> = Vec::new();
//! contents.push(Box::new(MacroCall::new("section",
//! Default::default(),
//! Args::single("Hello, world"))));
//! contents.push("This is fun & easy.".into_tex_element());
//! let document = BeginEndBlock::new("document", Default::default(), Default::default(), contents);
//! let tex = Group::new(vec![Box::new(doctype) as Box<dyn TexElement>, Box::new(document)]);
//! let output = tex.render().expect("rendering failed");
//! assert_eq!(output,
//! "\\documentclass[12pt]{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! While this form uses no macros, it is rather inconvenient to write. Luckily there is an
//! alternative:
//!
//! ## "Hello, world" using elements and macros.
//!
//! ```rust
//! use texrender::elems;
//! use texrender::tpl::TexElement;
//! use texrender::tpl::elements::{N, doc, document, documentclass, section};
//!
//! let tex = doc(elems!(
//! documentclass(elems!(), "article"),
//! document(elems!(
//! section("Hello, world"),
//! "This is fun & easy."
//! ))
//! ));
//!
//! let output = tex.render().expect("rendering failed");
//!
//! assert_eq!(output,
//! "\\documentclass{article}\n\
//! \\begin{document}\n\
//! \\section{Hello, world}\n\
//! This is fun \\& easy.\n\
//! \\end{document}\n");
//! ```
//!
//! Element functions like `section` above typically cover most use cases, while not preventing the
//! u ser to drop back to the raw functions above. The `elems` macro conveniently boxes and
//! type-erases children, while `N` can be used for "no arguments" for both args and optargs.
#[macro_use]
pub mod macros;
pub mod elements;
use std::fmt::Debug;
use std::io::Write;
use std::{io, string};
/// A renderable TeX element.
///
/// Implementors define [`TexElement::write_tex`], which serializes the element
/// as TeX source; `boxed` and `render` are provided conveniences on top of it.
pub trait TexElement: Debug {
    /// Type-erases a `TexElement` by moving it onto the heap.
    fn boxed(self) -> Box<dyn TexElement>
    where
        Self: Sized + 'static,
    {
        Box::new(self)
    }

    /// Renders the element into a string.
    ///
    /// May return an error if a non-utf8 element has been given.
    fn render(&self) -> Result<String, string::FromUtf8Error> {
        let mut buffer = Vec::new();
        self.write_tex(&mut buffer)
            .expect("should always be able to write to in-memory buffer");
        String::from_utf8(buffer)
    }

    /// Writes a rendering of the element to the given writer.
    fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()>;
}
/// Conversion trait for various types.
///
/// Used for primitive conversions of various types directly into tex elements. Implementations
/// include:
///
/// * `Box<dyn TexElement>` is passed through unchanged.
/// * Any other `TexElement` will be boxed.
/// * `str` and `String` are converted to escaped `Text` elements.
/// * Any number (`u8`, ...) is converted to escaped `Text` using its `Display` output.
/// * A `Vec<Box<dyn TexElement>>` is converted into a `Group`.
/// * The unit type `()` is converted into an empty element.
pub trait IntoTexElement {
    /// Converts `self` into a boxed, type-erased `TexElement`.
    fn into_tex_element(self) -> Box<dyn TexElement>;
}
impl IntoTexElement for Box<dyn TexElement> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self
}
}
impl<'a> IntoTexElement for &'a str {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
self.to_owned().into_tex_element()
}
}
impl IntoTexElement for String {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(self))
}
}
impl IntoTexElement for () {
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(RawTex(Vec::new()))
}
}
impl<T: TexElement + Sized +'static> IntoTexElement for T {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(self)
}
}
impl IntoTexElement for Vec<Box<dyn TexElement>> {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Group::new(self))
}
}
macro_rules! using_display {
($ty:ty) => {
impl IntoTexElement for $ty {
#[inline]
fn into_tex_element(self) -> Box<dyn TexElement> {
Box::new(Text::new(format!("{}", self)))
}
}
};
}
using_display!(u8);
using_display!(u16);
using_display!(u32);
using_display!(u64);
using_display!(u128);
using_display!(i8);
using_display!(i16);
using_display!(i32);
using_display!(i64);
using_display!(i128);
using_display!(f32);
using_display!(f64);
/// Writes a list of tex elements to a stream with a separator.
pub fn write_list<'a, I>(writer: &mut dyn Write, separator: &str, iter: I) -> io::Result<()>
where
I: Iterator<Item = &'a Box<dyn TexElement>> + 'a,
{
for (idx, arg) in iter.enumerate() {
if idx!= 0 |
arg.write_tex(writer)?;
}
Ok(())
}
/// A raw, unescaped piece of TeX code.
///
/// TeX sources are not guaranteed to be UTF-8 encoded, so the payload is kept
/// as raw bytes. The bytes are emitted verbatim — no escaping, no validation —
/// so it is possible to produce syntactically invalid documents with this
/// element.
#[derive(Clone, Debug)]
pub struct RawTex(Vec<u8>);

impl RawTex {
    /// Creates a new raw TeX element from unvalidated bytes.
    #[inline]
    pub fn new(raw: Vec<u8>) -> Self {
        Self(raw)
    }
}

impl TexElement for RawTex {
    fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
        // Verbatim byte copy; deliberately no escaping.
        writer.write_all(&self.0)
    }
}
/// An escaped text string.
///
/// The contents are passed through the TeX escaper when written, so characters
/// that are special to TeX are rendered safely.
#[derive(Clone, Debug)]
pub struct Text(String);

impl Text {
    /// Creates a new text element from an unescaped string.
    #[inline]
    pub fn new(raw: String) -> Self {
        Self(raw)
    }
}

impl TexElement for Text {
    fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
        // Escaping happens here, at serialization time.
        crate::tex_escape::write_escaped(writer, &self.0)
    }
}
/// A set of optional arguments.
///
/// Optional arguments in LaTeX are typically denoted using square brackets and
/// are comma-separated. An empty set renders as nothing at all (no `[]`).
#[derive(Debug, Default)]
pub struct OptArgs(Vec<Box<dyn TexElement>>);

impl OptArgs {
    /// Creates a new set of optional arguments.
    #[inline]
    pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
        OptArgs(elements)
    }

    /// Creates a new set of optional arguments from a single value.
    // (Fixed doc typo: "optinal" -> "optional".)
    #[inline]
    pub fn single<T: IntoTexElement>(elem: T) -> Self {
        OptArgs(vec![elem.into_tex_element()])
    }
}

impl TexElement for OptArgs {
    fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
        // Only emit brackets when there is at least one argument.
        if !self.0.is_empty() {
            writer.write_all(b"[")?;
            write_list(writer, ",", self.0.iter())?;
            writer.write_all(b"]")?;
        }
        Ok(())
    }
}
/// A set of mandatory arguments.
///
/// Each argument is enclosed by curly braces when rendered; arguments are
/// concatenated with nothing between the brace groups. An empty set renders as
/// nothing at all.
#[derive(Debug, Default)]
pub struct Args(Vec<Box<dyn TexElement>>);

impl Args {
    /// Creates a new set of arguments.
    #[inline]
    pub fn new(elements: Vec<Box<dyn TexElement>>) -> Self {
        Args(elements)
    }

    /// Creates a new set of arguments from a single value.
    // (Fixed doc typo: "optinal argument" -> this creates a mandatory argument.)
    #[inline]
    pub fn single<T: IntoTexElement>(elem: T) -> Self {
        Args(vec![elem.into_tex_element()])
    }
}

impl TexElement for Args {
    fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
        if !self.0.is_empty() {
            // `{a}{b}{c}` is produced by joining with `}{` inside one outer
            // brace pair.
            writer.write_all(b"{")?;
            write_list(writer, "}{", self.0.iter())?;
            writer.write_all(b"}")?;
        }
        Ok(())
    }
}
/// A TeX macro invocation.
///
/// Renders as the familiar `\name[opt1,opt2]{arg1}{arg2}` form, optionally
/// followed by a newline.
#[derive(Debug)]
pub struct MacroCall {
    /// Name of the macro, written right after the backslash.
    ident: Box<dyn TexElement>,
    /// Optional (bracketed) arguments.
    opt_args: OptArgs,
    /// Mandatory (braced) arguments.
    args: Args,
    /// Whether a newline is appended after the call.
    newline: bool,
}

impl MacroCall {
    /// Shared constructor backing `new` and `new_inline`.
    fn build<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args, newline: bool) -> Self {
        MacroCall {
            ident: ident.into_tex_element(),
            opt_args,
            args,
            newline,
        }
    }

    /// Creates a new macro call that ends with a newline when output.
    pub fn new<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
        Self::build(ident, opt_args, args, true)
    }

    /// Creates a new inline macro call; no trailing newline is emitted.
    pub fn new_inline<T: IntoTexElement>(ident: T, opt_args: OptArgs, args: Args) -> Self {
        Self::build(ident, opt_args, args, false)
    }
}

impl TexElement for MacroCall {
    fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
        writer.write_all(b"\\")?;
        self.ident.write_tex(writer)?;
        self.opt_args.write_tex(writer)?;
        self.args.write_tex(writer)?;
        if self.newline {
            writer.write_all(b"\n")?;
        }
        Ok(())
    }
}
/// A `\begin{...}` / `\end{...}` delimited block.
///
/// The same identifier is rendered into both delimiters; optional and
/// mandatory arguments follow the opening `\begin{...}`.
#[derive(Debug)]
pub struct BeginEndBlock {
    /// Environment name written into both delimiters.
    ident: Box<dyn TexElement>,
    /// Optional (bracketed) arguments after the opening delimiter.
    opt_args: OptArgs,
    /// Mandatory (braced) arguments after the opening delimiter.
    args: Args,
    /// Elements rendered between the delimiters.
    children: Vec<Box<dyn TexElement>>,
}

impl BeginEndBlock {
    /// Creates a new begin/end block.
    pub fn new<T: IntoTexElement>(
        ident: T,
        opt_args: OptArgs,
        args: Args,
        children: Vec<Box<dyn TexElement>>,
    ) -> Self {
        BeginEndBlock {
            ident: ident.into_tex_element(),
            opt_args,
            args,
            children,
        }
    }
}

impl TexElement for BeginEndBlock {
    fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
        // Opening delimiter: `\begin{ident}[opts]{args}` plus newline.
        writer.write_all(b"\\begin{")?;
        self.ident.write_tex(writer)?;
        writer.write_all(b"}")?;
        self.opt_args.write_tex(writer)?;
        self.args.write_tex(writer)?;
        writer.write_all(b"\n")?;
        // Body: children concatenated verbatim.
        self.children
            .iter()
            .try_for_each(|child| child.write_tex(writer))?;
        // Closing delimiter: `\end{ident}` on its own line.
        writer.write_all(b"\n\\end{")?;
        self.ident.write_tex(writer)?;
        writer.write_all(b"}\n")
    }
}
/// An anonymous block.
///
/// Renders its children inside a single pair of curly braces, e.g. `{...}`.
#[derive(Debug)]
pub struct AnonymousBlock(Vec<Box<dyn TexElement>>);

impl AnonymousBlock {
    /// Creates a new anonymous block.
    pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
        Self(elems)
    }
}

impl TexElement for AnonymousBlock {
    fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
        writer.write_all(b"{")?;
        self.0
            .iter()
            .try_for_each(|child| child.write_tex(writer))?;
        writer.write_all(b"}")
    }
}
/// Grouping of elements.
///
/// Concatenates its children in order when rendered; no delimiters or
/// separators are added.
#[derive(Debug)]
pub struct Group(Vec<Box<dyn TexElement>>);

impl Group {
    /// Creates a new group from the given elements.
    pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
        Self(elems)
    }
}

impl TexElement for Group {
    fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
        self.0.iter().try_for_each(|child| child.write_tex(writer))
    }
}
/// A table row.
///
/// Cells are joined with ` & ` and the row is terminated by `\\` plus a
/// newline.
#[derive(Debug)]
pub struct TableRow(Vec<Box<dyn TexElement>>);

impl TableRow {
    /// Creates a new table row from its cell elements.
    pub fn new(elems: Vec<Box<dyn TexElement>>) -> Self {
        Self(elems)
    }
}

impl TexElement for TableRow {
    fn write_tex(&self, writer: &mut dyn Write) -> io::Result<()> {
        write_list(writer, " & ", self.0.iter())?;
        writer.write_all(b"\\\\\n")
    }
}
| {
writer.write_all(separator.as_bytes())?;
} | conditional_block |
minijail.rs |
sys::signal,
unistd::{self, chown, pipe},
};
use npk::manifest::{Dev, Mount, MountFlag};
use std::{
fmt, iter, ops,
os::unix::prelude::RawFd,
path::{Path, PathBuf},
pin::Pin,
task::{Context, Poll},
};
use tokio::{
fs,
io::{self, unix::AsyncFd, AsyncBufReadExt, AsyncRead, AsyncWriteExt, ReadBuf},
select, task, time,
};
// We need a Send + Sync version of minijail::Minijail so the jail handle can
// be stored in types that cross task/thread boundaries.
struct MinijailHandle(::minijail::Minijail);
// SAFETY: NOTE(review) — `minijail::Minijail` is not `Send`/`Sync` on its own;
// these impls assert the handle is never accessed concurrently. That holds as
// long as each handle stays owned by a single `Process`; confirm libminijail
// keeps no thread-local state before relying on this more broadly.
unsafe impl Send for MinijailHandle {}
unsafe impl Sync for MinijailHandle {}
// Deref/DerefMut make the wrapper transparent: all `minijail::Minijail`
// methods can be called directly on a `MinijailHandle`.
impl ops::Deref for MinijailHandle {
    type Target = ::minijail::Minijail;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl ops::DerefMut for MinijailHandle {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
/// Runtime adapter that starts containers inside libminijail sandboxes.
#[derive(Debug)]
pub struct Minijail {
    // Write end of the logging pipe handed to libminijail (see `new`); kept
    // so child processes can inherit it in `start`.
    log_fd: i32,
    // Channel used to publish process lifecycle/output events to the runtime.
    event_tx: EventTx,
    // Directory that resource containers are mounted from (`Mount::Resource`).
    run_dir: PathBuf,
    // Directory backing per-container persistent data dirs (`Mount::Persist`).
    data_dir: PathBuf,
    // UID/GID the jailed process is switched to before exec.
    uid: u32,
    gid: u32,
}
impl Minijail {
pub(crate) fn new(
event_tx: EventTx,
run_dir: &Path,
data_dir: &Path,
uid: u32,
gid: u32,
) -> Result<Minijail, Error> {
let pipe = AsyncPipe::new()?;
let log_fd = pipe.writefd();
let mut lines = io::BufReader::new(pipe).lines();
// Spawn a task that forwards logs from minijail to the rust logger.
task::spawn(async move {
while let Ok(Some(line)) = lines.next_line().await {
let l = line.split_whitespace().skip(2).collect::<String>();
match line.chars().next() {
Some('D') => debug!("{}", l),
Some('I') => info!("{}", l),
Some('W') => warn!("{}", l),
Some('E') => error!("{}", l),
_ => trace!("{}", line),
}
}
});
let minijail_log_level = match log::max_level().to_level().unwrap_or(Level::Warn) {
Level::Error => 3,
Level::Warn => 4,
Level::Info => 6,
Level::Debug => 7,
Level::Trace => i32::MAX,
};
::minijail::Minijail::log_to_fd(log_fd, minijail_log_level as i32);
Ok(Minijail {
event_tx,
run_dir: run_dir.into(),
data_dir: data_dir.into(),
uid,
gid,
log_fd,
})
}
/// Shuts the jail runtime down.
///
/// Currently a no-op: there is nothing to tear down. The dead `if false`
/// branch keeps the `Result` return type and the `Error::Stop` variant in
/// use so clippy does not flag them while the real implementation is
/// pending.
pub(crate) fn shutdown(&self) -> Result<(), Error> {
    // Just make clippy happy
    if false {
        Err(Error::Stop)
    } else {
        Ok(())
    }
}
pub(crate) async fn start(&self, container: &Container) -> Result<Process, Error> {
let root = &container.root;
let manifest = &container.manifest;
let mut jail = MinijailHandle(::minijail::Minijail::new().map_err(Error::Minijail)?);
let init = manifest
.init
.as_ref()
.ok_or_else(|| Error::Start("Cannot start a resource".to_string()))?;
let tmpdir = tempfile::TempDir::new()
.map_err(|e| Error::Io(format!("Failed to create tmpdir for {}", manifest.name), e))?;
let tmpdir_path = tmpdir.path();
// Dump seccomp config to process tmpdir. This is a subject to be changed since
// minijail provides a API to configure seccomp without writing to a file.
// TODO: configure seccomp via API instead of a file
if let Some(ref seccomp) = container.manifest.seccomp |
// Configure UID
jail.change_uid(self.uid);
// Configure PID
jail.change_gid(self.gid);
// Update the capability mask if specified
if let Some(capabilities) = &manifest.capabilities {
// TODO: the capabilities should be passed as an array
jail.update_caps(&capabilities.join(" "))
.map_err(Error::Minijail)?;
}
// Update the supplementary group list if specified
if let Some(suppl_groups) = &manifest.suppl_groups {
// TODO: the groups should be passed an array
jail.update_suppl_groups(&suppl_groups.join(" "))
.map_err(Error::Minijail)?;
}
// TODO: Do not use pid namespace because of multithreadding
// issues discovered by minijail. See libminijail.c for details.
// Make the process enter a pid namespace
//jail.namespace_pids();
// Make the process enter a vfs namespace
jail.namespace_vfs();
// Set no_new_privs. See </kernel/seccomp.c> and </kernel/sys.c>
// in the kernel source tree for an explanation of the parameters.
jail.no_new_privs();
// Set chroot dir for process
jail.enter_chroot(&root.as_path())?;
// Make the application the init process
jail.run_as_init();
self.setup_mounts(&mut jail, container).await?;
// Arguments
let args = manifest.args.clone().unwrap_or_default();
let init_str = init.display().to_string();
let argv: Vec<&str> = iter::once(init_str.as_str())
.chain(args.iter().map(|s| s.as_str()))
.collect();
// Create environment for process. Set data directory, container name and version
let mut env = manifest.env.clone().unwrap_or_default();
env.insert(ENV_NAME.to_string(), manifest.name.to_string());
env.insert(ENV_VERSION.to_string(), manifest.version.to_string());
let env = env
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect::<Vec<String>>();
let env = env.iter().map(|a| a.as_str()).collect::<Vec<&str>>();
debug!(
"Executing \"{}{}{}\"",
init.display(),
if args.len() > 1 { " " } else { "" },
argv.iter().skip(1).join(" ")
);
let stdout =
CaptureOutput::new(OutputStream::Stdout, &manifest.name, self.event_tx.clone()).await?;
let stderr =
CaptureOutput::new(OutputStream::Stderr, &manifest.name, self.event_tx.clone()).await?;
// Prevent minijail to close the log fd so that errors aren't missed
let log_fd = (self.log_fd, self.log_fd);
let pid = jail.run_remap_env_preload(
&init.as_path(),
&[(stdout.0, 1), (stderr.0, 2), log_fd],
&argv,
&env,
false,
)? as u32;
let (exit_handle_signal, exit_handle_wait) = exit_handle();
// Spawn a task thats waits for the child to exit
waitpid(
&manifest.name,
pid,
exit_handle_signal,
self.event_tx.clone(),
)
.await;
Ok(Process {
pid,
_jail: jail,
_tmpdir: tmpdir,
_stdout: stdout,
_stderr: stderr,
exit_handle_wait,
})
}
async fn setup_mounts(
&self,
jail: &mut MinijailHandle,
container: &Container,
) -> Result<(), Error> {
let proc = Path::new("/proc");
jail.mount_bind(&proc, &proc, false)
.map_err(Error::Minijail)?;
jail.remount_proc_readonly();
// If there's no explicit mount for /dev add a minimal variant
if!container
.manifest
.mounts
.contains_key(&PathBuf::from("/dev"))
{
debug!("Mounting minimal /dev");
jail.mount_dev();
}
for (target, mount) in &container.manifest.mounts {
match &mount {
Mount::Bind { host, flags } => {
if!&host.exists() {
warn!(
"Cannot bind mount nonexitent source {} to {}",
host.display(),
target.display()
);
continue;
}
let rw = flags.contains(&MountFlag::Rw);
debug!(
"Mounting {} on {}{}",
host.display(),
target.display(),
if rw { " (rw)" } else { "" }
);
jail.mount_bind(&host, &target, rw)
.map_err(Error::Minijail)?;
}
Mount::Persist => {
let dir = self.data_dir.join(&container.manifest.name);
if!dir.exists() {
debug!("Creating {}", dir.display());
fs::create_dir_all(&dir).await.map_err(|e| {
Error::Io(format!("Failed to create {}", dir.display()), e)
})?;
}
debug!("Chowning {} to {}:{}", dir.display(), self.uid, self.gid);
chown(
dir.as_os_str(),
Some(unistd::Uid::from_raw(self.uid)),
Some(unistd::Gid::from_raw(self.gid)),
)
.map_err(|e| {
Error::Os(
format!(
"Failed to chown {} to {}:{}",
dir.display(),
self.uid,
self.gid
),
e,
)
})?;
debug!("Mounting {} on {}", dir.display(), target.display(),);
jail.mount_bind(&dir, &target, true)
.map_err(Error::Minijail)?;
}
Mount::Resource { name, version, dir } => {
let src = {
// Join the source of the resource container with the mount dir
let resource_root = self.run_dir.join(&name).join(&version.to_string());
let dir = dir
.strip_prefix("/")
.map(|d| resource_root.join(d))
.unwrap_or(resource_root);
if!dir.exists() {
return Err(Error::Start(format!(
"Resource folder {} is missing",
dir.display()
)));
}
dir
};
debug!("Mounting {} on {}", src.display(), target.display());
jail.mount_bind(&src, &target, false)
.map_err(Error::Minijail)?;
}
Mount::Tmpfs { size } => {
debug!(
"Mounting tmpfs with size {} on {}",
bytesize::ByteSize::b(*size),
target.display()
);
let data = format!("size={},mode=1777", size);
jail.mount_with_data(&Path::new("none"), &target, "tmpfs", 0, &data)
.map_err(Error::Minijail)?;
}
Mount::Dev { r#type } => {
match r#type {
// The Full mount of /dev is a simple rw bind mount of /dev
Dev::Full => {
let dev = Path::new("/dev");
jail.mount_bind(&dev, &dev, true).map_err(Error::Minijail)?;
}
}
}
}
}
Ok(())
}
}
pub(crate) struct Process {
/// PID of this process
pid: u32,
/// Handle to a libminijail configuration
_jail: MinijailHandle,
/// Temporary directory created in the systems tmp folder.
/// This directory holds process instance specific data that needs
/// to be dumped to disk for startup. e.g seccomp config (TODO)
_tmpdir: tempfile::TempDir,
/// Captured stdout output
_stdout: CaptureOutput,
/// Captured stderr output
_stderr: CaptureOutput,
/// Rx part of the exit handle of this process
exit_handle_wait: ExitHandleWait,
}
impl fmt::Debug for Process {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Process").field("pid", &self.pid).finish()
}
}
impl Process {
pub fn pid(&self) -> Pid {
self.pid
}
pub async fn stop(&mut self, timeout: time::Duration) -> Result<ExitStatus, Error> {
// Send a SIGTERM to the application. If the application does not terminate with a timeout
// it is SIGKILLed.
let sigterm = signal::Signal::SIGTERM;
signal::kill(unistd::Pid::from_raw(self.pid as i32), Some(sigterm))
.map_err(|e| Error::Os(format!("Failed to SIGTERM {}", self.pid), e))?;
let timeout = Box::pin(time::sleep(timeout));
let exited = Box::pin(self.exit_handle_wait.recv());
let pid = self.pid;
Ok(select! {
s = exited => {
s.expect("Internal channel error during process termination") // This is the happy path...
},
_ = timeout => {
signal::kill(unistd::Pid::from_raw(pid as i32), Some(signal::Signal::SIGKILL))
.map_err(|e| Error::Os("Failed to kill process".to_string(), e))?;
ExitStatus::Signaled(signal::Signal::SIGKILL)
}
})
}
}
struct AsyncPipe {
inner: AsyncFd<std::fs::File>,
writefd: i32,
}
impl AsyncPipe {
fn new() -> Result<AsyncPipe, Error> {
let (readfd, writefd) =
pipe().map_err(|e| Error::Os("Failed to create pipe".to_string(), e))?;
let mut flags = OFlag::from_bits(fcntl(readfd, fcntl::FcntlArg::F_GETFL).unwrap()).unwrap();
flags.set(OFlag::O_NONBLOCK, true);
fcntl(readfd, fcntl::FcntlArg::F_SETFL(flags)).expect("Failed to configure pipe fd");
let pipe =
unsafe { <std::fs::File as std::os::unix::prelude::FromRawFd>::from_raw_fd(readfd) };
let inner = AsyncFd::new(pipe).map_err(|e| Error::Io("Async fd".to_string(), e))?;
Ok(AsyncPipe { inner, writefd })
}
fn writefd(&self) -> RawFd {
self.writefd
}
}
impl AsyncRead for AsyncPipe {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
loop {
let mut guard = futures::ready!(self.inner.poll_read_ready(cx))?;
match guard
.try_io(|inner| std::io::Read::read(&mut inner.get_ref(), buf.initialized_mut()))
{
Ok(Ok(n)) => {
buf.advance(n);
break Poll::Ready(Ok(()));
}
Ok(Err(e)) => break Poll::Ready(Err(e)),
Err(_would_block) => continue,
}
}
}
}
// Capture output of a child process. Create a pipe and spawn a task that forwards each line to
// the main loop. When this struct is dropped the internal spawned tasks are stopped.
#[derive(Debug)]
struct CaptureOutput(i32, oneshot::Sender<()>);
impl CaptureOutput {
pub async fn new(
stream: OutputStream,
tag: &str,
event_tx: EventTx,
) -> Result<CaptureOutput, Error> {
| {
let seccomp_config = tmpdir_path.join("seccomp");
let mut f = fs::File::create(&seccomp_config)
.await
.map_err(|e| Error::Io("Failed to create seccomp configuraiton".to_string(), e))?;
let s = itertools::join(seccomp.iter().map(|(k, v)| format!("{}: {}", k, v)), "\n");
f.write_all(s.as_bytes())
.await
.map_err(|e| Error::Io("Failed to write seccomp configuraiton".to_string(), e))?;
// Temporary disabled
// Must be called before parse_seccomp_filters
// jail.log_seccomp_filter_failures();
// let p: std::path::PathBuf = seccomp_config.into();
// jail.parse_seccomp_filters(p.as_path())
// .context("Failed parse seccomp config")?;
// jail.use_seccomp_filter();
} | conditional_block |
minijail.rs |
sys::signal,
unistd::{self, chown, pipe},
};
use npk::manifest::{Dev, Mount, MountFlag};
use std::{
fmt, iter, ops,
os::unix::prelude::RawFd,
path::{Path, PathBuf},
pin::Pin,
task::{Context, Poll},
};
use tokio::{
fs,
io::{self, unix::AsyncFd, AsyncBufReadExt, AsyncRead, AsyncWriteExt, ReadBuf},
select, task, time,
};
// We need a Send + Sync version of minijail::Minijail
struct MinijailHandle(::minijail::Minijail);
unsafe impl Send for MinijailHandle {}
unsafe impl Sync for MinijailHandle {}
impl ops::Deref for MinijailHandle {
type Target = ::minijail::Minijail;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl ops::DerefMut for MinijailHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Debug)]
pub struct Minijail {
log_fd: i32,
event_tx: EventTx,
run_dir: PathBuf,
data_dir: PathBuf,
uid: u32,
gid: u32,
}
impl Minijail {
/// Creates the minijail runtime adapter and starts the log-forwarding task.
///
/// Sets up a pipe whose write end is registered with libminijail as its log
/// sink; a background task reads the pipe line by line and re-emits each
/// message through the `log` crate at the matching level.
pub(crate) fn new(
    event_tx: EventTx,
    run_dir: &Path,
    data_dir: &Path,
    uid: u32,
    gid: u32,
) -> Result<Minijail, Error> {
    let pipe = AsyncPipe::new()?;
    let log_fd = pipe.writefd();
    let mut lines = io::BufReader::new(pipe).lines();
    // Spawn a task that forwards logs from minijail to the rust logger.
    task::spawn(async move {
        while let Ok(Some(line)) = lines.next_line().await {
            // Drop the first two whitespace-separated fields and keep the rest
            // as the message.
            // NOTE(review): `collect::<String>()` concatenates the remaining
            // fields WITHOUT spaces, so multi-word messages come out fused —
            // consider joining with " " instead.
            let l = line.split_whitespace().skip(2).collect::<String>();
            // The first character of the raw line encodes the severity.
            match line.chars().next() {
                Some('D') => debug!("{}", l),
                Some('I') => info!("{}", l),
                Some('W') => warn!("{}", l),
                Some('E') => error!("{}", l),
                _ => trace!("{}", line),
            }
        }
    });
    // Map the `log` crate's max level to the syslog-style numeric severity
    // libminijail expects (3=error, 4=warning, 6=info, 7=debug).
    let minijail_log_level = match log::max_level().to_level().unwrap_or(Level::Warn) {
        Level::Error => 3,
        Level::Warn => 4,
        Level::Info => 6,
        Level::Debug => 7,
        Level::Trace => i32::MAX,
    };
    ::minijail::Minijail::log_to_fd(log_fd, minijail_log_level as i32);
    Ok(Minijail {
        event_tx,
        run_dir: run_dir.into(),
        data_dir: data_dir.into(),
        uid,
        gid,
        log_fd,
    })
}
pub(crate) fn | (&self) -> Result<(), Error> {
// Just make clippy happy
if false {
Err(Error::Stop)
} else {
Ok(())
}
}
pub(crate) async fn start(&self, container: &Container) -> Result<Process, Error> {
let root = &container.root;
let manifest = &container.manifest;
let mut jail = MinijailHandle(::minijail::Minijail::new().map_err(Error::Minijail)?);
let init = manifest
.init
.as_ref()
.ok_or_else(|| Error::Start("Cannot start a resource".to_string()))?;
let tmpdir = tempfile::TempDir::new()
.map_err(|e| Error::Io(format!("Failed to create tmpdir for {}", manifest.name), e))?;
let tmpdir_path = tmpdir.path();
// Dump seccomp config to process tmpdir. This is a subject to be changed since
// minijail provides a API to configure seccomp without writing to a file.
// TODO: configure seccomp via API instead of a file
if let Some(ref seccomp) = container.manifest.seccomp {
let seccomp_config = tmpdir_path.join("seccomp");
let mut f = fs::File::create(&seccomp_config)
.await
.map_err(|e| Error::Io("Failed to create seccomp configuraiton".to_string(), e))?;
let s = itertools::join(seccomp.iter().map(|(k, v)| format!("{}: {}", k, v)), "\n");
f.write_all(s.as_bytes())
.await
.map_err(|e| Error::Io("Failed to write seccomp configuraiton".to_string(), e))?;
// Temporary disabled
// Must be called before parse_seccomp_filters
// jail.log_seccomp_filter_failures();
// let p: std::path::PathBuf = seccomp_config.into();
// jail.parse_seccomp_filters(p.as_path())
// .context("Failed parse seccomp config")?;
// jail.use_seccomp_filter();
}
// Configure UID
jail.change_uid(self.uid);
// Configure PID
jail.change_gid(self.gid);
// Update the capability mask if specified
if let Some(capabilities) = &manifest.capabilities {
// TODO: the capabilities should be passed as an array
jail.update_caps(&capabilities.join(" "))
.map_err(Error::Minijail)?;
}
// Update the supplementary group list if specified
if let Some(suppl_groups) = &manifest.suppl_groups {
// TODO: the groups should be passed an array
jail.update_suppl_groups(&suppl_groups.join(" "))
.map_err(Error::Minijail)?;
}
// TODO: Do not use pid namespace because of multithreadding
// issues discovered by minijail. See libminijail.c for details.
// Make the process enter a pid namespace
//jail.namespace_pids();
// Make the process enter a vfs namespace
jail.namespace_vfs();
// Set no_new_privs. See </kernel/seccomp.c> and </kernel/sys.c>
// in the kernel source tree for an explanation of the parameters.
jail.no_new_privs();
// Set chroot dir for process
jail.enter_chroot(&root.as_path())?;
// Make the application the init process
jail.run_as_init();
self.setup_mounts(&mut jail, container).await?;
// Arguments
let args = manifest.args.clone().unwrap_or_default();
let init_str = init.display().to_string();
let argv: Vec<&str> = iter::once(init_str.as_str())
.chain(args.iter().map(|s| s.as_str()))
.collect();
// Create environment for process. Set data directory, container name and version
let mut env = manifest.env.clone().unwrap_or_default();
env.insert(ENV_NAME.to_string(), manifest.name.to_string());
env.insert(ENV_VERSION.to_string(), manifest.version.to_string());
let env = env
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect::<Vec<String>>();
let env = env.iter().map(|a| a.as_str()).collect::<Vec<&str>>();
debug!(
"Executing \"{}{}{}\"",
init.display(),
if args.len() > 1 { " " } else { "" },
argv.iter().skip(1).join(" ")
);
let stdout =
CaptureOutput::new(OutputStream::Stdout, &manifest.name, self.event_tx.clone()).await?;
let stderr =
CaptureOutput::new(OutputStream::Stderr, &manifest.name, self.event_tx.clone()).await?;
// Prevent minijail to close the log fd so that errors aren't missed
let log_fd = (self.log_fd, self.log_fd);
let pid = jail.run_remap_env_preload(
&init.as_path(),
&[(stdout.0, 1), (stderr.0, 2), log_fd],
&argv,
&env,
false,
)? as u32;
let (exit_handle_signal, exit_handle_wait) = exit_handle();
// Spawn a task thats waits for the child to exit
waitpid(
&manifest.name,
pid,
exit_handle_signal,
self.event_tx.clone(),
)
.await;
Ok(Process {
pid,
_jail: jail,
_tmpdir: tmpdir,
_stdout: stdout,
_stderr: stderr,
exit_handle_wait,
})
}
async fn setup_mounts(
&self,
jail: &mut MinijailHandle,
container: &Container,
) -> Result<(), Error> {
let proc = Path::new("/proc");
jail.mount_bind(&proc, &proc, false)
.map_err(Error::Minijail)?;
jail.remount_proc_readonly();
// If there's no explicit mount for /dev add a minimal variant
if!container
.manifest
.mounts
.contains_key(&PathBuf::from("/dev"))
{
debug!("Mounting minimal /dev");
jail.mount_dev();
}
for (target, mount) in &container.manifest.mounts {
match &mount {
Mount::Bind { host, flags } => {
if!&host.exists() {
warn!(
"Cannot bind mount nonexitent source {} to {}",
host.display(),
target.display()
);
continue;
}
let rw = flags.contains(&MountFlag::Rw);
debug!(
"Mounting {} on {}{}",
host.display(),
target.display(),
if rw { " (rw)" } else { "" }
);
jail.mount_bind(&host, &target, rw)
.map_err(Error::Minijail)?;
}
Mount::Persist => {
let dir = self.data_dir.join(&container.manifest.name);
if!dir.exists() {
debug!("Creating {}", dir.display());
fs::create_dir_all(&dir).await.map_err(|e| {
Error::Io(format!("Failed to create {}", dir.display()), e)
})?;
}
debug!("Chowning {} to {}:{}", dir.display(), self.uid, self.gid);
chown(
dir.as_os_str(),
Some(unistd::Uid::from_raw(self.uid)),
Some(unistd::Gid::from_raw(self.gid)),
)
.map_err(|e| {
Error::Os(
format!(
"Failed to chown {} to {}:{}",
dir.display(),
self.uid,
self.gid
),
e,
)
})?;
debug!("Mounting {} on {}", dir.display(), target.display(),);
jail.mount_bind(&dir, &target, true)
.map_err(Error::Minijail)?;
}
Mount::Resource { name, version, dir } => {
let src = {
// Join the source of the resource container with the mount dir
let resource_root = self.run_dir.join(&name).join(&version.to_string());
let dir = dir
.strip_prefix("/")
.map(|d| resource_root.join(d))
.unwrap_or(resource_root);
if!dir.exists() {
return Err(Error::Start(format!(
"Resource folder {} is missing",
dir.display()
)));
}
dir
};
debug!("Mounting {} on {}", src.display(), target.display());
jail.mount_bind(&src, &target, false)
.map_err(Error::Minijail)?;
}
Mount::Tmpfs { size } => {
debug!(
"Mounting tmpfs with size {} on {}",
bytesize::ByteSize::b(*size),
target.display()
);
let data = format!("size={},mode=1777", size);
jail.mount_with_data(&Path::new("none"), &target, "tmpfs", 0, &data)
.map_err(Error::Minijail)?;
}
Mount::Dev { r#type } => {
match r#type {
// The Full mount of /dev is a simple rw bind mount of /dev
Dev::Full => {
let dev = Path::new("/dev");
jail.mount_bind(&dev, &dev, true).map_err(Error::Minijail)?;
}
}
}
}
}
Ok(())
}
}
pub(crate) struct Process {
/// PID of this process
pid: u32,
/// Handle to a libminijail configuration
_jail: MinijailHandle,
/// Temporary directory created in the systems tmp folder.
/// This directory holds process instance specific data that needs
/// to be dumped to disk for startup. e.g seccomp config (TODO)
_tmpdir: tempfile::TempDir,
/// Captured stdout output
_stdout: CaptureOutput,
/// Captured stderr output
_stderr: CaptureOutput,
/// Rx part of the exit handle of this process
exit_handle_wait: ExitHandleWait,
}
impl fmt::Debug for Process {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Process").field("pid", &self.pid).finish()
}
}
impl Process {
    /// Returns the PID of the jailed child process.
    pub fn pid(&self) -> Pid {
        self.pid
    }

    /// Stops the process, gracefully if possible.
    ///
    /// Sends SIGTERM and waits for the exit notification on the internal
    /// exit handle; if the child has not exited within `timeout` it is
    /// SIGKILLed and `ExitStatus::Signaled(SIGKILL)` is returned.
    pub async fn stop(&mut self, timeout: time::Duration) -> Result<ExitStatus, Error> {
        // Send a SIGTERM to the application. If the application does not terminate with a timeout
        // it is SIGKILLed.
        let sigterm = signal::Signal::SIGTERM;
        signal::kill(unistd::Pid::from_raw(self.pid as i32), Some(sigterm))
            .map_err(|e| Error::Os(format!("Failed to SIGTERM {}", self.pid), e))?;
        let timeout = Box::pin(time::sleep(timeout));
        let exited = Box::pin(self.exit_handle_wait.recv());
        let pid = self.pid;
        Ok(select! {
            s = exited => {
                s.expect("Internal channel error during process termination") // This is the happy path...
            },
            _ = timeout => {
                // Grace period elapsed: escalate to SIGKILL.
                // NOTE(review): the exit event for the SIGKILL is not awaited
                // here — confirm the waitpid task still reaps the child.
                signal::kill(unistd::Pid::from_raw(pid as i32), Some(signal::Signal::SIGKILL))
                    .map_err(|e| Error::Os("Failed to kill process".to_string(), e))?;
                ExitStatus::Signaled(signal::Signal::SIGKILL)
            }
        })
    }
}
struct AsyncPipe {
inner: AsyncFd<std::fs::File>,
writefd: i32,
}
impl AsyncPipe {
fn new() -> Result<AsyncPipe, Error> {
let (readfd, writefd) =
pipe().map_err(|e| Error::Os("Failed to create pipe".to_string(), e))?;
let mut flags = OFlag::from_bits(fcntl(readfd, fcntl::FcntlArg::F_GETFL).unwrap()).unwrap();
flags.set(OFlag::O_NONBLOCK, true);
fcntl(readfd, fcntl::FcntlArg::F_SETFL(flags)).expect("Failed to configure pipe fd");
let pipe =
unsafe { <std::fs::File as std::os::unix::prelude::FromRawFd>::from_raw_fd(readfd) };
let inner = AsyncFd::new(pipe).map_err(|e| Error::Io("Async fd".to_string(), e))?;
Ok(AsyncPipe { inner, writefd })
}
fn writefd(&self) -> RawFd {
self.writefd
}
}
impl AsyncRead for AsyncPipe {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
loop {
let mut guard = futures::ready!(self.inner.poll_read_ready(cx))?;
match guard
.try_io(|inner| std::io::Read::read(&mut inner.get_ref(), buf.initialized_mut()))
{
Ok(Ok(n)) => {
buf.advance(n);
break Poll::Ready(Ok(()));
}
Ok(Err(e)) => break Poll::Ready(Err(e)),
Err(_would_block) => continue,
}
}
}
}
// Capture output of a child process. Create a pipe and spawn a task that forwards each line to
// the main loop. When this struct is dropped the internal spawned tasks are stopped.
#[derive(Debug)]
struct CaptureOutput(i32, oneshot::Sender<()>);
impl CaptureOutput {
pub async fn new(
stream: OutputStream,
tag: &str,
event_tx: EventTx,
) -> Result<CaptureOutput, Error> {
| shutdown | identifier_name |
minijail.rs |
sys::signal,
unistd::{self, chown, pipe},
};
use npk::manifest::{Dev, Mount, MountFlag};
use std::{
fmt, iter, ops,
os::unix::prelude::RawFd,
path::{Path, PathBuf},
pin::Pin,
task::{Context, Poll},
};
use tokio::{
fs,
io::{self, unix::AsyncFd, AsyncBufReadExt, AsyncRead, AsyncWriteExt, ReadBuf},
select, task, time,
};
// We need a Send + Sync version of minijail::Minijail
struct MinijailHandle(::minijail::Minijail);
unsafe impl Send for MinijailHandle {}
unsafe impl Sync for MinijailHandle {}
impl ops::Deref for MinijailHandle {
type Target = ::minijail::Minijail;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl ops::DerefMut for MinijailHandle {
fn deref_mut(&mut self) -> &mut Self::Target |
}
#[derive(Debug)]
pub struct Minijail {
log_fd: i32,
event_tx: EventTx,
run_dir: PathBuf,
data_dir: PathBuf,
uid: u32,
gid: u32,
}
impl Minijail {
pub(crate) fn new(
event_tx: EventTx,
run_dir: &Path,
data_dir: &Path,
uid: u32,
gid: u32,
) -> Result<Minijail, Error> {
let pipe = AsyncPipe::new()?;
let log_fd = pipe.writefd();
let mut lines = io::BufReader::new(pipe).lines();
// Spawn a task that forwards logs from minijail to the rust logger.
task::spawn(async move {
while let Ok(Some(line)) = lines.next_line().await {
let l = line.split_whitespace().skip(2).collect::<String>();
match line.chars().next() {
Some('D') => debug!("{}", l),
Some('I') => info!("{}", l),
Some('W') => warn!("{}", l),
Some('E') => error!("{}", l),
_ => trace!("{}", line),
}
}
});
let minijail_log_level = match log::max_level().to_level().unwrap_or(Level::Warn) {
Level::Error => 3,
Level::Warn => 4,
Level::Info => 6,
Level::Debug => 7,
Level::Trace => i32::MAX,
};
::minijail::Minijail::log_to_fd(log_fd, minijail_log_level as i32);
Ok(Minijail {
event_tx,
run_dir: run_dir.into(),
data_dir: data_dir.into(),
uid,
gid,
log_fd,
})
}
pub(crate) fn shutdown(&self) -> Result<(), Error> {
// Just make clippy happy
if false {
Err(Error::Stop)
} else {
Ok(())
}
}
pub(crate) async fn start(&self, container: &Container) -> Result<Process, Error> {
let root = &container.root;
let manifest = &container.manifest;
let mut jail = MinijailHandle(::minijail::Minijail::new().map_err(Error::Minijail)?);
let init = manifest
.init
.as_ref()
.ok_or_else(|| Error::Start("Cannot start a resource".to_string()))?;
let tmpdir = tempfile::TempDir::new()
.map_err(|e| Error::Io(format!("Failed to create tmpdir for {}", manifest.name), e))?;
let tmpdir_path = tmpdir.path();
// Dump seccomp config to process tmpdir. This is a subject to be changed since
// minijail provides a API to configure seccomp without writing to a file.
// TODO: configure seccomp via API instead of a file
if let Some(ref seccomp) = container.manifest.seccomp {
let seccomp_config = tmpdir_path.join("seccomp");
let mut f = fs::File::create(&seccomp_config)
.await
.map_err(|e| Error::Io("Failed to create seccomp configuraiton".to_string(), e))?;
let s = itertools::join(seccomp.iter().map(|(k, v)| format!("{}: {}", k, v)), "\n");
f.write_all(s.as_bytes())
.await
.map_err(|e| Error::Io("Failed to write seccomp configuraiton".to_string(), e))?;
// Temporary disabled
// Must be called before parse_seccomp_filters
// jail.log_seccomp_filter_failures();
// let p: std::path::PathBuf = seccomp_config.into();
// jail.parse_seccomp_filters(p.as_path())
// .context("Failed parse seccomp config")?;
// jail.use_seccomp_filter();
}
// Configure UID
jail.change_uid(self.uid);
// Configure PID
jail.change_gid(self.gid);
// Update the capability mask if specified
if let Some(capabilities) = &manifest.capabilities {
// TODO: the capabilities should be passed as an array
jail.update_caps(&capabilities.join(" "))
.map_err(Error::Minijail)?;
}
// Update the supplementary group list if specified
if let Some(suppl_groups) = &manifest.suppl_groups {
// TODO: the groups should be passed an array
jail.update_suppl_groups(&suppl_groups.join(" "))
.map_err(Error::Minijail)?;
}
// TODO: Do not use pid namespace because of multithreadding
// issues discovered by minijail. See libminijail.c for details.
// Make the process enter a pid namespace
//jail.namespace_pids();
// Make the process enter a vfs namespace
jail.namespace_vfs();
// Set no_new_privs. See </kernel/seccomp.c> and </kernel/sys.c>
// in the kernel source tree for an explanation of the parameters.
jail.no_new_privs();
// Set chroot dir for process
jail.enter_chroot(&root.as_path())?;
// Make the application the init process
jail.run_as_init();
self.setup_mounts(&mut jail, container).await?;
// Arguments
let args = manifest.args.clone().unwrap_or_default();
let init_str = init.display().to_string();
let argv: Vec<&str> = iter::once(init_str.as_str())
.chain(args.iter().map(|s| s.as_str()))
.collect();
// Create environment for process. Set data directory, container name and version
let mut env = manifest.env.clone().unwrap_or_default();
env.insert(ENV_NAME.to_string(), manifest.name.to_string());
env.insert(ENV_VERSION.to_string(), manifest.version.to_string());
let env = env
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect::<Vec<String>>();
let env = env.iter().map(|a| a.as_str()).collect::<Vec<&str>>();
debug!(
"Executing \"{}{}{}\"",
init.display(),
if args.len() > 1 { " " } else { "" },
argv.iter().skip(1).join(" ")
);
let stdout =
CaptureOutput::new(OutputStream::Stdout, &manifest.name, self.event_tx.clone()).await?;
let stderr =
CaptureOutput::new(OutputStream::Stderr, &manifest.name, self.event_tx.clone()).await?;
// Prevent minijail to close the log fd so that errors aren't missed
let log_fd = (self.log_fd, self.log_fd);
let pid = jail.run_remap_env_preload(
&init.as_path(),
&[(stdout.0, 1), (stderr.0, 2), log_fd],
&argv,
&env,
false,
)? as u32;
let (exit_handle_signal, exit_handle_wait) = exit_handle();
// Spawn a task thats waits for the child to exit
waitpid(
&manifest.name,
pid,
exit_handle_signal,
self.event_tx.clone(),
)
.await;
Ok(Process {
pid,
_jail: jail,
_tmpdir: tmpdir,
_stdout: stdout,
_stderr: stderr,
exit_handle_wait,
})
}
async fn setup_mounts(
&self,
jail: &mut MinijailHandle,
container: &Container,
) -> Result<(), Error> {
let proc = Path::new("/proc");
jail.mount_bind(&proc, &proc, false)
.map_err(Error::Minijail)?;
jail.remount_proc_readonly();
// If there's no explicit mount for /dev add a minimal variant
if!container
.manifest
.mounts
.contains_key(&PathBuf::from("/dev"))
{
debug!("Mounting minimal /dev");
jail.mount_dev();
}
for (target, mount) in &container.manifest.mounts {
match &mount {
Mount::Bind { host, flags } => {
if!&host.exists() {
warn!(
"Cannot bind mount nonexitent source {} to {}",
host.display(),
target.display()
);
continue;
}
let rw = flags.contains(&MountFlag::Rw);
debug!(
"Mounting {} on {}{}",
host.display(),
target.display(),
if rw { " (rw)" } else { "" }
);
jail.mount_bind(&host, &target, rw)
.map_err(Error::Minijail)?;
}
Mount::Persist => {
let dir = self.data_dir.join(&container.manifest.name);
if!dir.exists() {
debug!("Creating {}", dir.display());
fs::create_dir_all(&dir).await.map_err(|e| {
Error::Io(format!("Failed to create {}", dir.display()), e)
})?;
}
debug!("Chowning {} to {}:{}", dir.display(), self.uid, self.gid);
chown(
dir.as_os_str(),
Some(unistd::Uid::from_raw(self.uid)),
Some(unistd::Gid::from_raw(self.gid)),
)
.map_err(|e| {
Error::Os(
format!(
"Failed to chown {} to {}:{}",
dir.display(),
self.uid,
self.gid
),
e,
)
})?;
debug!("Mounting {} on {}", dir.display(), target.display(),);
jail.mount_bind(&dir, &target, true)
.map_err(Error::Minijail)?;
}
Mount::Resource { name, version, dir } => {
let src = {
// Join the source of the resource container with the mount dir
let resource_root = self.run_dir.join(&name).join(&version.to_string());
let dir = dir
.strip_prefix("/")
.map(|d| resource_root.join(d))
.unwrap_or(resource_root);
if!dir.exists() {
return Err(Error::Start(format!(
"Resource folder {} is missing",
dir.display()
)));
}
dir
};
debug!("Mounting {} on {}", src.display(), target.display());
jail.mount_bind(&src, &target, false)
.map_err(Error::Minijail)?;
}
Mount::Tmpfs { size } => {
debug!(
"Mounting tmpfs with size {} on {}",
bytesize::ByteSize::b(*size),
target.display()
);
let data = format!("size={},mode=1777", size);
jail.mount_with_data(&Path::new("none"), &target, "tmpfs", 0, &data)
.map_err(Error::Minijail)?;
}
Mount::Dev { r#type } => {
match r#type {
// The Full mount of /dev is a simple rw bind mount of /dev
Dev::Full => {
let dev = Path::new("/dev");
jail.mount_bind(&dev, &dev, true).map_err(Error::Minijail)?;
}
}
}
}
}
Ok(())
}
}
pub(crate) struct Process {
/// PID of this process
pid: u32,
/// Handle to a libminijail configuration
_jail: MinijailHandle,
/// Temporary directory created in the systems tmp folder.
/// This directory holds process instance specific data that needs
/// to be dumped to disk for startup. e.g seccomp config (TODO)
_tmpdir: tempfile::TempDir,
/// Captured stdout output
_stdout: CaptureOutput,
/// Captured stderr output
_stderr: CaptureOutput,
/// Rx part of the exit handle of this process
exit_handle_wait: ExitHandleWait,
}
impl fmt::Debug for Process {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Process").field("pid", &self.pid).finish()
}
}
impl Process {
pub fn pid(&self) -> Pid {
self.pid
}
pub async fn stop(&mut self, timeout: time::Duration) -> Result<ExitStatus, Error> {
// Send a SIGTERM to the application. If the application does not terminate with a timeout
// it is SIGKILLed.
let sigterm = signal::Signal::SIGTERM;
signal::kill(unistd::Pid::from_raw(self.pid as i32), Some(sigterm))
.map_err(|e| Error::Os(format!("Failed to SIGTERM {}", self.pid), e))?;
let timeout = Box::pin(time::sleep(timeout));
let exited = Box::pin(self.exit_handle_wait.recv());
let pid = self.pid;
Ok(select! {
s = exited => {
s.expect("Internal channel error during process termination") // This is the happy path...
},
_ = timeout => {
signal::kill(unistd::Pid::from_raw(pid as i32), Some(signal::Signal::SIGKILL))
.map_err(|e| Error::Os("Failed to kill process".to_string(), e))?;
ExitStatus::Signaled(signal::Signal::SIGKILL)
}
})
}
}
struct AsyncPipe {
inner: AsyncFd<std::fs::File>,
writefd: i32,
}
impl AsyncPipe {
fn new() -> Result<AsyncPipe, Error> {
let (readfd, writefd) =
pipe().map_err(|e| Error::Os("Failed to create pipe".to_string(), e))?;
let mut flags = OFlag::from_bits(fcntl(readfd, fcntl::FcntlArg::F_GETFL).unwrap()).unwrap();
flags.set(OFlag::O_NONBLOCK, true);
fcntl(readfd, fcntl::FcntlArg::F_SETFL(flags)).expect("Failed to configure pipe fd");
let pipe =
unsafe { <std::fs::File as std::os::unix::prelude::FromRawFd>::from_raw_fd(readfd) };
let inner = AsyncFd::new(pipe).map_err(|e| Error::Io("Async fd".to_string(), e))?;
Ok(AsyncPipe { inner, writefd })
}
fn writefd(&self) -> RawFd {
self.writefd
}
}
impl AsyncRead for AsyncPipe {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
loop {
let mut guard = futures::ready!(self.inner.poll_read_ready(cx))?;
match guard
.try_io(|inner| std::io::Read::read(&mut inner.get_ref(), buf.initialized_mut()))
{
Ok(Ok(n)) => {
buf.advance(n);
break Poll::Ready(Ok(()));
}
Ok(Err(e)) => break Poll::Ready(Err(e)),
Err(_would_block) => continue,
}
}
}
}
// Capture output of a child process. Create a pipe and spawn a task that forwards each line to
// the main loop. When this struct is dropped the internal spawned tasks are stopped.
#[derive(Debug)]
struct CaptureOutput(i32, oneshot::Sender<()>);
impl CaptureOutput {
pub async fn new(
stream: OutputStream,
tag: &str,
event_tx: EventTx,
) -> Result<CaptureOutput, Error> {
| {
&mut self.0
} | identifier_body |
minijail.rs | },
sys::signal,
unistd::{self, chown, pipe},
};
use npk::manifest::{Dev, Mount, MountFlag};
use std::{
fmt, iter, ops,
os::unix::prelude::RawFd,
path::{Path, PathBuf},
pin::Pin,
task::{Context, Poll},
};
use tokio::{
fs,
io::{self, unix::AsyncFd, AsyncBufReadExt, AsyncRead, AsyncWriteExt, ReadBuf},
select, task, time,
};
// We need a Send + Sync version of minijail::Minijail
struct MinijailHandle(::minijail::Minijail);
unsafe impl Send for MinijailHandle {}
unsafe impl Sync for MinijailHandle {}
impl ops::Deref for MinijailHandle {
type Target = ::minijail::Minijail;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl ops::DerefMut for MinijailHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Debug)]
pub struct Minijail {
log_fd: i32,
event_tx: EventTx,
run_dir: PathBuf,
data_dir: PathBuf,
uid: u32,
gid: u32,
}
impl Minijail {
pub(crate) fn new(
event_tx: EventTx,
run_dir: &Path,
data_dir: &Path,
uid: u32,
gid: u32,
) -> Result<Minijail, Error> {
let pipe = AsyncPipe::new()?;
let log_fd = pipe.writefd();
let mut lines = io::BufReader::new(pipe).lines();
// Spawn a task that forwards logs from minijail to the rust logger.
task::spawn(async move {
while let Ok(Some(line)) = lines.next_line().await {
let l = line.split_whitespace().skip(2).collect::<String>();
match line.chars().next() {
Some('D') => debug!("{}", l),
Some('I') => info!("{}", l),
Some('W') => warn!("{}", l),
Some('E') => error!("{}", l),
_ => trace!("{}", line),
}
}
});
let minijail_log_level = match log::max_level().to_level().unwrap_or(Level::Warn) {
Level::Error => 3,
Level::Warn => 4,
Level::Info => 6,
Level::Debug => 7,
Level::Trace => i32::MAX,
};
::minijail::Minijail::log_to_fd(log_fd, minijail_log_level as i32);
Ok(Minijail {
event_tx,
run_dir: run_dir.into(),
data_dir: data_dir.into(),
uid,
gid,
log_fd,
})
}
pub(crate) fn shutdown(&self) -> Result<(), Error> {
// Just make clippy happy
if false {
Err(Error::Stop)
} else {
Ok(())
}
}
pub(crate) async fn start(&self, container: &Container) -> Result<Process, Error> {
let root = &container.root;
let manifest = &container.manifest;
let mut jail = MinijailHandle(::minijail::Minijail::new().map_err(Error::Minijail)?);
let init = manifest
.init
.as_ref()
.ok_or_else(|| Error::Start("Cannot start a resource".to_string()))?;
let tmpdir = tempfile::TempDir::new()
.map_err(|e| Error::Io(format!("Failed to create tmpdir for {}", manifest.name), e))?;
let tmpdir_path = tmpdir.path();
// Dump seccomp config to process tmpdir. This is a subject to be changed since
// minijail provides a API to configure seccomp without writing to a file.
// TODO: configure seccomp via API instead of a file
if let Some(ref seccomp) = container.manifest.seccomp {
let seccomp_config = tmpdir_path.join("seccomp");
let mut f = fs::File::create(&seccomp_config)
.await
.map_err(|e| Error::Io("Failed to create seccomp configuraiton".to_string(), e))?;
let s = itertools::join(seccomp.iter().map(|(k, v)| format!("{}: {}", k, v)), "\n");
f.write_all(s.as_bytes())
.await
.map_err(|e| Error::Io("Failed to write seccomp configuraiton".to_string(), e))?;
// Temporary disabled
// Must be called before parse_seccomp_filters
// jail.log_seccomp_filter_failures();
// let p: std::path::PathBuf = seccomp_config.into();
// jail.parse_seccomp_filters(p.as_path())
// .context("Failed parse seccomp config")?;
// jail.use_seccomp_filter();
}
// Configure UID
jail.change_uid(self.uid);
// Configure PID
jail.change_gid(self.gid);
// Update the capability mask if specified
if let Some(capabilities) = &manifest.capabilities {
// TODO: the capabilities should be passed as an array
jail.update_caps(&capabilities.join(" "))
.map_err(Error::Minijail)?;
}
// Update the supplementary group list if specified
if let Some(suppl_groups) = &manifest.suppl_groups {
// TODO: the groups should be passed an array
jail.update_suppl_groups(&suppl_groups.join(" "))
.map_err(Error::Minijail)?;
}
// TODO: Do not use pid namespace because of multithreadding
// issues discovered by minijail. See libminijail.c for details.
// Make the process enter a pid namespace
//jail.namespace_pids();
// Make the process enter a vfs namespace
jail.namespace_vfs();
// Set no_new_privs. See </kernel/seccomp.c> and </kernel/sys.c>
// in the kernel source tree for an explanation of the parameters.
jail.no_new_privs();
// Set chroot dir for process
jail.enter_chroot(&root.as_path())?;
// Make the application the init process
jail.run_as_init();
self.setup_mounts(&mut jail, container).await?;
// Arguments
let args = manifest.args.clone().unwrap_or_default();
let init_str = init.display().to_string();
let argv: Vec<&str> = iter::once(init_str.as_str())
.chain(args.iter().map(|s| s.as_str()))
.collect();
// Create environment for process. Set data directory, container name and version
let mut env = manifest.env.clone().unwrap_or_default();
env.insert(ENV_NAME.to_string(), manifest.name.to_string());
env.insert(ENV_VERSION.to_string(), manifest.version.to_string());
let env = env
.iter()
.map(|(k, v)| format!("{}={}", k, v))
.collect::<Vec<String>>();
let env = env.iter().map(|a| a.as_str()).collect::<Vec<&str>>();
debug!(
"Executing \"{}{}{}\"",
init.display(),
if args.len() > 1 { " " } else { "" },
argv.iter().skip(1).join(" ")
);
let stdout =
CaptureOutput::new(OutputStream::Stdout, &manifest.name, self.event_tx.clone()).await?;
let stderr =
CaptureOutput::new(OutputStream::Stderr, &manifest.name, self.event_tx.clone()).await?;
// Prevent minijail to close the log fd so that errors aren't missed
let log_fd = (self.log_fd, self.log_fd);
let pid = jail.run_remap_env_preload(
&init.as_path(),
&[(stdout.0, 1), (stderr.0, 2), log_fd],
&argv,
&env,
false,
)? as u32;
let (exit_handle_signal, exit_handle_wait) = exit_handle();
// Spawn a task thats waits for the child to exit
waitpid(
&manifest.name,
pid,
exit_handle_signal,
self.event_tx.clone(),
)
.await;
Ok(Process {
pid,
_jail: jail,
_tmpdir: tmpdir,
_stdout: stdout,
_stderr: stderr,
exit_handle_wait,
})
}
| let proc = Path::new("/proc");
jail.mount_bind(&proc, &proc, false)
.map_err(Error::Minijail)?;
jail.remount_proc_readonly();
// If there's no explicit mount for /dev add a minimal variant
if!container
.manifest
.mounts
.contains_key(&PathBuf::from("/dev"))
{
debug!("Mounting minimal /dev");
jail.mount_dev();
}
for (target, mount) in &container.manifest.mounts {
match &mount {
Mount::Bind { host, flags } => {
if!&host.exists() {
warn!(
"Cannot bind mount nonexitent source {} to {}",
host.display(),
target.display()
);
continue;
}
let rw = flags.contains(&MountFlag::Rw);
debug!(
"Mounting {} on {}{}",
host.display(),
target.display(),
if rw { " (rw)" } else { "" }
);
jail.mount_bind(&host, &target, rw)
.map_err(Error::Minijail)?;
}
Mount::Persist => {
let dir = self.data_dir.join(&container.manifest.name);
if!dir.exists() {
debug!("Creating {}", dir.display());
fs::create_dir_all(&dir).await.map_err(|e| {
Error::Io(format!("Failed to create {}", dir.display()), e)
})?;
}
debug!("Chowning {} to {}:{}", dir.display(), self.uid, self.gid);
chown(
dir.as_os_str(),
Some(unistd::Uid::from_raw(self.uid)),
Some(unistd::Gid::from_raw(self.gid)),
)
.map_err(|e| {
Error::Os(
format!(
"Failed to chown {} to {}:{}",
dir.display(),
self.uid,
self.gid
),
e,
)
})?;
debug!("Mounting {} on {}", dir.display(), target.display(),);
jail.mount_bind(&dir, &target, true)
.map_err(Error::Minijail)?;
}
Mount::Resource { name, version, dir } => {
let src = {
// Join the source of the resource container with the mount dir
let resource_root = self.run_dir.join(&name).join(&version.to_string());
let dir = dir
.strip_prefix("/")
.map(|d| resource_root.join(d))
.unwrap_or(resource_root);
if!dir.exists() {
return Err(Error::Start(format!(
"Resource folder {} is missing",
dir.display()
)));
}
dir
};
debug!("Mounting {} on {}", src.display(), target.display());
jail.mount_bind(&src, &target, false)
.map_err(Error::Minijail)?;
}
Mount::Tmpfs { size } => {
debug!(
"Mounting tmpfs with size {} on {}",
bytesize::ByteSize::b(*size),
target.display()
);
let data = format!("size={},mode=1777", size);
jail.mount_with_data(&Path::new("none"), &target, "tmpfs", 0, &data)
.map_err(Error::Minijail)?;
}
Mount::Dev { r#type } => {
match r#type {
// The Full mount of /dev is a simple rw bind mount of /dev
Dev::Full => {
let dev = Path::new("/dev");
jail.mount_bind(&dev, &dev, true).map_err(Error::Minijail)?;
}
}
}
}
}
Ok(())
}
}
pub(crate) struct Process {
/// PID of this process
pid: u32,
/// Handle to a libminijail configuration
_jail: MinijailHandle,
/// Temporary directory created in the systems tmp folder.
/// This directory holds process instance specific data that needs
/// to be dumped to disk for startup. e.g seccomp config (TODO)
_tmpdir: tempfile::TempDir,
/// Captured stdout output
_stdout: CaptureOutput,
/// Captured stderr output
_stderr: CaptureOutput,
/// Rx part of the exit handle of this process
exit_handle_wait: ExitHandleWait,
}
impl fmt::Debug for Process {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Process").field("pid", &self.pid).finish()
}
}
impl Process {
pub fn pid(&self) -> Pid {
self.pid
}
pub async fn stop(&mut self, timeout: time::Duration) -> Result<ExitStatus, Error> {
// Send a SIGTERM to the application. If the application does not terminate with a timeout
// it is SIGKILLed.
let sigterm = signal::Signal::SIGTERM;
signal::kill(unistd::Pid::from_raw(self.pid as i32), Some(sigterm))
.map_err(|e| Error::Os(format!("Failed to SIGTERM {}", self.pid), e))?;
let timeout = Box::pin(time::sleep(timeout));
let exited = Box::pin(self.exit_handle_wait.recv());
let pid = self.pid;
Ok(select! {
s = exited => {
s.expect("Internal channel error during process termination") // This is the happy path...
},
_ = timeout => {
signal::kill(unistd::Pid::from_raw(pid as i32), Some(signal::Signal::SIGKILL))
.map_err(|e| Error::Os("Failed to kill process".to_string(), e))?;
ExitStatus::Signaled(signal::Signal::SIGKILL)
}
})
}
}
struct AsyncPipe {
inner: AsyncFd<std::fs::File>,
writefd: i32,
}
impl AsyncPipe {
fn new() -> Result<AsyncPipe, Error> {
let (readfd, writefd) =
pipe().map_err(|e| Error::Os("Failed to create pipe".to_string(), e))?;
let mut flags = OFlag::from_bits(fcntl(readfd, fcntl::FcntlArg::F_GETFL).unwrap()).unwrap();
flags.set(OFlag::O_NONBLOCK, true);
fcntl(readfd, fcntl::FcntlArg::F_SETFL(flags)).expect("Failed to configure pipe fd");
let pipe =
unsafe { <std::fs::File as std::os::unix::prelude::FromRawFd>::from_raw_fd(readfd) };
let inner = AsyncFd::new(pipe).map_err(|e| Error::Io("Async fd".to_string(), e))?;
Ok(AsyncPipe { inner, writefd })
}
fn writefd(&self) -> RawFd {
self.writefd
}
}
impl AsyncRead for AsyncPipe {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
loop {
let mut guard = futures::ready!(self.inner.poll_read_ready(cx))?;
match guard
.try_io(|inner| std::io::Read::read(&mut inner.get_ref(), buf.initialized_mut()))
{
Ok(Ok(n)) => {
buf.advance(n);
break Poll::Ready(Ok(()));
}
Ok(Err(e)) => break Poll::Ready(Err(e)),
Err(_would_block) => continue,
}
}
}
}
// Capture output of a child process. Create a pipe and spawn a task that forwards each line to
// the main loop. When this struct is dropped the internal spawned tasks are stopped.
#[derive(Debug)]
struct CaptureOutput(i32, oneshot::Sender<()>);
impl CaptureOutput {
pub async fn new(
stream: OutputStream,
tag: &str,
event_tx: EventTx,
) -> Result<CaptureOutput, Error> {
let pipe | async fn setup_mounts(
&self,
jail: &mut MinijailHandle,
container: &Container,
) -> Result<(), Error> { | random_line_split |
mod.rs | use std::error::Error as StdError;
use std::ffi::{CStr, CString};
use std::fmt;
use std::io;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::collections::HashSet;
use std::fs::File;
use digest::{Digest, FixedOutput};
use nix::fcntl::{fcntl, open, OFlag};
use nix::sys::stat::Mode;
use nix::unistd::{execveat, fork, lseek64, write, ForkResult, Gid, Pid, Uid, Whence};
use sha2::{Sha256, Sha512};
use sha3::{Sha3_512, Keccak512};
use tracing::{event, Level};
use users::{get_group_by_name, get_user_by_name};
use yscloud_config_model::ImageType;
use memfd::{MemFd, MemFdOptions, SealFlag};
use owned_fd::{OwnedFd, IntoOwnedFd};
use super::posix_imp::relabel_file_descriptors;
pub use super::posix_imp::run_reified;
use crate::{Void, AppPreforkConfiguration};
pub mod arch;
pub mod seccomp;
pub mod unshare;
pub mod container;
pub mod mount;
pub struct ContainerImage {
file: PathBuf,
mounts: Vec<(PathBuf, PathBuf)>,
}
impl ContainerImage {
pub fn start(&self) -> io::Result<Void> {
return Err(io::Error::new(io::ErrorKind::Other, "containers not yet implemented"));
}
}
#[derive(Debug)]
pub struct Executable {
file: OwnedFd,
}
impl fmt::Display for Executable {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "fd+executable:{}", self.file.as_raw_fd())
}
}
impl AsRawFd for Executable {
fn as_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
fn nix_error_to_io_error(err: nix::Error) -> io::Error {
match err {
nix::Error::Sys(syserr) => io::Error::from_raw_os_error(syserr as i32),
nix::Error::InvalidPath => io::Error::new(io::ErrorKind::Other, "Invalid path (nix)"),
nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::Other, "Invalid UTF-8 (nix)"),
nix::Error::UnsupportedOperation => {
io::Error::new(io::ErrorKind::Other, "Unsupported operation (nix)")
}
}
}
pub struct ExecutableFactory {
storage: File,
expected_path: PathBuf,
disk_linked: bool,
}
impl ExecutableFactory {
pub fn new_unspecified(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
Self::new_in_memory(name, capacity)
}
pub fn new_in_memory(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
let mut mem_fd = MemFdOptions::new()
.cloexec(true)
.allow_sealing(true)
.with_capacity(capacity)
.set_mode(Mode::S_IRWXU | Mode::S_IRGRP | Mode::S_IXGRP | Mode::S_IROTH | Mode::S_IXOTH)
.open(name)
.map_err(|e| nix_error_to_io_error(e))?;
mem_fd
.seal(SealFlag::F_SEAL_SEAL | SealFlag::F_SEAL_SHRINK | SealFlag::F_SEAL_GROW)
.map_err(|e| nix_error_to_io_error(e))?;
Ok(ExecutableFactory {
storage: mem_fd.into(),
expected_path: Default::default(),
disk_linked: false,
})
}
pub fn new_on_disk(name: &str, capacity: i64, root: &Path) -> io::Result<ExecutableFactory> {
let mut full_path = root.to_owned();
full_path.push(name);
let storage = File::create(&full_path)?;
Ok(ExecutableFactory {
storage,
expected_path: full_path,
disk_linked: true,
})
}
pub fn finalize_container(self) -> ContainerImage {
assert!(self.disk_linked, "must be disk-linked");
ContainerImage {
file: self.expected_path,
mounts: Vec::new(),
}
}
pub fn finalize_executable(self) -> Executable {
Executable {
file: self.storage.into_owned_fd(),
}
}
}
impl io::Write for ExecutableFactory {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
self.storage.write(data)
}
fn flush(&mut self) -> io::Result<()> {
self.storage.flush()
}
}
impl Executable {
pub fn open<P>(path: P) -> io::Result<Executable>
where
P: AsRef<Path>,
{
let path: &Path = path.as_ref();
let artifact_file = open(path, OFlag::O_RDONLY | OFlag::O_CLOEXEC, Mode::empty())
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
Ok(Executable {
file: unsafe { OwnedFd::from_raw_fd(artifact_file) },
})
}
pub fn execute(&self, arguments: &[&CStr], env: &[&CStr]) -> io::Result<Void> {
use nix::fcntl::{AtFlags, FcntlArg, FdFlag};
fcntl(self.file.as_raw_fd(), FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
let program_name = CString::new("").unwrap();
execveat(
self.file.as_raw_fd(),
&program_name,
arguments,
env,
AtFlags::AT_EMPTY_PATH,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// successful invokations of execveat don't return.
unreachable!();
}
}
pub const EXTENSION: &str = "";
#[cfg(target_arch = "x86_64")]
pub const PLATFORM_TRIPLES: &[&str] = &[
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"x86_64-unknown-linux",
];
pub fn keep_hook(c: &AppPreforkConfiguration, keep_map: &mut [bool]) {
keep_map[c.artifact.0.as_raw_fd() as usize] = true;
}
pub trait SandboxingStrategy {
fn preexec(&self) -> io::Result<()>;
}
impl SandboxingStrategy for () {
fn preexec(&self) -> io::Result<()> {
Ok(())
}
}
pub struct UserChangeStrategy {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
fn io_other<E>(e: E) -> io::Error
where | io::Error::new(io::ErrorKind::Other, e)
}
impl SandboxingStrategy for UserChangeStrategy {
fn preexec(&self) -> io::Result<()> {
if let Some(ref wd) = self.workdir {
std::fs::create_dir_all(wd)?;
nix::unistd::chown(wd, self.set_user, self.set_group)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
}
// seccomp::setup(&self.seccomp_props).unwrap();
if let Some(ref wd) = self.workdir {
event!(Level::INFO, "setting cwd = {}", wd.display());
nix::unistd::chdir(wd).map_err(io_other)?;
}
if!self.seccomp_props.contains("*") {
if!self.seccomp_props.contains("@filesystem") {
unshare::restrict_filesystem()?;
}
if!self.seccomp_props.contains("@network") {
unshare::restrict_network()?;
}
}
if let Some(gid) = self.set_group {
event!(Level::INFO, "setting gid = {:?}", gid);
nix::unistd::setgid(gid).map_err(io_other)?;
}
if let Some(uid) = self.set_user {
event!(Level::INFO, "setting uid = {:?}", uid);
nix::unistd::setuid(uid).map_err(io_other)?;
}
Ok(())
}
}
pub struct ExecExtras {
sandboxing_strategy: Option<Arc<dyn SandboxingStrategy>>,
}
impl ExecExtras {
pub fn builder() -> ExecExtrasBuilder {
let mut builder: ExecExtrasBuilder = Default::default();
builder.seccomp_props.insert("@network");
builder.seccomp_props.insert("*");
builder
}
}
#[derive(Default)]
pub struct ExecExtrasBuilder {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
impl ExecExtrasBuilder {
pub fn set_user(&mut self, name: &str) -> io::Result<()> {
let uid = get_user_by_name(name).ok_or_else(|| {
let msg = format!("unknown user {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_user = Some(Uid::from_raw(uid.uid()));
Ok(())
}
pub fn set_group(&mut self, name: &str) -> io::Result<()> {
let gid = get_group_by_name(name).ok_or_else(|| {
let msg = format!("unknown group {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_group = Some(Gid::from_raw(gid.gid()));
Ok(())
}
pub fn set_workdir(&mut self, workdir: &Path) -> io::Result<()> {
self.workdir = Some(workdir.to_owned());
Ok(())
}
pub fn clear_seccomp_permission(&mut self) {
self.seccomp_props.clear();
}
pub fn add_seccomp_permission(&mut self, value: &'static str) {
self.seccomp_props.insert(value);
}
pub fn build(&self) -> ExecExtras {
let mut sandboxing_strategy = None;
if self.set_user.is_some() || self.set_group.is_some() {
let obj: Box<dyn SandboxingStrategy> = Box::new(UserChangeStrategy {
workdir: self.workdir.clone(),
set_user: self.set_user.clone(),
set_group: self.set_group.clone(),
seccomp_props: self.seccomp_props.clone(),
});
sandboxing_strategy = Some(obj.into());
}
ExecExtras {
sandboxing_strategy,
}
}
}
fn exec_artifact_child(ext: &ExecExtras, c: &AppPreforkConfiguration) -> io::Result<Void> {
let package_id = c.package_id.clone();
let app_config = relabel_file_descriptors(&c)?;
let tmpfile = open(
"/tmp",
OFlag::O_RDWR | OFlag::O_TMPFILE,
Mode::S_IRUSR | Mode::S_IWUSR,
)
.map_err(|e| {
event!(Level::WARN, "error opening temporary: {:?}", e);
io_other(e)
})?;
let data = serde_json::to_string(&app_config)?;
let data_len =
write(tmpfile, data.as_bytes()).map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
lseek64(tmpfile, 0, Whence::SeekSet)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
assert_eq!(data_len, data.len());
let tmpfile = format!("{}\0", tmpfile);
let arguments: &[&CStr] = &[
CStr::from_bytes_with_nul(b"yscloud-executable\0").unwrap(),
CStr::from_bytes_with_nul(b"--config-fd\0").unwrap(),
CStr::from_bytes_with_nul(tmpfile.as_bytes()).unwrap(),
];
event!(
Level::INFO,
"running {} {:?} -- {}",
package_id,
arguments,
data
);
if let Some(ref sandbox) = ext.sandboxing_strategy {
sandbox.preexec()?;
}
let env: &[&CStr] = &[
CStr::from_bytes_with_nul(b"RUST_BACKTRACE=1\0").unwrap(),
CStr::from_bytes_with_nul(b"YSCLOUD=1\0").unwrap(),
];
c.artifact.execute(arguments, env)?;
unreachable!();
}
pub fn exec_artifact(e: &ExecExtras, c: AppPreforkConfiguration) -> io::Result<Pid> {
match unsafe { fork() } {
Ok(ForkResult::Child) => {
if let Err(err) = exec_artifact_child(e, &c) {
event!(Level::WARN, "failed to execute: {:?}", err);
std::process::exit(1);
} else {
unreachable!();
}
}
Ok(ForkResult::Parent { child,.. }) => Ok(child),
Err(err) => Err(io::Error::new(io::ErrorKind::Other, err)),
}
} | E: Into<Box<dyn StdError + Send + Sync>>,
{ | random_line_split |
mod.rs | use std::error::Error as StdError;
use std::ffi::{CStr, CString};
use std::fmt;
use std::io;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::collections::HashSet;
use std::fs::File;
use digest::{Digest, FixedOutput};
use nix::fcntl::{fcntl, open, OFlag};
use nix::sys::stat::Mode;
use nix::unistd::{execveat, fork, lseek64, write, ForkResult, Gid, Pid, Uid, Whence};
use sha2::{Sha256, Sha512};
use sha3::{Sha3_512, Keccak512};
use tracing::{event, Level};
use users::{get_group_by_name, get_user_by_name};
use yscloud_config_model::ImageType;
use memfd::{MemFd, MemFdOptions, SealFlag};
use owned_fd::{OwnedFd, IntoOwnedFd};
use super::posix_imp::relabel_file_descriptors;
pub use super::posix_imp::run_reified;
use crate::{Void, AppPreforkConfiguration};
pub mod arch;
pub mod seccomp;
pub mod unshare;
pub mod container;
pub mod mount;
pub struct ContainerImage {
file: PathBuf,
mounts: Vec<(PathBuf, PathBuf)>,
}
impl ContainerImage {
pub fn start(&self) -> io::Result<Void> {
return Err(io::Error::new(io::ErrorKind::Other, "containers not yet implemented"));
}
}
#[derive(Debug)]
pub struct Executable {
file: OwnedFd,
}
impl fmt::Display for Executable {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "fd+executable:{}", self.file.as_raw_fd())
}
}
impl AsRawFd for Executable {
fn as_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
fn nix_error_to_io_error(err: nix::Error) -> io::Error {
match err {
nix::Error::Sys(syserr) => io::Error::from_raw_os_error(syserr as i32),
nix::Error::InvalidPath => io::Error::new(io::ErrorKind::Other, "Invalid path (nix)"),
nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::Other, "Invalid UTF-8 (nix)"),
nix::Error::UnsupportedOperation => {
io::Error::new(io::ErrorKind::Other, "Unsupported operation (nix)")
}
}
}
pub struct ExecutableFactory {
storage: File,
expected_path: PathBuf,
disk_linked: bool,
}
impl ExecutableFactory {
pub fn new_unspecified(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
Self::new_in_memory(name, capacity)
}
pub fn new_in_memory(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
let mut mem_fd = MemFdOptions::new()
.cloexec(true)
.allow_sealing(true)
.with_capacity(capacity)
.set_mode(Mode::S_IRWXU | Mode::S_IRGRP | Mode::S_IXGRP | Mode::S_IROTH | Mode::S_IXOTH)
.open(name)
.map_err(|e| nix_error_to_io_error(e))?;
mem_fd
.seal(SealFlag::F_SEAL_SEAL | SealFlag::F_SEAL_SHRINK | SealFlag::F_SEAL_GROW)
.map_err(|e| nix_error_to_io_error(e))?;
Ok(ExecutableFactory {
storage: mem_fd.into(),
expected_path: Default::default(),
disk_linked: false,
})
}
pub fn new_on_disk(name: &str, capacity: i64, root: &Path) -> io::Result<ExecutableFactory> {
let mut full_path = root.to_owned();
full_path.push(name);
let storage = File::create(&full_path)?;
Ok(ExecutableFactory {
storage,
expected_path: full_path,
disk_linked: true,
})
}
pub fn | (self) -> ContainerImage {
assert!(self.disk_linked, "must be disk-linked");
ContainerImage {
file: self.expected_path,
mounts: Vec::new(),
}
}
pub fn finalize_executable(self) -> Executable {
Executable {
file: self.storage.into_owned_fd(),
}
}
}
impl io::Write for ExecutableFactory {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
self.storage.write(data)
}
fn flush(&mut self) -> io::Result<()> {
self.storage.flush()
}
}
impl Executable {
pub fn open<P>(path: P) -> io::Result<Executable>
where
P: AsRef<Path>,
{
let path: &Path = path.as_ref();
let artifact_file = open(path, OFlag::O_RDONLY | OFlag::O_CLOEXEC, Mode::empty())
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
Ok(Executable {
file: unsafe { OwnedFd::from_raw_fd(artifact_file) },
})
}
pub fn execute(&self, arguments: &[&CStr], env: &[&CStr]) -> io::Result<Void> {
use nix::fcntl::{AtFlags, FcntlArg, FdFlag};
fcntl(self.file.as_raw_fd(), FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
let program_name = CString::new("").unwrap();
execveat(
self.file.as_raw_fd(),
&program_name,
arguments,
env,
AtFlags::AT_EMPTY_PATH,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// successful invokations of execveat don't return.
unreachable!();
}
}
pub const EXTENSION: &str = "";
#[cfg(target_arch = "x86_64")]
pub const PLATFORM_TRIPLES: &[&str] = &[
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"x86_64-unknown-linux",
];
pub fn keep_hook(c: &AppPreforkConfiguration, keep_map: &mut [bool]) {
keep_map[c.artifact.0.as_raw_fd() as usize] = true;
}
pub trait SandboxingStrategy {
fn preexec(&self) -> io::Result<()>;
}
impl SandboxingStrategy for () {
fn preexec(&self) -> io::Result<()> {
Ok(())
}
}
pub struct UserChangeStrategy {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
fn io_other<E>(e: E) -> io::Error
where
E: Into<Box<dyn StdError + Send + Sync>>,
{
io::Error::new(io::ErrorKind::Other, e)
}
impl SandboxingStrategy for UserChangeStrategy {
fn preexec(&self) -> io::Result<()> {
if let Some(ref wd) = self.workdir {
std::fs::create_dir_all(wd)?;
nix::unistd::chown(wd, self.set_user, self.set_group)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
}
// seccomp::setup(&self.seccomp_props).unwrap();
if let Some(ref wd) = self.workdir {
event!(Level::INFO, "setting cwd = {}", wd.display());
nix::unistd::chdir(wd).map_err(io_other)?;
}
if!self.seccomp_props.contains("*") {
if!self.seccomp_props.contains("@filesystem") {
unshare::restrict_filesystem()?;
}
if!self.seccomp_props.contains("@network") {
unshare::restrict_network()?;
}
}
if let Some(gid) = self.set_group {
event!(Level::INFO, "setting gid = {:?}", gid);
nix::unistd::setgid(gid).map_err(io_other)?;
}
if let Some(uid) = self.set_user {
event!(Level::INFO, "setting uid = {:?}", uid);
nix::unistd::setuid(uid).map_err(io_other)?;
}
Ok(())
}
}
pub struct ExecExtras {
sandboxing_strategy: Option<Arc<dyn SandboxingStrategy>>,
}
impl ExecExtras {
pub fn builder() -> ExecExtrasBuilder {
let mut builder: ExecExtrasBuilder = Default::default();
builder.seccomp_props.insert("@network");
builder.seccomp_props.insert("*");
builder
}
}
#[derive(Default)]
pub struct ExecExtrasBuilder {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
impl ExecExtrasBuilder {
pub fn set_user(&mut self, name: &str) -> io::Result<()> {
let uid = get_user_by_name(name).ok_or_else(|| {
let msg = format!("unknown user {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_user = Some(Uid::from_raw(uid.uid()));
Ok(())
}
pub fn set_group(&mut self, name: &str) -> io::Result<()> {
let gid = get_group_by_name(name).ok_or_else(|| {
let msg = format!("unknown group {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_group = Some(Gid::from_raw(gid.gid()));
Ok(())
}
pub fn set_workdir(&mut self, workdir: &Path) -> io::Result<()> {
self.workdir = Some(workdir.to_owned());
Ok(())
}
pub fn clear_seccomp_permission(&mut self) {
self.seccomp_props.clear();
}
pub fn add_seccomp_permission(&mut self, value: &'static str) {
self.seccomp_props.insert(value);
}
pub fn build(&self) -> ExecExtras {
let mut sandboxing_strategy = None;
if self.set_user.is_some() || self.set_group.is_some() {
let obj: Box<dyn SandboxingStrategy> = Box::new(UserChangeStrategy {
workdir: self.workdir.clone(),
set_user: self.set_user.clone(),
set_group: self.set_group.clone(),
seccomp_props: self.seccomp_props.clone(),
});
sandboxing_strategy = Some(obj.into());
}
ExecExtras {
sandboxing_strategy,
}
}
}
fn exec_artifact_child(ext: &ExecExtras, c: &AppPreforkConfiguration) -> io::Result<Void> {
let package_id = c.package_id.clone();
let app_config = relabel_file_descriptors(&c)?;
let tmpfile = open(
"/tmp",
OFlag::O_RDWR | OFlag::O_TMPFILE,
Mode::S_IRUSR | Mode::S_IWUSR,
)
.map_err(|e| {
event!(Level::WARN, "error opening temporary: {:?}", e);
io_other(e)
})?;
let data = serde_json::to_string(&app_config)?;
let data_len =
write(tmpfile, data.as_bytes()).map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
lseek64(tmpfile, 0, Whence::SeekSet)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
assert_eq!(data_len, data.len());
let tmpfile = format!("{}\0", tmpfile);
let arguments: &[&CStr] = &[
CStr::from_bytes_with_nul(b"yscloud-executable\0").unwrap(),
CStr::from_bytes_with_nul(b"--config-fd\0").unwrap(),
CStr::from_bytes_with_nul(tmpfile.as_bytes()).unwrap(),
];
event!(
Level::INFO,
"running {} {:?} -- {}",
package_id,
arguments,
data
);
if let Some(ref sandbox) = ext.sandboxing_strategy {
sandbox.preexec()?;
}
let env: &[&CStr] = &[
CStr::from_bytes_with_nul(b"RUST_BACKTRACE=1\0").unwrap(),
CStr::from_bytes_with_nul(b"YSCLOUD=1\0").unwrap(),
];
c.artifact.execute(arguments, env)?;
unreachable!();
}
pub fn exec_artifact(e: &ExecExtras, c: AppPreforkConfiguration) -> io::Result<Pid> {
match unsafe { fork() } {
Ok(ForkResult::Child) => {
if let Err(err) = exec_artifact_child(e, &c) {
event!(Level::WARN, "failed to execute: {:?}", err);
std::process::exit(1);
} else {
unreachable!();
}
}
Ok(ForkResult::Parent { child,.. }) => Ok(child),
Err(err) => Err(io::Error::new(io::ErrorKind::Other, err)),
}
}
| finalize_container | identifier_name |
mod.rs | use std::error::Error as StdError;
use std::ffi::{CStr, CString};
use std::fmt;
use std::io;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::collections::HashSet;
use std::fs::File;
use digest::{Digest, FixedOutput};
use nix::fcntl::{fcntl, open, OFlag};
use nix::sys::stat::Mode;
use nix::unistd::{execveat, fork, lseek64, write, ForkResult, Gid, Pid, Uid, Whence};
use sha2::{Sha256, Sha512};
use sha3::{Sha3_512, Keccak512};
use tracing::{event, Level};
use users::{get_group_by_name, get_user_by_name};
use yscloud_config_model::ImageType;
use memfd::{MemFd, MemFdOptions, SealFlag};
use owned_fd::{OwnedFd, IntoOwnedFd};
use super::posix_imp::relabel_file_descriptors;
pub use super::posix_imp::run_reified;
use crate::{Void, AppPreforkConfiguration};
pub mod arch;
pub mod seccomp;
pub mod unshare;
pub mod container;
pub mod mount;
pub struct ContainerImage {
file: PathBuf,
mounts: Vec<(PathBuf, PathBuf)>,
}
impl ContainerImage {
pub fn start(&self) -> io::Result<Void> {
return Err(io::Error::new(io::ErrorKind::Other, "containers not yet implemented"));
}
}
#[derive(Debug)]
pub struct Executable {
file: OwnedFd,
}
impl fmt::Display for Executable {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "fd+executable:{}", self.file.as_raw_fd())
}
}
impl AsRawFd for Executable {
fn as_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
fn nix_error_to_io_error(err: nix::Error) -> io::Error {
match err {
nix::Error::Sys(syserr) => io::Error::from_raw_os_error(syserr as i32),
nix::Error::InvalidPath => io::Error::new(io::ErrorKind::Other, "Invalid path (nix)"),
nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::Other, "Invalid UTF-8 (nix)"),
nix::Error::UnsupportedOperation => {
io::Error::new(io::ErrorKind::Other, "Unsupported operation (nix)")
}
}
}
pub struct ExecutableFactory {
storage: File,
expected_path: PathBuf,
disk_linked: bool,
}
impl ExecutableFactory {
pub fn new_unspecified(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
Self::new_in_memory(name, capacity)
}
pub fn new_in_memory(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
let mut mem_fd = MemFdOptions::new()
.cloexec(true)
.allow_sealing(true)
.with_capacity(capacity)
.set_mode(Mode::S_IRWXU | Mode::S_IRGRP | Mode::S_IXGRP | Mode::S_IROTH | Mode::S_IXOTH)
.open(name)
.map_err(|e| nix_error_to_io_error(e))?;
mem_fd
.seal(SealFlag::F_SEAL_SEAL | SealFlag::F_SEAL_SHRINK | SealFlag::F_SEAL_GROW)
.map_err(|e| nix_error_to_io_error(e))?;
Ok(ExecutableFactory {
storage: mem_fd.into(),
expected_path: Default::default(),
disk_linked: false,
})
}
pub fn new_on_disk(name: &str, capacity: i64, root: &Path) -> io::Result<ExecutableFactory> {
let mut full_path = root.to_owned();
full_path.push(name);
let storage = File::create(&full_path)?;
Ok(ExecutableFactory {
storage,
expected_path: full_path,
disk_linked: true,
})
}
pub fn finalize_container(self) -> ContainerImage {
assert!(self.disk_linked, "must be disk-linked");
ContainerImage {
file: self.expected_path,
mounts: Vec::new(),
}
}
pub fn finalize_executable(self) -> Executable {
Executable {
file: self.storage.into_owned_fd(),
}
}
}
impl io::Write for ExecutableFactory {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
self.storage.write(data)
}
fn flush(&mut self) -> io::Result<()> {
self.storage.flush()
}
}
impl Executable {
pub fn open<P>(path: P) -> io::Result<Executable>
where
P: AsRef<Path>,
{
let path: &Path = path.as_ref();
let artifact_file = open(path, OFlag::O_RDONLY | OFlag::O_CLOEXEC, Mode::empty())
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
Ok(Executable {
file: unsafe { OwnedFd::from_raw_fd(artifact_file) },
})
}
pub fn execute(&self, arguments: &[&CStr], env: &[&CStr]) -> io::Result<Void> {
use nix::fcntl::{AtFlags, FcntlArg, FdFlag};
fcntl(self.file.as_raw_fd(), FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
let program_name = CString::new("").unwrap();
execveat(
self.file.as_raw_fd(),
&program_name,
arguments,
env,
AtFlags::AT_EMPTY_PATH,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// successful invokations of execveat don't return.
unreachable!();
}
}
pub const EXTENSION: &str = "";
#[cfg(target_arch = "x86_64")]
pub const PLATFORM_TRIPLES: &[&str] = &[
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"x86_64-unknown-linux",
];
pub fn keep_hook(c: &AppPreforkConfiguration, keep_map: &mut [bool]) {
keep_map[c.artifact.0.as_raw_fd() as usize] = true;
}
pub trait SandboxingStrategy {
fn preexec(&self) -> io::Result<()>;
}
impl SandboxingStrategy for () {
fn preexec(&self) -> io::Result<()> {
Ok(())
}
}
pub struct UserChangeStrategy {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
fn io_other<E>(e: E) -> io::Error
where
E: Into<Box<dyn StdError + Send + Sync>>,
{
io::Error::new(io::ErrorKind::Other, e)
}
impl SandboxingStrategy for UserChangeStrategy {
fn preexec(&self) -> io::Result<()> {
if let Some(ref wd) = self.workdir {
std::fs::create_dir_all(wd)?;
nix::unistd::chown(wd, self.set_user, self.set_group)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
}
// seccomp::setup(&self.seccomp_props).unwrap();
if let Some(ref wd) = self.workdir {
event!(Level::INFO, "setting cwd = {}", wd.display());
nix::unistd::chdir(wd).map_err(io_other)?;
}
if!self.seccomp_props.contains("*") {
if!self.seccomp_props.contains("@filesystem") {
unshare::restrict_filesystem()?;
}
if!self.seccomp_props.contains("@network") {
unshare::restrict_network()?;
}
}
if let Some(gid) = self.set_group {
event!(Level::INFO, "setting gid = {:?}", gid);
nix::unistd::setgid(gid).map_err(io_other)?;
}
if let Some(uid) = self.set_user {
event!(Level::INFO, "setting uid = {:?}", uid);
nix::unistd::setuid(uid).map_err(io_other)?;
}
Ok(())
}
}
pub struct ExecExtras {
sandboxing_strategy: Option<Arc<dyn SandboxingStrategy>>,
}
impl ExecExtras {
pub fn builder() -> ExecExtrasBuilder {
let mut builder: ExecExtrasBuilder = Default::default();
builder.seccomp_props.insert("@network");
builder.seccomp_props.insert("*");
builder
}
}
#[derive(Default)]
pub struct ExecExtrasBuilder {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
impl ExecExtrasBuilder {
pub fn set_user(&mut self, name: &str) -> io::Result<()> {
let uid = get_user_by_name(name).ok_or_else(|| {
let msg = format!("unknown user {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_user = Some(Uid::from_raw(uid.uid()));
Ok(())
}
pub fn set_group(&mut self, name: &str) -> io::Result<()> {
let gid = get_group_by_name(name).ok_or_else(|| {
let msg = format!("unknown group {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_group = Some(Gid::from_raw(gid.gid()));
Ok(())
}
pub fn set_workdir(&mut self, workdir: &Path) -> io::Result<()> {
self.workdir = Some(workdir.to_owned());
Ok(())
}
pub fn clear_seccomp_permission(&mut self) {
self.seccomp_props.clear();
}
pub fn add_seccomp_permission(&mut self, value: &'static str) {
self.seccomp_props.insert(value);
}
pub fn build(&self) -> ExecExtras {
let mut sandboxing_strategy = None;
if self.set_user.is_some() || self.set_group.is_some() {
let obj: Box<dyn SandboxingStrategy> = Box::new(UserChangeStrategy {
workdir: self.workdir.clone(),
set_user: self.set_user.clone(),
set_group: self.set_group.clone(),
seccomp_props: self.seccomp_props.clone(),
});
sandboxing_strategy = Some(obj.into());
}
ExecExtras {
sandboxing_strategy,
}
}
}
fn exec_artifact_child(ext: &ExecExtras, c: &AppPreforkConfiguration) -> io::Result<Void> {
let package_id = c.package_id.clone();
let app_config = relabel_file_descriptors(&c)?;
let tmpfile = open(
"/tmp",
OFlag::O_RDWR | OFlag::O_TMPFILE,
Mode::S_IRUSR | Mode::S_IWUSR,
)
.map_err(|e| {
event!(Level::WARN, "error opening temporary: {:?}", e);
io_other(e)
})?;
let data = serde_json::to_string(&app_config)?;
let data_len =
write(tmpfile, data.as_bytes()).map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
lseek64(tmpfile, 0, Whence::SeekSet)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
assert_eq!(data_len, data.len());
let tmpfile = format!("{}\0", tmpfile);
let arguments: &[&CStr] = &[
CStr::from_bytes_with_nul(b"yscloud-executable\0").unwrap(),
CStr::from_bytes_with_nul(b"--config-fd\0").unwrap(),
CStr::from_bytes_with_nul(tmpfile.as_bytes()).unwrap(),
];
event!(
Level::INFO,
"running {} {:?} -- {}",
package_id,
arguments,
data
);
if let Some(ref sandbox) = ext.sandboxing_strategy {
sandbox.preexec()?;
}
let env: &[&CStr] = &[
CStr::from_bytes_with_nul(b"RUST_BACKTRACE=1\0").unwrap(),
CStr::from_bytes_with_nul(b"YSCLOUD=1\0").unwrap(),
];
c.artifact.execute(arguments, env)?;
unreachable!();
}
pub fn exec_artifact(e: &ExecExtras, c: AppPreforkConfiguration) -> io::Result<Pid> {
match unsafe { fork() } {
Ok(ForkResult::Child) => |
Ok(ForkResult::Parent { child,.. }) => Ok(child),
Err(err) => Err(io::Error::new(io::ErrorKind::Other, err)),
}
}
| {
if let Err(err) = exec_artifact_child(e, &c) {
event!(Level::WARN, "failed to execute: {:?}", err);
std::process::exit(1);
} else {
unreachable!();
}
} | conditional_block |
mod.rs | use std::error::Error as StdError;
use std::ffi::{CStr, CString};
use std::fmt;
use std::io;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::collections::HashSet;
use std::fs::File;
use digest::{Digest, FixedOutput};
use nix::fcntl::{fcntl, open, OFlag};
use nix::sys::stat::Mode;
use nix::unistd::{execveat, fork, lseek64, write, ForkResult, Gid, Pid, Uid, Whence};
use sha2::{Sha256, Sha512};
use sha3::{Sha3_512, Keccak512};
use tracing::{event, Level};
use users::{get_group_by_name, get_user_by_name};
use yscloud_config_model::ImageType;
use memfd::{MemFd, MemFdOptions, SealFlag};
use owned_fd::{OwnedFd, IntoOwnedFd};
use super::posix_imp::relabel_file_descriptors;
pub use super::posix_imp::run_reified;
use crate::{Void, AppPreforkConfiguration};
pub mod arch;
pub mod seccomp;
pub mod unshare;
pub mod container;
pub mod mount;
pub struct ContainerImage {
file: PathBuf,
mounts: Vec<(PathBuf, PathBuf)>,
}
impl ContainerImage {
pub fn start(&self) -> io::Result<Void> {
return Err(io::Error::new(io::ErrorKind::Other, "containers not yet implemented"));
}
}
#[derive(Debug)]
pub struct Executable {
file: OwnedFd,
}
impl fmt::Display for Executable {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "fd+executable:{}", self.file.as_raw_fd())
}
}
impl AsRawFd for Executable {
fn as_raw_fd(&self) -> RawFd {
self.file.as_raw_fd()
}
}
fn nix_error_to_io_error(err: nix::Error) -> io::Error {
match err {
nix::Error::Sys(syserr) => io::Error::from_raw_os_error(syserr as i32),
nix::Error::InvalidPath => io::Error::new(io::ErrorKind::Other, "Invalid path (nix)"),
nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::Other, "Invalid UTF-8 (nix)"),
nix::Error::UnsupportedOperation => {
io::Error::new(io::ErrorKind::Other, "Unsupported operation (nix)")
}
}
}
pub struct ExecutableFactory {
storage: File,
expected_path: PathBuf,
disk_linked: bool,
}
impl ExecutableFactory {
pub fn new_unspecified(name: &str, capacity: i64) -> io::Result<ExecutableFactory> {
Self::new_in_memory(name, capacity)
}
pub fn new_in_memory(name: &str, capacity: i64) -> io::Result<ExecutableFactory> |
pub fn new_on_disk(name: &str, capacity: i64, root: &Path) -> io::Result<ExecutableFactory> {
let mut full_path = root.to_owned();
full_path.push(name);
let storage = File::create(&full_path)?;
Ok(ExecutableFactory {
storage,
expected_path: full_path,
disk_linked: true,
})
}
pub fn finalize_container(self) -> ContainerImage {
assert!(self.disk_linked, "must be disk-linked");
ContainerImage {
file: self.expected_path,
mounts: Vec::new(),
}
}
pub fn finalize_executable(self) -> Executable {
Executable {
file: self.storage.into_owned_fd(),
}
}
}
impl io::Write for ExecutableFactory {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
self.storage.write(data)
}
fn flush(&mut self) -> io::Result<()> {
self.storage.flush()
}
}
impl Executable {
pub fn open<P>(path: P) -> io::Result<Executable>
where
P: AsRef<Path>,
{
let path: &Path = path.as_ref();
let artifact_file = open(path, OFlag::O_RDONLY | OFlag::O_CLOEXEC, Mode::empty())
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
Ok(Executable {
file: unsafe { OwnedFd::from_raw_fd(artifact_file) },
})
}
pub fn execute(&self, arguments: &[&CStr], env: &[&CStr]) -> io::Result<Void> {
use nix::fcntl::{AtFlags, FcntlArg, FdFlag};
fcntl(self.file.as_raw_fd(), FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
let program_name = CString::new("").unwrap();
execveat(
self.file.as_raw_fd(),
&program_name,
arguments,
env,
AtFlags::AT_EMPTY_PATH,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// successful invokations of execveat don't return.
unreachable!();
}
}
pub const EXTENSION: &str = "";
#[cfg(target_arch = "x86_64")]
pub const PLATFORM_TRIPLES: &[&str] = &[
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"x86_64-unknown-linux",
];
pub fn keep_hook(c: &AppPreforkConfiguration, keep_map: &mut [bool]) {
keep_map[c.artifact.0.as_raw_fd() as usize] = true;
}
pub trait SandboxingStrategy {
fn preexec(&self) -> io::Result<()>;
}
impl SandboxingStrategy for () {
fn preexec(&self) -> io::Result<()> {
Ok(())
}
}
pub struct UserChangeStrategy {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
fn io_other<E>(e: E) -> io::Error
where
E: Into<Box<dyn StdError + Send + Sync>>,
{
io::Error::new(io::ErrorKind::Other, e)
}
impl SandboxingStrategy for UserChangeStrategy {
fn preexec(&self) -> io::Result<()> {
if let Some(ref wd) = self.workdir {
std::fs::create_dir_all(wd)?;
nix::unistd::chown(wd, self.set_user, self.set_group)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
}
// seccomp::setup(&self.seccomp_props).unwrap();
if let Some(ref wd) = self.workdir {
event!(Level::INFO, "setting cwd = {}", wd.display());
nix::unistd::chdir(wd).map_err(io_other)?;
}
if!self.seccomp_props.contains("*") {
if!self.seccomp_props.contains("@filesystem") {
unshare::restrict_filesystem()?;
}
if!self.seccomp_props.contains("@network") {
unshare::restrict_network()?;
}
}
if let Some(gid) = self.set_group {
event!(Level::INFO, "setting gid = {:?}", gid);
nix::unistd::setgid(gid).map_err(io_other)?;
}
if let Some(uid) = self.set_user {
event!(Level::INFO, "setting uid = {:?}", uid);
nix::unistd::setuid(uid).map_err(io_other)?;
}
Ok(())
}
}
pub struct ExecExtras {
sandboxing_strategy: Option<Arc<dyn SandboxingStrategy>>,
}
impl ExecExtras {
pub fn builder() -> ExecExtrasBuilder {
let mut builder: ExecExtrasBuilder = Default::default();
builder.seccomp_props.insert("@network");
builder.seccomp_props.insert("*");
builder
}
}
#[derive(Default)]
pub struct ExecExtrasBuilder {
workdir: Option<PathBuf>,
set_user: Option<Uid>,
set_group: Option<Gid>,
seccomp_props: HashSet<&'static str>,
}
impl ExecExtrasBuilder {
pub fn set_user(&mut self, name: &str) -> io::Result<()> {
let uid = get_user_by_name(name).ok_or_else(|| {
let msg = format!("unknown user {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_user = Some(Uid::from_raw(uid.uid()));
Ok(())
}
pub fn set_group(&mut self, name: &str) -> io::Result<()> {
let gid = get_group_by_name(name).ok_or_else(|| {
let msg = format!("unknown group {}", name);
io::Error::new(io::ErrorKind::Other, msg)
})?;
self.set_group = Some(Gid::from_raw(gid.gid()));
Ok(())
}
pub fn set_workdir(&mut self, workdir: &Path) -> io::Result<()> {
self.workdir = Some(workdir.to_owned());
Ok(())
}
pub fn clear_seccomp_permission(&mut self) {
self.seccomp_props.clear();
}
pub fn add_seccomp_permission(&mut self, value: &'static str) {
self.seccomp_props.insert(value);
}
pub fn build(&self) -> ExecExtras {
let mut sandboxing_strategy = None;
if self.set_user.is_some() || self.set_group.is_some() {
let obj: Box<dyn SandboxingStrategy> = Box::new(UserChangeStrategy {
workdir: self.workdir.clone(),
set_user: self.set_user.clone(),
set_group: self.set_group.clone(),
seccomp_props: self.seccomp_props.clone(),
});
sandboxing_strategy = Some(obj.into());
}
ExecExtras {
sandboxing_strategy,
}
}
}
fn exec_artifact_child(ext: &ExecExtras, c: &AppPreforkConfiguration) -> io::Result<Void> {
let package_id = c.package_id.clone();
let app_config = relabel_file_descriptors(&c)?;
let tmpfile = open(
"/tmp",
OFlag::O_RDWR | OFlag::O_TMPFILE,
Mode::S_IRUSR | Mode::S_IWUSR,
)
.map_err(|e| {
event!(Level::WARN, "error opening temporary: {:?}", e);
io_other(e)
})?;
let data = serde_json::to_string(&app_config)?;
let data_len =
write(tmpfile, data.as_bytes()).map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
lseek64(tmpfile, 0, Whence::SeekSet)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
assert_eq!(data_len, data.len());
let tmpfile = format!("{}\0", tmpfile);
let arguments: &[&CStr] = &[
CStr::from_bytes_with_nul(b"yscloud-executable\0").unwrap(),
CStr::from_bytes_with_nul(b"--config-fd\0").unwrap(),
CStr::from_bytes_with_nul(tmpfile.as_bytes()).unwrap(),
];
event!(
Level::INFO,
"running {} {:?} -- {}",
package_id,
arguments,
data
);
if let Some(ref sandbox) = ext.sandboxing_strategy {
sandbox.preexec()?;
}
let env: &[&CStr] = &[
CStr::from_bytes_with_nul(b"RUST_BACKTRACE=1\0").unwrap(),
CStr::from_bytes_with_nul(b"YSCLOUD=1\0").unwrap(),
];
c.artifact.execute(arguments, env)?;
unreachable!();
}
pub fn exec_artifact(e: &ExecExtras, c: AppPreforkConfiguration) -> io::Result<Pid> {
match unsafe { fork() } {
Ok(ForkResult::Child) => {
if let Err(err) = exec_artifact_child(e, &c) {
event!(Level::WARN, "failed to execute: {:?}", err);
std::process::exit(1);
} else {
unreachable!();
}
}
Ok(ForkResult::Parent { child,.. }) => Ok(child),
Err(err) => Err(io::Error::new(io::ErrorKind::Other, err)),
}
}
| {
let mut mem_fd = MemFdOptions::new()
.cloexec(true)
.allow_sealing(true)
.with_capacity(capacity)
.set_mode(Mode::S_IRWXU | Mode::S_IRGRP | Mode::S_IXGRP | Mode::S_IROTH | Mode::S_IXOTH)
.open(name)
.map_err(|e| nix_error_to_io_error(e))?;
mem_fd
.seal(SealFlag::F_SEAL_SEAL | SealFlag::F_SEAL_SHRINK | SealFlag::F_SEAL_GROW)
.map_err(|e| nix_error_to_io_error(e))?;
Ok(ExecutableFactory {
storage: mem_fd.into(),
expected_path: Default::default(),
disk_linked: false,
})
} | identifier_body |
systems.rs | // будем юзать Behaviour Tree AI
//Главный скрипт работает по системе выбор-условие-действие (selector-condition-action).
//Выбор — своеобразный аналог оператора switch() в языках программирования.
// В «теле» элемента выбора происходит выбор одного из заданных наборов действий
// в зависимости от условия.
//Условие — проверка истинности заданного условия.
// Используется в начале каждого набора действий внутри элемента выбора.
// Если условие истинно — выполняется данный набор действий и выбор завершается.
// Если нет — происходит переход к следующему набору действий
//Действие — скрипт, запускающий другой скрипт (действие) с заданными параметрами.
// *В BT AI существует понятие базовых действий.
/*
SelectorBegin('AI Role 1');
SequenceBegin('Атака');
//видим врага и знаем его id
Condition(SeeEnemy && Enemy>0);
//смомтрим на него
Action( Action_LookAt, point_direction(x,y, Enemy.x, Enemy.y));
//стреляем в сторону врага 2 раза
Action( Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SelectorBegin('Подходим на оптимальное растояние');
//или
SequenceBegin('Враг слишком далеко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)>256);
Action(Action_MoveTo, Enemy.x-lengthdir_x(128, direction), Enemy.y-lengthdir_y(128, direction), highSpeed);
SequenceEnd();
//или
SequenceBegin('Враг слишком близко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)<64);
//идем назад
Action(Action_MoveTo, x-lengthdir_x(64, direction), y-lengthdir_y(64, direction), highSpeed);
SequenceEnd();
SequenceBegin('маневр');
//иначе просто маневрируем, чтобы сложнее было попасть
Action( Action_MoveTo, x+irandom_range(-64, 64), y+irandom_range(-64, 64), highSpeed);
SequenceEnd();
SelectorEnd();
//стреляем в сторону врага 4 раза
Action(Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SequenceEnd();
SelectorEnd();
*/
//Selector — оператор выбора набора действий
//Sequence — набор действий
//Condition — проверка условия
//Action — действие. вызов скрипта(первый аргумент) с параметрами (остальные аргументы)
/*
http://www.pvsm.ru/robototehnika/161885/print/
Узлы BT называют [10] задачами или поведениями. Каждая задача может иметь четыре состояния:
«Успех», если задача выполнена успешно;
- выкинуть нахер, заменитьт ошибкой. «Неудача», если условие не выполнено или задача, по какой-то причине, невыполнима;
«В работе», если задача запущена в работу и ожидает завершения
«Ошибка», если в программе возникает неизвестная ошибка.
Результат работы любого узла всегда передается родительскому узлу, расположенному на уровень выше.
Дерево просматривается с самого верхнего узла – корня. От него производится поиск в глубину начиная
с левой ветви дерева. Если у одного узла есть несколько подзадач, они исполняются слева направо.
Среди узлов выделяют следующие типы:
-действие (action),
-узел исполнения последовательности (sequence),
-параллельный узел (parallel),
-селектор (selector),
-условие (condition),
-инвертор (inverter).
Действие представляет собой запись переменных или какое-либо движение.
Узлы последовательностей поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Неудача», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Успех».
Узлы параллельных действий исполняют поведения дочерних узлов до тех пор,
пока заданное количество из них не вернет статусы «Неудача» или «Успех».
Селекторы поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Успех», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Неудача».
Условия содержат критерий, по которому определяется исход, и переменную.
Например, условие «Есть ли в этой комнате человек?» перебирает все объекты в комнате
и сравнивает их с переменной «Человек».
Узлы инверсии выполняют функцию оператора NOT.
*/
use tinyecs::*;
use time::{PreciseTime, Duration};
use WORLD_SPEED;
use ::manager::components::Position;
use ::monster::components::MonsterClass;
use ::monster::components::MonsterAttributes;
use ::monster::components::SelectionTree;
use ::monster::components::BehaviourEvent;
use ::monster::components::BehaviourState;
/// Система восприятия
pub struct _PerceptionSystem;
// тут типа чекает окружение, и помечает объекты что попадают в поле зения.
impl System for _PerceptionSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass)
}
fn process_one(&mut self, _entity: &mut Entity) {
// здесь тоже меняются события.
// сканируем вокруг, может есть еда или вода или др. монстр или ОБОРИГЕН!
}
}
/// Выбиральщик состояний дерева поведения
// используя код программатора SelectionTree, переключает состояния.
pub struct SelectorSystem;
impl System for SelectorSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass, SelectionTree, BehaviourEvent, BehaviourState)
}
// fn process_no_entities(&mut self) {
// println!("instaced buffer render system must work, but no entities!");
// }
// fn process_no_data(&mut self) {
// println!("instaced buffer render system must work, but no data!");
// }
fn process_one(&mut self, entity: &mut Entity) {
let mut selection_tree = entity.get_component::<SelectionTree>();
let mut behaviour_state = entity.get_component::<BehaviourState>(); // состояние
let behaviour_event = entity.get_component::<BehaviourEvent>(); // события
let len = selection_tree.selector.len();
if len > 0 {
// ткущий узел.
if selection_tree.curr_selector < 0i32 {
selection_tree.curr_selector = 0i32;
println!("ошибка/инициализация текущего указателя, теперь он {}", 0i32);
} else {
/*event, state
let sel = vec![[6, 2], [5, 1]];*/
let index: usize = selection_tree.curr_selector as usize;
let curr_cell = selection_tree.selector[index]; //[6, 2]
let v_event = curr_cell[0];
let v_state = curr_cell[1];
// проверить нет ли ошибки в селекторе/программаторе. или первый запуск/инициализация.
let curr_event = behaviour_event.event; // считываем текущий событие/event
if curr_event == v_event {
// меняем состояние, на соответствующее.
behaviour_state.state = v_state;
println!("обнаружено событие {}", v_event);
println!("переключаю состояние на {}", v_state);
// сдвигаем curr_selector, переходим к сл. ячейке.
let shl: i32 = (len - 1) as i32;
if selection_tree.curr_selector < shl { selection_tree.curr_selector += 1; } else {
selection_tree.curr_selector = 0;
}
}
}
}
}
}
/// Активатор. Приводит в действие.
// считывает состояние и выполняет его, либо продолжает выполнение.
// система поведения.
pub struct BehaviorSystem {
pub behavior_time: PreciseTime,
}
impl System for BehaviorSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass, BehaviourState, Position)
}
fn process_one(&mut self, entity: &mut Entity) {
// смотрит текущее состояние и выполняет действие.
// тут заставляем монстра ходить, пить, искать.
// 0. Инициализация, ошибка.
// 1. Сон. Монстр ждет, в этот момент с ним ничего не происходит.
// 2. Бодрствование. Случайное перемещение по полигону.
// 3. Поиск пищи.
// 4. Поиск воды.
// 5. Прием пищи.
// 6. Прием воды.
// 7. Перемещение к цели.
// 8. Проверка достижения цели.
if self.behavior_time.to(PreciseTime::now()) > Duration::seconds(5 * WORLD_SPEED) {
let behaviour_state = entity.get_component::<BehaviourState>(); // состояние
let mut position = entity.get_component::<Position>();
match behaviour_state.state {
1 => {
println!("...zzz...");
},
2 => {
// тут заставляем монстра ходить туда-сюда, бесцельно, куда подует)
if position.x > (position.y * 2f32) && position.y < 139f32 {
position.y += 1f32;
} else if position.x < 139f32 {
position.x += 1f32;
}
println!("x:{}, y:{}", position.x, position.y);
/* движение по овальной траектории.
x = x_+x_radius*cos(r_ang+2);
y = y_+y_radius*sin(r_ang);
X1 = X + R * 0.5;
Y1 = Y + 1.3 * R * 0.8;
0.5 это синус 30
0.8 это косинус 30
R - хз. например 20 клеток.
X, Y - это текущие координаты.
*/
// let x1: f32 = position.x + 20f32 * 0.5;
// let y1: f32 = position.y + 1.3f32 * 20f32 *0.8;
// position.x = x1;
// position.y = y1;
// println!("x:{}, y:{}", position.x, position.y);
},
_ => {},
}
// фиксируем текущее время
self.behavior_time = PreciseTime::now();
}
}
}
/// Генерация событий
// Создаем события, проверяем параметры.
pub struct EventSystem {
pub event_time: PreciseTime,
pub event_last: u32,
// 0 - инициализация/ошибка
}
impl System for EventSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterAttributes, BehaviourEvent)
}
fn process_one(&mut self, entity: &mut Entity) {
// 0. Инициализация, ошибка.
// 1. Обнаружена еда.
// 2. Обнаружена вода.
// 3. Наступил голод.
// 4. Наступила жажда.
// 5. Утомился.
// 6. Нет событий.
// 7. Монстр насытился.
// 8. Монстр напился.
if self.event_time.to(PreciseTime::now()) > Duration::seconds(WORLD_SPEED) {
let mut behaviour_event = entity.get_component::<BehaviourEvent>(); // события
let monster_attr = entity.get_component::<MonsterAttributes>(); // события
if behaviour_event.event == 0 {
// проверяем ошибки/инициализация
behaviour_event.event = 6;
println!("ошибка/инициализация текущего события, теперь он {}", 6);
} else if monster_attr.power < 960 && self.event_last!= 5 {
behaviour_event.event = 5; // наступает событие - УСТАЛ
self.event_last = behaviour_event.event;
println!("Новое событие: монстр устал.");
} else if monster_attr.power > 990 && self.event_last!= 6 {
behaviour_event.event = 6;
self.event_last = behaviour_event.event;
println!("Новое событие: монстр отдохнул.");
}
// фиксируем текущее время
self.event_time = PreciseTime::now();
}
}
}
| conditional_block |
||
systems.rs | // будем юзать Behaviour Tree AI
//Главный скрипт работает по системе выбор-условие-действие (selector-condition-action).
//Выбор — своеобразный аналог оператора switch() в языках программирования.
// В «теле» элемента выбора происходит выбор одного из заданных наборов действий
// в зависимости от условия.
//Условие — проверка истинности заданного условия.
// Используется в начале каждого набора действий внутри элемента выбора.
// Если условие истинно — выполняется данный набор действий и выбор завершается.
// Если нет — происходит переход к следующему набору действий
//Действие — скрипт, запускающий другой скрипт (действие) с заданными параметрами.
// *В BT AI существует понятие базовых действий.
/*
SelectorBegin('AI Role 1');
SequenceBegin('Атака');
//видим врага и знаем его id
Condition(SeeEnemy && Enemy>0);
//смомтрим на него
Action( Action_LookAt, point_direction(x,y, Enemy.x, Enemy.y));
//стреляем в сторону врага 2 раза
Action( Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SelectorBegin('Подходим на оптимальное растояние');
//или
SequenceBegin('Враг слишком далеко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)>256);
Action(Action_MoveTo, Enemy.x-lengthdir_x(128, direction), Enemy.y-lengthdir_y(128, direction), highSpeed);
SequenceEnd();
//или
SequenceBegin('Враг слишком близко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)<64);
//идем назад
Action(Action_MoveTo, x-lengthdir_x(64, direction), y-lengthdir_y(64, direction), highSpeed);
SequenceEnd();
SequenceBegin('маневр');
//иначе просто маневрируем, чтобы сложнее было попасть
Action( Action_MoveTo, x+irandom_range(-64, 64), y+irandom_range(-64, 64), highSpeed);
SequenceEnd();
SelectorEnd();
//стреляем в сторону врага 4 раза
Action(Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SequenceEnd();
SelectorEnd();
*/
//Selector — оператор выбора набора действий
//Sequence — набор действий
//Condition — проверка условия
//Action — действие. вызов скрипта(первый аргумент) с параметрами (остальные аргументы)
/*
http://www.pvsm.ru/robototehnika/161885/print/
Узлы BT называют [10] задачами или поведениями. Каждая задача может иметь четыре состояния:
«Успех», если задача выполнена успешно;
- выкинуть нахер, заменитьт ошибкой. «Неудача», если условие не выполнено или задача, по какой-то причине, невыполнима;
«В работе», если задача запущена в работу и ожидает завершения
«Ошибка», если в программе возникает неизвестная ошибка.
Результат работы любого узла всегда передается родительскому узлу, расположенному на уровень выше.
Дерево просматривается с самого верхнего узла – корня. От него производится поиск в глубину начиная
с левой ветви дерева. Если у одного узла есть несколько подзадач, они исполняются слева направо.
Среди узлов выделяют следующие типы:
-действие (action),
-узел исполнения последовательности (sequence),
-параллельный узел (parallel),
-селектор (selector),
-условие (condition),
-инвертор (inverter).
Действие представляет собой запись переменных или какое-либо движение.
Узлы последовательностей поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Неудача», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Успех».
Узлы параллельных действий исполняют поведения дочерних узлов до тех пор,
пока заданное количество из них не вернет статусы «Неудача» или «Успех».
Селекторы поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Успех», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Неудача».
Условия содержат критерий, по которому определяется исход, и переменную.
Например, условие «Есть ли в этой комнате человек?» перебирает все объекты в комнате
и сравнивает их с переменной «Человек».
Узлы инверсии выполняют функцию оператора NOT.
*/
use tinyecs::*;
use time::{PreciseTime, Duration};
use WORLD_SPEED;
use ::manager::components::Position;
use ::monster::components::MonsterClass;
use ::monster::components::MonsterAttributes;
use ::monster::components::SelectionTree;
use ::monster::components::BehaviourEvent;
use ::monster::components::BehaviourState;
/// Система восприятия
pub struct _PerceptionSystem;
// тут типа чекает окружение, и помечает объекты что попадают в поле зения.
impl System for _PerceptionSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass)
}
fn process_one(&mut self, _entity: &mut Entity) {
// здесь тоже меняются события.
// сканируем вокруг, может есть еда или вода или др. монстр или ОБОРИГЕН!
}
}
/// Выбиральщик состояний дерева поведения
// используя код программатора SelectionTree, переключает состояния.
pub struct SelectorSystem;
impl System for SelectorSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass, SelectionTree, BehaviourEvent, BehaviourState)
}
// fn process_no_entities(&mut self) {
// println!("instaced buffer render system must work, but no entities!");
// }
// fn process_no_data(&mut self) {
// println!("instaced buffer render system must work, but no data!");
// }
fn process_one(&mut self, entity: &mut Entity) {
let mut selection_tree = entity.get_component::<SelectionTree>();
let mut behaviour_state = entity.get_component::<BehaviourState>(); // состояние
let behaviour_event = entity.get_component::<BehaviourEvent>(); // события
let len = selection_tree.selector.len();
if len > 0 {
// ткущий узел.
if selection_tree.curr_selector < 0i32 {
selection_tree.curr_selector = 0i32;
println!("ошибка/инициализация текущего указателя, теперь он {}", 0i32);
} else {
/*event, state
let sel = vec![[6, 2], [5, 1]];*/
let index: usize = selection_tree.curr_selector as usize;
let curr_cell = selection_tree.selector[index]; //[6, 2]
let v_event = curr_cell[0];
let v_state = curr_cell[1];
// проверить нет ли ошибки в селекторе/программаторе. или первый запуск/инициализация.
let curr_event = behaviour_event.event; // считываем текущий событие/event
if curr_event == v_event {
// меняем состояние, на соответствующее.
behaviour_state.state = v_state;
println!("обнаружено событие {}", v_event);
println!("переключаю состояние на {}", v_state);
// сдвигаем curr_selector, переходим к сл. ячейке.
let shl: i32 = (len - 1) as i32;
if selection_tree.curr_selector < shl { selection_tree.curr_selector += 1; } else {
selection_tree.curr_selector = 0;
}
}
}
}
}
}
/// Активатор. Приводит в действие.
// считывает состояние и выполняет его, либо продолжает выполнение.
// система поведения.
pub struct BehaviorSystem {
pub behavior_time: PreciseTime,
}
impl System for BehaviorSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass, BehaviourState, Position)
}
fn process_one(&mut self, entity: &mut Entity) {
// смотрит текущее состояние и выполняет действие.
// тут заставляем монстра ходить, пить, искать.
// 0. Инициализация, ошибка.
// 1. Сон. Монстр ждет, в этот момент с ним ничего не происходит.
// 2. Бодрствование. Случайное перемещение по полигону.
// 3. Поиск пищи.
// 4. Поиск воды.
// 5. Прием пищи.
// 6. Прием воды.
// 7. Перемещение к цели.
// 8. Проверка достижения цели.
if self.behavior_time.to(PreciseTime::now()) > Duration::seconds(5 * WORLD_SPEED) {
let behaviour_state = entity.get_component::<BehaviourState>(); // состояние
let mut position = entity.get_component::<Position>();
match behaviour_state.state {
1 => {
println!("...zzz...");
},
2 => {
// тут заставляем монстра ходить туда-сюда, бесцельно, куда подует)
if position.x > (position.y * 2f32) && position.y < 139f32 {
position.y += 1f32;
} else if position.x < 139f32 {
position.x += 1f32;
}
println!("x:{}, y:{}", position.x, position.y);
/* движение по овальной траектории.
x = x_+x_radius*cos(r_ang+2);
y = y_+y_radius*sin(r_ang);
X1 = X + R * 0.5;
Y1 = Y + 1.3 * R * 0.8;
0.5 это синус 30
0.8 это косинус 30
R - хз. например 20 клеток.
X, Y - это текущие координаты.
*/
// let x1: f32 = position.x + 20f32 * 0.5;
// let y1: f32 = position.y + 1.3f32 * 20f32 *0.8;
// position.x = x1;
// position.y = y1;
// println!("x:{}, y:{}", position.x, position.y);
},
_ => {},
}
// фиксируем текущее время
self.behavior_time = PreciseTime::now();
}
}
}
/// Генерация событий
// Создаем события, проверяем параметры.
pub struct EventSystem {
pub event_time: PreciseTime,
pub event_last: u32,
// 0 - инициализация/ошибка
}
impl System for EventSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterAttributes, BehaviourEvent)
}
fn process_one(&mut self, entity: &mut Entity) {
// 0. Инициализация, ошибка.
// 1. Обнаружена еда.
// 2. Обнаружена вода.
// 3. Наступил голод.
// 4. Наступила жажда.
// 5. Утомился.
// 6. Нет событий.
// 7. Монстр насытился.
// 8. Монстр напился.
if self.event_time.to(PreciseTime::now()) > Duration::seconds(WORLD_SPEED) {
let mut behaviour_event = entity.get_component::<BehaviourEvent>(); // события
let monster_attr = entity.get_component::<MonsterAttributes>(); // события
if behaviour_event.event == 0 {
// проверяем ошибки/инициализация
behaviour_event.event = 6;
println!("ошибка/инициализация текущего события, теперь он {}", 6);
} else if monster_attr.power < 960 && self.event_last!= 5 {
behaviour_event.event = 5; // наступает событие - УСТАЛ
self.event_last = behaviour_event.event;
println!("Новое событие: монстр устал.");
} else if monster_attr.power > 990 && self.event_last!= 6 {
behaviour_event.event = 6;
self.event_last = behaviour_event.event;
println!("Новое событие: монстр отдохнул.");
}
// фиксируем текущее время
self.event_time = PreciseTime::now();
}
}
}
| identifier_body |
||
systems.rs | // будем юзать Behaviour Tree AI
//Главный скрипт работает по системе выбор-условие-действие (selector-condition-action).
//Выбор — своеобразный аналог оператора switch() в языках программирования.
// В «теле» элемента выбора происходит выбор одного из заданных наборов действий
// в зависимости от условия.
//Условие — проверка истинности заданного условия.
// Используется в начале каждого набора действий внутри элемента выбора.
// Если условие истинно — выполняется данный набор действий и выбор завершается.
// Если нет — происходит переход к следующему набору действий
//Действие — скрипт, запускающий другой скрипт (действие) с заданными параметрами.
// *В BT AI существует понятие базовых действий.
/*
SelectorBegin('AI Role 1');
SequenceBegin('Атака');
//видим врага и знаем его id
Condition(SeeEnemy && Enemy>0);
//смомтрим на него
Action( Action_LookAt, point_direction(x,y, Enemy.x, Enemy.y));
//стреляем в сторону врага 2 раза
Action( Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SelectorBegin('Подходим на оптимальное растояние');
//или
SequenceBegin('Враг слишком далеко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)>256);
Action(Action_MoveTo, Enemy.x-lengthdir_x(128, direction), Enemy.y-lengthdir_y(128, direction), highSpeed);
SequenceEnd();
//или
SequenceBegin('Враг слишком близко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)<64);
//идем назад
Action(Action_MoveTo, x-lengthdir_x(64, direction), y-lengthdir_y(64, direction), highSpeed);
SequenceEnd();
SequenceBegin('маневр');
//иначе просто маневрируем, чтобы сложнее было попасть
Action( Action_MoveTo, x+irandom_range(-64, 64), y+irandom_range(-64, 64), highSpeed);
SequenceEnd();
SelectorEnd();
//стреляем в сторону врага 4 раза
Action(Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SequenceEnd();
SelectorEnd();
*/
//Selector — оператор выбора набора действий
//Sequence — набор действий
//Condition — проверка условия
//Action — действие. вызов скрипта(первый аргумент) с параметрами (остальные аргументы)
/*
http://www.pvsm.ru/robototehnika/161885/print/
Узлы BT называют [10] задачами или поведениями. Каждая задача может иметь четыре состояния:
«Успех», если задача выполнена успешно;
- выкинуть нахер, заменитьт ошибкой. «Неудача», если условие не выполнено или задача, по какой-то причине, невыполнима;
«В работе», если задача запущена в работу и ожидает завершения
«Ошибка», если в программе возникает неизвестная ошибка.
Результат работы любого узла всегда передается родительскому узлу, расположенному на уровень выше.
Дерево просматривается с самого верхнего узла – корня. От него производится поиск в глубину начиная
с левой ветви дерева. Если у одного узла есть несколько подзадач, они исполняются слева направо.
Среди узлов выделяют следующие типы:
-действие (action),
-узел исполнения последовательности (sequence),
-параллельный узел (parallel),
-селектор (selector),
-условие (condition),
-инвертор (inverter).
Действие представляет собой запись переменных или какое-либо движение.
Узлы последовательностей поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Неудача», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Успех».
Узлы параллельных действий исполняют поведения дочерних узлов до тех пор,
пока заданное количество из них не вернет статусы «Неудача» или «Успех».
Селекторы поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Успех», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Неудача».
Условия содержат критерий, по которому определяется исход, и переменную.
Например, условие «Есть ли в этой комнате человек?» перебирает все объекты в комнате
и сравнивает их с переменной «Человек».
Узлы инверсии выполняют функцию оператора NOT.
*/
use tinyecs::*;
use time::{PreciseTime, Duration};
use WORLD_SPEED;
use ::manager::components::Position;
use ::monster::components::MonsterClass;
use ::monster::components::MonsterAttributes;
use ::monster::components::SelectionTree;
use ::monster::components::BehaviourEvent;
use ::monster::components::BehaviourState;
/// Система восприятия
pub struct _PerceptionSystem;
// тут типа чекает окружение, и помечает объекты что попадают в поле зения.
impl System for _PerceptionSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass)
}
fn process_one(&mut self, _entity: &mut Entity) {
// здесь тоже меняются события.
// сканируем вокруг, может есть еда или вода или др. монстр или ОБОРИГЕН!
}
}
/// Выбиральщик состояний дерева поведения
// используя код программатора SelectionTree, переключает состояния.
pub struct SelectorSystem;
impl System for SelectorSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass, SelectionTree, BehaviourEvent, BehaviourState)
}
// fn process_no_entities(&mut self) {
// println!("instaced buffer render system must work, but no entities!");
// }
// fn process_no_data(&mut self) {
// println!("instaced buffer render system must work, but no data!");
// }
fn process_one(&mut self, entity: &mut Entity) {
let mut selection_tree = entity.get_component::<SelectionTree>();
let mut behaviour_state = entity.get_component::<BehaviourState>(); // состояние
let behaviour_event = entity.get_component::<BehaviourEvent>(); // события
let len = selection_tree.selector.len();
if len > 0 {
// ткущий узел.
if selection_tree.curr_selector < 0i32 {
selection_tree.curr_selector = 0i32;
println!("ошибка/инициализация текущего указателя, теперь он {}", 0i32);
} else {
/*event, state
let sel = vec![[6, 2], [5, 1]];*/
let index: usize = selection_tree.curr_selector as usize;
let curr_cell = selection_tree.selector[index]; //[6, 2]
let v_event = curr_cell[0];
let v_state = curr_cell[1];
// проверить нет ли ошибки в селекторе/программаторе. или первый запуск/инициализация.
let curr_event = behaviour_event.event; // считываем текущий событие/event
if curr_event == v_event {
// меняем состояние, на соответствующее.
behaviour_state.state = v_state;
println!("обнаружено событие {}", v_event);
println!("переключаю состояние на {}", v_state);
// сдвигаем curr_selector, переходим к сл. ячейке.
let shl: i32 = (len - 1) as i32;
if selection_tree.curr_selector < shl { selection_tree.curr_selector += 1; } else {
selection_tree.curr_selector = 0;
}
}
}
}
}
}
/// Активатор. Приводит в действие.
// считывает состояние и выполняет его, либо продолжает выполнение.
// система поведения.
pub struct BehaviorSystem {
pub behavior_time: PreciseTime,
}
impl System for BehaviorSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass, BehaviourState, Position)
}
fn process_one(&mut self, entity: &mut Entity) {
// смотрит текущее состояние и выполняет действие.
// тут заставляем монстра ходить, пить, искать.
// 0. Инициализация, ошибка.
// 1. Сон. Монстр ждет, в этот момент с ним ничего не происходит.
// 2. Бодрствование. Случайное перемещение по полигону.
// 3. Поиск пищи.
// 4. Поиск воды.
// 5. Прием пищи.
// 6. Прием воды.
// 7. Перемещение к цели.
// 8. Проверка достижения цели.
if self.behavior_time.to(PreciseTime::now()) > Duration::seconds(5 * WORLD_SPEED) {
let behaviour_state = entity.get_component::<BehaviourState>(); // состояние
let mut position = entity.get_component::<Position>();
match behaviour_state.state {
1 => {
println!("...zzz...");
},
2 => {
// тут заставляем монстра ходить туда-сюда, бесцельно, куда подует)
if position.x > (position.y * 2f32) && position.y < 139f32 {
position.y += 1f32;
} else if position.x < 139f32 {
position.x += 1f32;
}
println!("x:{}, y:{}", position.x, position.y);
/* движение по овальной траектории.
x = x_+x_radius*cos(r_ang+2);
y = y_+y_radius*sin(r_ang);
X1 = X + R * 0.5;
Y1 = Y + 1.3 * R * 0.8;
0.5 это синус 30
0.8 это косинус 30
R - хз. например 20 клеток.
X, Y - это текущие координаты.
*/
// let x1: f32 = position.x + 20f32 * 0.5;
// let y1: f32 = position.y + 1.3f32 * 20f32 *0.8;
// position.x = x1;
// position.y = y1;
// println!("x:{}, y:{}", position.x, position.y);
},
_ => {},
}
// фиксируем текущее время
self.behavior_time = PreciseTime::now();
}
}
}
/// Генерация событий
// Создаем события, проверяем параметры.
pub struct EventSystem {
pub event_time: PreciseTime,
pub event_last: u32,
// 0 - инициализация/ошибка
}
impl System for EventSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterAttributes, BehaviourEvent)
}
fn process_one(&mut self, entity: &mut Entity) {
// 0. Инициализация, ошибка.
// 1. Обнаружена еда.
// 2. Обнаружена вода.
// 3. Наступил голод.
// 4. Наступила жа | // 5. Утомился.
// 6. Нет событий.
// 7. Монстр насытился.
// 8. Монстр напился.
if self.event_time.to(PreciseTime::now()) > Duration::seconds(WORLD_SPEED) {
let mut behaviour_event = entity.get_component::<BehaviourEvent>(); // события
let monster_attr = entity.get_component::<MonsterAttributes>(); // события
if behaviour_event.event == 0 {
// проверяем ошибки/инициализация
behaviour_event.event = 6;
println!("ошибка/инициализация текущего события, теперь он {}", 6);
} else if monster_attr.power < 960 && self.event_last!= 5 {
behaviour_event.event = 5; // наступает событие - УСТАЛ
self.event_last = behaviour_event.event;
println!("Новое событие: монстр устал.");
} else if monster_attr.power > 990 && self.event_last!= 6 {
behaviour_event.event = 6;
self.event_last = behaviour_event.event;
println!("Новое событие: монстр отдохнул.");
}
// фиксируем текущее время
self.event_time = PreciseTime::now();
}
}
}
| жда.
| identifier_name |
systems.rs | // будем юзать Behaviour Tree AI
//Главный скрипт работает по системе выбор-условие-действие (selector-condition-action).
//Выбор — своеобразный аналог оператора switch() в языках программирования.
// В «теле» элемента выбора происходит выбор одного из заданных наборов действий
// в зависимости от условия.
//Условие — проверка истинности заданного условия.
// Используется в начале каждого набора действий внутри элемента выбора.
// Если условие истинно — выполняется данный набор действий и выбор завершается.
// Если нет — происходит переход к следующему набору действий
//Действие — скрипт, запускающий другой скрипт (действие) с заданными параметрами.
// *В BT AI существует понятие базовых действий.
/*
SelectorBegin('AI Role 1');
SequenceBegin('Атака');
//видим врага и знаем его id
Condition(SeeEnemy && Enemy>0);
//смомтрим на него
Action( Action_LookAt, point_direction(x,y, Enemy.x, Enemy.y));
//стреляем в сторону врага 2 раза
Action( Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SelectorBegin('Подходим на оптимальное растояние');
//или
SequenceBegin('Враг слишком далеко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)>256);
Action(Action_MoveTo, Enemy.x-lengthdir_x(128, direction), Enemy.y-lengthdir_y(128, direction), highSpeed);
SequenceEnd();
//или
SequenceBegin('Враг слишком близко');
Condition(point_distance(x,y, Enemy.x, Enemy.y)<64);
//идем назад
Action(Action_MoveTo, x-lengthdir_x(64, direction), y-lengthdir_y(64, direction), highSpeed);
SequenceEnd();
SequenceBegin('маневр');
//иначе просто маневрируем, чтобы сложнее было попасть
Action( Action_MoveTo, x+irandom_range(-64, 64), y+irandom_range(-64, 64), highSpeed);
SequenceEnd();
SelectorEnd();
//стреляем в сторону врага 4 раза
Action(Action_Shoot, point_direction(x,y, Enemy.x, Enemy.y), 2);
SequenceEnd();
SelectorEnd();
*/
//Selector — оператор выбора набора действий
//Sequence — набор действий
//Condition — проверка условия
//Action — действие. вызов скрипта(первый аргумент) с параметрами (остальные аргументы)
/*
http://www.pvsm.ru/robototehnika/161885/print/
Узлы BT называют [10] задачами или поведениями. Каждая задача может иметь четыре состояния:
«Успех», если задача выполнена успешно;
- выкинуть нахер, заменитьт ошибкой. «Неудача», если условие не выполнено или задача, по какой-то причине, невыполнима;
«В работе», если задача запущена в работу и ожидает завершения
«Ошибка», если в программе возникает неизвестная ошибка.
Результат работы любого узла всегда передается родительскому узлу, расположенному на уровень выше.
Дерево просматривается с самого верхнего узла – корня. От него производится поиск в глубину начиная
с левой ветви дерева. Если у одного узла есть несколько подзадач, они исполняются слева направо.
Среди узлов выделяют следующие типы:
-действие (action),
-узел исполнения последовательности (sequence),
-параллельный узел (parallel),
-селектор (selector),
-условие (condition),
-инвертор (inverter).
Действие представляет собой запись переменных или какое-либо движение.
Узлы последовательностей поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Неудача», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Успех».
Узлы параллельных действий исполняют поведения дочерних узлов до тех пор,
пока заданное количество из них не вернет статусы «Неудача» или «Успех».
Селекторы поочередно исполняют поведения каждого дочернего узла до тех пор,
пока один из них не выдаст значение «Успех», «В работе» или «Ошибка».
Если этого не произошло, возвращает значение «Неудача».
Условия содержат критерий, по которому определяется исход, и переменную.
Например, условие «Есть ли в этой комнате человек?» перебирает все объекты в комнате
и сравнивает их с переменной «Человек».
Узлы инверсии выполняют функцию оператора NOT.
*/
use tinyecs::*;
use time::{PreciseTime, Duration};
use WORLD_SPEED;
use ::manager::components::Position;
use ::monster::components::MonsterClass;
use ::monster::components::MonsterAttributes;
use ::monster::components::SelectionTree;
use ::monster::components::BehaviourEvent;
use ::monster::components::BehaviourState;
/// Система восприятия
pub struct _PerceptionSystem;
// тут типа чекает окружение, и помечает объекты что попадают в поле зения.
impl System for _PerceptionSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass)
}
fn process_one(&mut self, _entity: &mut Entity) {
// здесь тоже меняются события.
// сканируем вокруг, может есть еда или вода или др. монстр или ОБОРИГЕН!
}
}
/// Выбиральщик состояний дерева поведения
// используя код программатора SelectionTree, переключает состояния.
pub struct SelectorSystem;
impl System for SelectorSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass, SelectionTree, BehaviourEvent, BehaviourState)
}
// fn process_no_entities(&mut self) {
// println!("instaced buffer render system must work, but no entities!");
// }
// fn process_no_data(&mut self) {
// println!("instaced buffer render system must work, but no data!");
// }
fn process_one(&mut self, entity: &mut Entity) {
let mut selection_tree = entity.get_component::<SelectionTree>();
let mut behaviour_state = entity.get_component::<BehaviourState>(); // состояние
let behaviour_event = entity.get_component::<BehaviourEvent>(); // события
let len = selection_tree.selector.len();
if len > 0 {
// ткущий узел.
if selection_tree.curr_selector < 0i32 {
selection_tree.curr_selector = 0i32;
println!("ошибка/инициализация текущего указателя, теперь он {}", 0i32);
} else {
/*event, state
let sel = vec![[6, 2], [5, 1]];*/
let index: usize = selection_tree.curr_selector as usize;
let curr_cell = selection_tree.selector[index]; //[6, 2]
let v_event = curr_cell[0];
let v_state = curr_cell[1];
// проверить нет ли ошибки в селекторе/программаторе. или первый запуск/инициализация.
let curr_event = behaviour_event.event; // считываем текущий событие/event
if curr_event == v_event {
// меняем состояние, на соответствующее.
behaviour_state.state = v_state;
println!("обнаружено событие {}", v_event);
println!("переключаю состояние на {}", v_state);
// сдвигаем curr_selector, переходим к сл. ячейке.
let shl: i32 = (len - 1) as i32;
if selection_tree.curr_selector < shl { selection_tree.curr_selector += 1; } else {
selection_tree.curr_selector = 0;
}
}
}
}
}
}
/// Активатор. Приводит в действие.
// считывает состояние и выполняет его, либо продолжает выполнение.
// система поведения.
pub struct BehaviorSystem {
pub behavior_time: PreciseTime,
}
impl System for BehaviorSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterClass, BehaviourState, Position)
}
fn process_one(&mut self, entity: &mut Entity) {
// смотрит текущее состояние и выполняет действие.
// тут заставляем монстра ходить, пить, искать.
// 0. Инициализация, ошибка.
// 1. Сон. Монстр ждет, в этот момент с ним ничего не происходит.
// 2. Бодрствование. Случайное перемещение по полигону.
// 3. Поиск пищи.
// 4. Поиск воды.
// 5. Прием пищи.
// 6. Прием воды.
// 7. Перемещение к цели.
// 8. Проверка достижения цели.
if self.behavior_time.to(PreciseTime::now()) > Duration::seconds(5 * WORLD_SPEED) {
let behaviour_state = entity.get_component::<BehaviourState>(); // состояние
let mut position = entity.get_component::<Position>();
match behaviour_state.state {
1 => {
println!("...zzz...");
},
2 => {
// тут заставляем монстра ходить туда-сюда, бесцельно, куда подует)
if position.x > (position.y * 2f32) && position.y < 139f32 {
position.y += 1f32;
} else if position.x < 139f32 {
position.x += 1f32;
}
println!("x:{}, y:{}", position.x, position.y);
/* движение по овальной траектории.
x = x_+x_radius*cos(r_ang+2);
y = y_+y_radius*sin(r_ang);
X1 = X + R * 0.5;
Y1 = Y + 1.3 * R * 0.8;
0.5 это синус 30
0.8 это косинус 30
R - хз. например 20 клеток.
X, Y - это текущие координаты.
*/
// let x1: f32 = position.x + 20f32 * 0.5;
// let y1: f32 = position.y + 1.3f32 * 20f32 *0.8;
// position.x = x1;
// position.y = y1;
// println!("x:{}, y:{}", position.x, position.y);
},
_ => {},
}
// фиксируем текущее время
self.behavior_time = PreciseTime::now();
}
}
}
/// Генерация событий
// Создаем события, проверяем параметры.
pub struct EventSystem {
pub event_time: PreciseTime,
pub event_last: u32,
// 0 - инициализация/ошибка
}
impl System for EventSystem {
fn aspect(&self) -> Aspect {
aspect_all!(MonsterAttributes, BehaviourEvent)
}
fn process_one(&mut self, entity: &mut Entity) {
// 0. Инициализация, ошибка.
// 1. Обнаружена еда.
// 2. Обнаружена вода.
// 3. Наступил голод.
// 4. Наступила жажда.
// 5. Утомился.
// 6. Нет событий.
// 7. Монстр насытился.
// 8. Монстр напился. | behaviour_event.event = 6;
println!("ошибка/инициализация текущего события, теперь он {}", 6);
} else if monster_attr.power < 960 && self.event_last!= 5 {
behaviour_event.event = 5; // наступает событие - УСТАЛ
self.event_last = behaviour_event.event;
println!("Новое событие: монстр устал.");
} else if monster_attr.power > 990 && self.event_last!= 6 {
behaviour_event.event = 6;
self.event_last = behaviour_event.event;
println!("Новое событие: монстр отдохнул.");
}
// фиксируем текущее время
self.event_time = PreciseTime::now();
}
}
} | if self.event_time.to(PreciseTime::now()) > Duration::seconds(WORLD_SPEED) {
let mut behaviour_event = entity.get_component::<BehaviourEvent>(); // события
let monster_attr = entity.get_component::<MonsterAttributes>(); // события
if behaviour_event.event == 0 {
// проверяем ошибки/инициализация | random_line_split |
builder.rs | //! builders for tag path and tag
use crate::DebugLevel;
use std::fmt;
pub use anyhow::Result;
/// builder to build tag full path
///
/// # Examples
/// ```rust,ignore
/// use plctag::builder::*;
/// use plctag::RawTag;
///
/// fn main() {
/// let timeout = 100;
/// let path = PathBuilder::default()
/// .protocol(Protocol::EIP)
/// .gateway("192.168.1.120")
/// .plc(PlcKind::ControlLogix)
/// .name("MyTag1")
/// .element_size(16)
/// .element_count(1)
/// .path("1,0")
/// .read_cache_ms(0)
/// .build()
/// .unwrap();
/// let tag = RawTag::new(path, timeout).unwrap();
/// let status = tag.status();
/// assert!(status.is_ok());
/// }
///
/// ```
#[derive(Default, Debug)]
pub struct PathBuilder {
protocol: Option<Protocol>,
debug: Option<DebugLevel>,
elem_count: Option<usize>,
elem_size: Option<usize>,
read_cache_ms: Option<usize>,
plc: Option<PlcKind>,
name: Option<String>,
path: Option<String>,
gateway: Option<String>,
use_connected_msg: Option<bool>,
}
impl PathBuilder {
/// generic attribute.
/// defining the current debugging level.
/// please use [`plc::set_debug_level`](../plc/fn.set_debug_level.html) instead.
#[deprecated]
#[inline]
pub fn debug(&mut self, level: DebugLevel) -> &mut Self |
/// generic attribute.
/// Required. Determines the type of the PLC protocol.
#[inline]
pub fn protocol(&mut self, protocol: Protocol) -> &mut Self {
self.protocol = Some(protocol);
self
}
/// generic attribute.
/// Optional. All tags are treated as arrays. Tags that are not arrays are considered to have a length of one element. This attribute determines how many elements are in the tag. Defaults to one (1)
#[inline]
pub fn element_count(&mut self, count: usize) -> &mut Self {
self.elem_count = Some(count);
self
}
/// generic attribute
/// Required for some protocols or PLC types. This attribute determines the size of a single element of the tag. All tags are considered to be arrays, even those with only one entry. Ignored for Modbus and for ControlLogix-class Allen-Bradley PLCs. This parameter will become optional for as many PLC types as possible
#[inline]
pub fn element_size(&mut self, size: usize) -> &mut Self {
self.elem_size = Some(size);
self
}
/// generic attribute:
/// Optional. An integer number of milliseconds to cache read data.
/// Use this attribute to cause the tag read operations to cache data the requested number of milliseconds. This can be used to lower the actual number of requests against the PLC. Example read_cache_ms=100 will result in read operations no more often than once every 100 milliseconds.
#[inline]
pub fn read_cache_ms(&mut self, millis: usize) -> &mut Self {
self.read_cache_ms = Some(millis);
self
}
/// Required for EIP. Determines the type of the PLC
#[inline]
pub fn plc(&mut self, plc: PlcKind) -> &mut Self {
self.plc = Some(plc);
self
}
/// - EIP
/// IP address or host name.
/// This tells the library what host name or IP address to use for the PLC or the gateway to the PLC (in the case that the PLC is remote).
/// - ModBus
/// Required IP address or host name and optional port
/// This tells the library what host name or IP address to use for the PLC. Can have an optional port at the end, e.g. gateway=10.1.2.3:502 where the :502 part specifies the port.
#[inline]
pub fn gateway(&mut self, gateway: impl AsRef<str>) -> &mut Self {
self.gateway = Some(gateway.as_ref().to_owned());
self
}
/// - EIP
/// This is the full name of the tag. For program tags, prepend Program:<program name>. where <program name> is the name of the program in which the tag is created
/// - ModBus
/// Required the type and first register number of a tag, e.g. co42 for coil 42 (counts from zero).
/// The supported register type prefixes are co for coil, di for discrete inputs, hr for holding registers and ir for input registers. The type prefix must be present and the register number must be greater than or equal to zero and less than or equal to 65535. Modbus examples: co21 - coil 21, di22 - discrete input 22, hr66 - holding register 66, ir64000 - input register 64000.
///
/// you might want to use `register()` instead of `name()` for Modbus
#[inline]
pub fn name(&mut self, name: impl AsRef<str>) -> &mut Self {
self.name = Some(name.as_ref().to_owned());
self
}
/// set register for Modbus
pub fn register(&mut self, reg: Register) -> &mut Self {
self.name = Some(format!("{}", reg));
self
}
/// - EIP
/// AB: CIP path to PLC CPU. I.e. 1,0.
/// This attribute is required for CompactLogix/ControlLogix tags and for tags using a DH+ protocol bridge (i.e. a DHRIO module) to get to a PLC/5, SLC 500, or MicroLogix PLC on a remote DH+ link. The attribute is ignored if it is not a DH+ bridge route, but will generate a warning if debugging is active. Note that Micro800 connections must not have a path attribute.
/// - ModBus
/// Required The server/unit ID. Must be an integer value between 0 and 255.
/// Servers may support more than one unit or may bridge to other units.
#[inline]
pub fn path(&mut self, path: impl AsRef<str>) -> &mut Self {
self.path = Some(path.as_ref().to_owned());
self
}
/// EIP only
/// Optional 1 = use CIP connection, 0 = use UCMM.
/// Control whether to use connected or unconnected messaging. Only valid on Logix-class PLCs. Connected messaging is required on Micro800 and DH+ bridged links. Default is PLC-specific and link-type specific. Generally you do not need to set this.
#[inline]
pub fn use_connected_msg(&mut self, yes: bool) -> &mut Self {
self.use_connected_msg = Some(yes);
self
}
/// check required attributes or conflict attributes
fn check(&self) -> Result<()> {
//check protocol, required
if self.protocol.is_none() {
return Err(anyhow!("protocol required"));
}
let protocol = self.protocol.unwrap();
// check required attributes
match protocol {
Protocol::EIP => {
//TODO: check gateway, either ip or host name
//check plc, required
if self.plc.is_none() {
return Err(anyhow!("plc required"));
}
let plc = self.plc.unwrap();
if plc == PlcKind::ControlLogix {
if self.path.is_none() {
return Err(anyhow!("path required for controllogix"));
}
return Ok(()); //skip check for elem_size
} else if plc == PlcKind::Micro800 {
if self.path.is_some() {
return Err(anyhow!("path must not provided for micro800"));
}
}
if self.elem_size.is_none() {
return Err(anyhow!("element size required"));
}
}
Protocol::ModBus => {
//TODO: check gateway, host with port
if self.gateway.is_none() {
return Err(anyhow!("gateway required"));
}
if self.name.is_none() {
return Err(anyhow!("name required"));
}
//path is number [0-255]
match self.path {
Some(ref path) => {
let _: u8 = path
.parse()
.or(Err(anyhow!("path is a number in range [0-255]")))?;
}
None => return Err(anyhow!("path required")),
}
if self.elem_size.is_none() {
return Err(anyhow!("element size required"));
}
}
}
Ok(())
}
/// build full tag path
pub fn build(&self) -> Result<String> {
self.check()?;
let mut path_buf = vec![];
let protocol = self.protocol.unwrap();
path_buf.push(format!("protocol={}", protocol));
match protocol {
Protocol::EIP => {
if let Some(plc) = self.plc {
path_buf.push(format!("plc={}", plc));
}
if let Some(yes) = self.use_connected_msg {
path_buf.push(format!("use_connected_msg={}", yes as u8));
}
}
Protocol::ModBus => {}
}
if let Some(ref gateway) = self.gateway {
path_buf.push(format!("gateway={}", gateway));
}
if let Some(ref path) = self.path {
path_buf.push(format!("path={}", path));
}
if let Some(ref name) = self.name {
path_buf.push(format!("name={}", name));
}
if let Some(elem_count) = self.elem_count {
path_buf.push(format!("elem_count={}", elem_count));
}
if let Some(elem_size) = self.elem_size {
path_buf.push(format!("elem_size={}", elem_size));
}
if let Some(read_cache_ms) = self.read_cache_ms {
path_buf.push(format!("read_cache_ms={}", read_cache_ms));
}
if let Some(debug) = self.debug {
let level: u8 = debug.into();
path_buf.push(format!("debug={}", level));
}
let buf = path_buf.join("&");
Ok(buf.to_owned())
}
}
/// library supported protocols
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Protocol {
/// EIP protocol
EIP,
/// Modbus protocol
ModBus,
}
impl fmt::Display for Protocol {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Protocol::EIP => write!(f, "ab-eip"),
Protocol::ModBus => write!(f, "modbus-tcp"),
}
}
}
///modbus supported register
pub enum Register {
///coil registers
Coil(u16),
///discrete inputs
Discrete(u16),
///holding registers
Holding(u16),
///input registers
Input(u16),
}
impl fmt::Display for Register {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Register::Coil(v) => write!(f, "co{}", v),
Register::Discrete(v) => write!(f, "di{}", v),
Register::Holding(v) => write!(f, "hr{}", v),
Register::Input(v) => write!(f, "ir{}", v),
}
}
}
/// plc kind, required for EIP protocol
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum PlcKind {
/// Tell the library that this tag is in a Control Logix-class PLC
ControlLogix,
/// Tell the library that this tag is in a PLC/5 PLC
PLC5,
/// Tell the library that this tag is in a SLC 500 PLC
SLC500,
/// Tell the library that this tag is in a Control Logix-class PLC using the PLC/5 protocol
LogixPCCC,
/// Tell the library that this tag is in a Micro800-class PLC
Micro800,
/// Tell the library that this tag is in a Micrologix PLC
MicroLogix,
}
impl fmt::Display for PlcKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PlcKind::ControlLogix => write!(f, "controllogix"),
PlcKind::PLC5 => write!(f, "plc5"),
PlcKind::SLC500 => write!(f, "slc500"),
PlcKind::LogixPCCC => write!(f, "logixpccc"),
PlcKind::Micro800 => write!(f, "micro800"),
PlcKind::MicroLogix => write!(f, "micrologix"),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_eip_builder() {
let path = PathBuilder::default()
.protocol(Protocol::EIP)
.gateway("192.168.1.120")
.plc(PlcKind::ControlLogix)
.name("MyTag1")
.element_size(16)
.element_count(1)
.path("1,0")
.read_cache_ms(0)
.build()
.unwrap();
assert_eq!(path, "protocol=ab-eip&plc=controllogix&gateway=192.168.1.120&path=1,0&name=MyTag1&elem_count=1&elem_size=16&read_cache_ms=0");
}
#[test]
fn test_modbus_builder() {
let path = PathBuilder::default()
.protocol(Protocol::ModBus)
.gateway("192.168.1.120:502")
.path("0")
.register(Register::Coil(42))
.element_size(16)
.element_count(1)
.read_cache_ms(0)
.build()
.unwrap();
assert_eq!(path, "protocol=modbus-tcp&gateway=192.168.1.120:502&path=0&name=co42&elem_count=1&elem_size=16&read_cache_ms=0");
}
}
| {
self.debug = Some(level);
self
} | identifier_body |
builder.rs | //! builders for tag path and tag
use crate::DebugLevel;
use std::fmt;
pub use anyhow::Result;
/// builder to build tag full path
///
/// # Examples
/// ```rust,ignore
/// use plctag::builder::*;
/// use plctag::RawTag;
///
/// fn main() {
/// let timeout = 100;
/// let path = PathBuilder::default()
/// .protocol(Protocol::EIP)
/// .gateway("192.168.1.120")
/// .plc(PlcKind::ControlLogix)
/// .name("MyTag1")
/// .element_size(16)
/// .element_count(1)
/// .path("1,0")
/// .read_cache_ms(0)
/// .build()
/// .unwrap();
/// let tag = RawTag::new(path, timeout).unwrap();
/// let status = tag.status();
/// assert!(status.is_ok());
/// }
///
/// ```
#[derive(Default, Debug)]
pub struct PathBuilder {
protocol: Option<Protocol>,
debug: Option<DebugLevel>,
elem_count: Option<usize>,
elem_size: Option<usize>,
read_cache_ms: Option<usize>,
plc: Option<PlcKind>,
name: Option<String>,
path: Option<String>,
gateway: Option<String>,
use_connected_msg: Option<bool>,
}
impl PathBuilder {
/// generic attribute.
/// defining the current debugging level.
/// please use [`plc::set_debug_level`](../plc/fn.set_debug_level.html) instead.
#[deprecated]
#[inline]
pub fn debug(&mut self, level: DebugLevel) -> &mut Self {
self.debug = Some(level);
self
}
/// generic attribute.
/// Required. Determines the type of the PLC protocol.
#[inline]
pub fn protocol(&mut self, protocol: Protocol) -> &mut Self {
self.protocol = Some(protocol);
self
}
/// generic attribute.
/// Optional. All tags are treated as arrays. Tags that are not arrays are considered to have a length of one element. This attribute determines how many elements are in the tag. Defaults to one (1)
#[inline]
pub fn element_count(&mut self, count: usize) -> &mut Self {
self.elem_count = Some(count);
self
}
/// generic attribute
/// Required for some protocols or PLC types. This attribute determines the size of a single element of the tag. All tags are considered to be arrays, even those with only one entry. Ignored for Modbus and for ControlLogix-class Allen-Bradley PLCs. This parameter will become optional for as many PLC types as possible
#[inline]
pub fn element_size(&mut self, size: usize) -> &mut Self {
self.elem_size = Some(size);
self
}
/// generic attribute:
/// Optional. An integer number of milliseconds to cache read data.
/// Use this attribute to cause the tag read operations to cache data the requested number of milliseconds. This can be used to lower the actual number of requests against the PLC. Example read_cache_ms=100 will result in read operations no more often than once every 100 milliseconds.
#[inline]
pub fn read_cache_ms(&mut self, millis: usize) -> &mut Self {
self.read_cache_ms = Some(millis);
self
}
/// Required for EIP. Determines the type of the PLC
#[inline]
pub fn plc(&mut self, plc: PlcKind) -> &mut Self {
self.plc = Some(plc);
self
}
/// - EIP
/// IP address or host name.
/// This tells the library what host name or IP address to use for the PLC or the gateway to the PLC (in the case that the PLC is remote).
/// - ModBus
/// Required IP address or host name and optional port
/// This tells the library what host name or IP address to use for the PLC. Can have an optional port at the end, e.g. gateway=10.1.2.3:502 where the :502 part specifies the port.
#[inline]
pub fn | (&mut self, gateway: impl AsRef<str>) -> &mut Self {
self.gateway = Some(gateway.as_ref().to_owned());
self
}
/// - EIP
/// This is the full name of the tag. For program tags, prepend Program:<program name>. where <program name> is the name of the program in which the tag is created
/// - ModBus
/// Required the type and first register number of a tag, e.g. co42 for coil 42 (counts from zero).
/// The supported register type prefixes are co for coil, di for discrete inputs, hr for holding registers and ir for input registers. The type prefix must be present and the register number must be greater than or equal to zero and less than or equal to 65535. Modbus examples: co21 - coil 21, di22 - discrete input 22, hr66 - holding register 66, ir64000 - input register 64000.
///
/// you might want to use `register()` instead of `name()` for Modbus
#[inline]
pub fn name(&mut self, name: impl AsRef<str>) -> &mut Self {
self.name = Some(name.as_ref().to_owned());
self
}
/// set register for Modbus
pub fn register(&mut self, reg: Register) -> &mut Self {
self.name = Some(format!("{}", reg));
self
}
/// - EIP
/// AB: CIP path to PLC CPU. I.e. 1,0.
/// This attribute is required for CompactLogix/ControlLogix tags and for tags using a DH+ protocol bridge (i.e. a DHRIO module) to get to a PLC/5, SLC 500, or MicroLogix PLC on a remote DH+ link. The attribute is ignored if it is not a DH+ bridge route, but will generate a warning if debugging is active. Note that Micro800 connections must not have a path attribute.
/// - ModBus
/// Required The server/unit ID. Must be an integer value between 0 and 255.
/// Servers may support more than one unit or may bridge to other units.
#[inline]
pub fn path(&mut self, path: impl AsRef<str>) -> &mut Self {
self.path = Some(path.as_ref().to_owned());
self
}
/// EIP only
/// Optional 1 = use CIP connection, 0 = use UCMM.
/// Control whether to use connected or unconnected messaging. Only valid on Logix-class PLCs. Connected messaging is required on Micro800 and DH+ bridged links. Default is PLC-specific and link-type specific. Generally you do not need to set this.
#[inline]
pub fn use_connected_msg(&mut self, yes: bool) -> &mut Self {
self.use_connected_msg = Some(yes);
self
}
/// check required attributes or conflict attributes
fn check(&self) -> Result<()> {
//check protocol, required
if self.protocol.is_none() {
return Err(anyhow!("protocol required"));
}
let protocol = self.protocol.unwrap();
// check required attributes
match protocol {
Protocol::EIP => {
//TODO: check gateway, either ip or host name
//check plc, required
if self.plc.is_none() {
return Err(anyhow!("plc required"));
}
let plc = self.plc.unwrap();
if plc == PlcKind::ControlLogix {
if self.path.is_none() {
return Err(anyhow!("path required for controllogix"));
}
return Ok(()); //skip check for elem_size
} else if plc == PlcKind::Micro800 {
if self.path.is_some() {
return Err(anyhow!("path must not provided for micro800"));
}
}
if self.elem_size.is_none() {
return Err(anyhow!("element size required"));
}
}
Protocol::ModBus => {
//TODO: check gateway, host with port
if self.gateway.is_none() {
return Err(anyhow!("gateway required"));
}
if self.name.is_none() {
return Err(anyhow!("name required"));
}
//path is number [0-255]
match self.path {
Some(ref path) => {
let _: u8 = path
.parse()
.or(Err(anyhow!("path is a number in range [0-255]")))?;
}
None => return Err(anyhow!("path required")),
}
if self.elem_size.is_none() {
return Err(anyhow!("element size required"));
}
}
}
Ok(())
}
/// build full tag path
pub fn build(&self) -> Result<String> {
self.check()?;
let mut path_buf = vec![];
let protocol = self.protocol.unwrap();
path_buf.push(format!("protocol={}", protocol));
match protocol {
Protocol::EIP => {
if let Some(plc) = self.plc {
path_buf.push(format!("plc={}", plc));
}
if let Some(yes) = self.use_connected_msg {
path_buf.push(format!("use_connected_msg={}", yes as u8));
}
}
Protocol::ModBus => {}
}
if let Some(ref gateway) = self.gateway {
path_buf.push(format!("gateway={}", gateway));
}
if let Some(ref path) = self.path {
path_buf.push(format!("path={}", path));
}
if let Some(ref name) = self.name {
path_buf.push(format!("name={}", name));
}
if let Some(elem_count) = self.elem_count {
path_buf.push(format!("elem_count={}", elem_count));
}
if let Some(elem_size) = self.elem_size {
path_buf.push(format!("elem_size={}", elem_size));
}
if let Some(read_cache_ms) = self.read_cache_ms {
path_buf.push(format!("read_cache_ms={}", read_cache_ms));
}
if let Some(debug) = self.debug {
let level: u8 = debug.into();
path_buf.push(format!("debug={}", level));
}
let buf = path_buf.join("&");
Ok(buf.to_owned())
}
}
/// library supported protocols
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Protocol {
/// EIP protocol
EIP,
/// Modbus protocol
ModBus,
}
impl fmt::Display for Protocol {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Protocol::EIP => write!(f, "ab-eip"),
Protocol::ModBus => write!(f, "modbus-tcp"),
}
}
}
///modbus supported register
pub enum Register {
///coil registers
Coil(u16),
///discrete inputs
Discrete(u16),
///holding registers
Holding(u16),
///input registers
Input(u16),
}
impl fmt::Display for Register {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Register::Coil(v) => write!(f, "co{}", v),
Register::Discrete(v) => write!(f, "di{}", v),
Register::Holding(v) => write!(f, "hr{}", v),
Register::Input(v) => write!(f, "ir{}", v),
}
}
}
/// plc kind, required for EIP protocol
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum PlcKind {
/// Tell the library that this tag is in a Control Logix-class PLC
ControlLogix,
/// Tell the library that this tag is in a PLC/5 PLC
PLC5,
/// Tell the library that this tag is in a SLC 500 PLC
SLC500,
/// Tell the library that this tag is in a Control Logix-class PLC using the PLC/5 protocol
LogixPCCC,
/// Tell the library that this tag is in a Micro800-class PLC
Micro800,
/// Tell the library that this tag is in a Micrologix PLC
MicroLogix,
}
impl fmt::Display for PlcKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PlcKind::ControlLogix => write!(f, "controllogix"),
PlcKind::PLC5 => write!(f, "plc5"),
PlcKind::SLC500 => write!(f, "slc500"),
PlcKind::LogixPCCC => write!(f, "logixpccc"),
PlcKind::Micro800 => write!(f, "micro800"),
PlcKind::MicroLogix => write!(f, "micrologix"),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_eip_builder() {
let path = PathBuilder::default()
.protocol(Protocol::EIP)
.gateway("192.168.1.120")
.plc(PlcKind::ControlLogix)
.name("MyTag1")
.element_size(16)
.element_count(1)
.path("1,0")
.read_cache_ms(0)
.build()
.unwrap();
assert_eq!(path, "protocol=ab-eip&plc=controllogix&gateway=192.168.1.120&path=1,0&name=MyTag1&elem_count=1&elem_size=16&read_cache_ms=0");
}
#[test]
fn test_modbus_builder() {
let path = PathBuilder::default()
.protocol(Protocol::ModBus)
.gateway("192.168.1.120:502")
.path("0")
.register(Register::Coil(42))
.element_size(16)
.element_count(1)
.read_cache_ms(0)
.build()
.unwrap();
assert_eq!(path, "protocol=modbus-tcp&gateway=192.168.1.120:502&path=0&name=co42&elem_count=1&elem_size=16&read_cache_ms=0");
}
}
| gateway | identifier_name |
builder.rs | //! builders for tag path and tag
use crate::DebugLevel;
use std::fmt;
pub use anyhow::Result;
/// builder to build tag full path
///
/// # Examples
/// ```rust,ignore
/// use plctag::builder::*;
/// use plctag::RawTag;
///
/// fn main() {
/// let timeout = 100;
/// let path = PathBuilder::default()
/// .protocol(Protocol::EIP)
/// .gateway("192.168.1.120")
/// .plc(PlcKind::ControlLogix)
/// .name("MyTag1")
/// .element_size(16)
/// .element_count(1)
/// .path("1,0")
/// .read_cache_ms(0)
/// .build()
/// .unwrap();
/// let tag = RawTag::new(path, timeout).unwrap();
/// let status = tag.status();
/// assert!(status.is_ok());
/// }
///
/// ```
#[derive(Default, Debug)]
pub struct PathBuilder {
protocol: Option<Protocol>,
debug: Option<DebugLevel>,
elem_count: Option<usize>,
elem_size: Option<usize>,
read_cache_ms: Option<usize>,
plc: Option<PlcKind>,
name: Option<String>,
path: Option<String>,
gateway: Option<String>,
use_connected_msg: Option<bool>,
}
impl PathBuilder {
/// generic attribute.
/// defining the current debugging level.
/// please use [`plc::set_debug_level`](../plc/fn.set_debug_level.html) instead.
#[deprecated]
#[inline]
pub fn debug(&mut self, level: DebugLevel) -> &mut Self {
self.debug = Some(level);
self
}
/// generic attribute.
/// Required. Determines the type of the PLC protocol.
#[inline]
pub fn protocol(&mut self, protocol: Protocol) -> &mut Self {
self.protocol = Some(protocol);
self
}
/// generic attribute.
/// Optional. All tags are treated as arrays. Tags that are not arrays are considered to have a length of one element. This attribute determines how many elements are in the tag. Defaults to one (1)
#[inline]
pub fn element_count(&mut self, count: usize) -> &mut Self {
self.elem_count = Some(count);
self
}
/// generic attribute
/// Required for some protocols or PLC types. This attribute determines the size of a single element of the tag. All tags are considered to be arrays, even those with only one entry. Ignored for Modbus and for ControlLogix-class Allen-Bradley PLCs. This parameter will become optional for as many PLC types as possible
#[inline]
pub fn element_size(&mut self, size: usize) -> &mut Self {
self.elem_size = Some(size);
self
}
/// generic attribute:
/// Optional. An integer number of milliseconds to cache read data.
/// Use this attribute to cause the tag read operations to cache data the requested number of milliseconds. This can be used to lower the actual number of requests against the PLC. Example read_cache_ms=100 will result in read operations no more often than once every 100 milliseconds.
#[inline]
pub fn read_cache_ms(&mut self, millis: usize) -> &mut Self {
self.read_cache_ms = Some(millis);
self
}
/// Required for EIP. Determines the type of the PLC
#[inline]
pub fn plc(&mut self, plc: PlcKind) -> &mut Self {
self.plc = Some(plc);
self
}
/// - EIP
/// IP address or host name.
/// This tells the library what host name or IP address to use for the PLC or the gateway to the PLC (in the case that the PLC is remote).
/// - ModBus
/// Required IP address or host name and optional port
/// This tells the library what host name or IP address to use for the PLC. Can have an optional port at the end, e.g. gateway=10.1.2.3:502 where the :502 part specifies the port.
#[inline]
pub fn gateway(&mut self, gateway: impl AsRef<str>) -> &mut Self {
self.gateway = Some(gateway.as_ref().to_owned());
self
}
/// - EIP
/// This is the full name of the tag. For program tags, prepend Program:<program name>. where <program name> is the name of the program in which the tag is created
/// - ModBus
/// Required the type and first register number of a tag, e.g. co42 for coil 42 (counts from zero).
/// The supported register type prefixes are co for coil, di for discrete inputs, hr for holding registers and ir for input registers. The type prefix must be present and the register number must be greater than or equal to zero and less than or equal to 65535. Modbus examples: co21 - coil 21, di22 - discrete input 22, hr66 - holding register 66, ir64000 - input register 64000.
///
/// you might want to use `register()` instead of `name()` for Modbus
#[inline]
pub fn name(&mut self, name: impl AsRef<str>) -> &mut Self {
self.name = Some(name.as_ref().to_owned());
self
}
/// set register for Modbus
pub fn register(&mut self, reg: Register) -> &mut Self {
self.name = Some(format!("{}", reg));
self
}
/// - EIP
/// AB: CIP path to PLC CPU. I.e. 1,0.
/// This attribute is required for CompactLogix/ControlLogix tags and for tags using a DH+ protocol bridge (i.e. a DHRIO module) to get to a PLC/5, SLC 500, or MicroLogix PLC on a remote DH+ link. The attribute is ignored if it is not a DH+ bridge route, but will generate a warning if debugging is active. Note that Micro800 connections must not have a path attribute.
/// - ModBus
/// Required The server/unit ID. Must be an integer value between 0 and 255.
/// Servers may support more than one unit or may bridge to other units.
#[inline]
pub fn path(&mut self, path: impl AsRef<str>) -> &mut Self {
self.path = Some(path.as_ref().to_owned());
self
}
/// EIP only
/// Optional 1 = use CIP connection, 0 = use UCMM.
/// Control whether to use connected or unconnected messaging. Only valid on Logix-class PLCs. Connected messaging is required on Micro800 and DH+ bridged links. Default is PLC-specific and link-type specific. Generally you do not need to set this.
#[inline]
pub fn use_connected_msg(&mut self, yes: bool) -> &mut Self {
self.use_connected_msg = Some(yes);
self
}
/// check required attributes or conflict attributes
fn check(&self) -> Result<()> {
//check protocol, required
if self.protocol.is_none() {
return Err(anyhow!("protocol required"));
}
let protocol = self.protocol.unwrap();
// check required attributes
match protocol {
Protocol::EIP => {
//TODO: check gateway, either ip or host name
//check plc, required
if self.plc.is_none() {
return Err(anyhow!("plc required"));
}
let plc = self.plc.unwrap();
if plc == PlcKind::ControlLogix {
if self.path.is_none() {
return Err(anyhow!("path required for controllogix"));
}
return Ok(()); //skip check for elem_size
} else if plc == PlcKind::Micro800 {
if self.path.is_some() {
return Err(anyhow!("path must not provided for micro800"));
}
}
if self.elem_size.is_none() {
return Err(anyhow!("element size required"));
}
}
Protocol::ModBus => {
//TODO: check gateway, host with port
if self.gateway.is_none() {
return Err(anyhow!("gateway required"));
}
if self.name.is_none() {
return Err(anyhow!("name required"));
}
//path is number [0-255]
match self.path {
Some(ref path) => {
let _: u8 = path
.parse()
.or(Err(anyhow!("path is a number in range [0-255]")))?;
}
None => return Err(anyhow!("path required")),
}
if self.elem_size.is_none() {
return Err(anyhow!("element size required"));
}
}
}
Ok(())
}
/// build full tag path
pub fn build(&self) -> Result<String> {
self.check()?;
let mut path_buf = vec![];
let protocol = self.protocol.unwrap();
path_buf.push(format!("protocol={}", protocol));
match protocol {
Protocol::EIP => {
if let Some(plc) = self.plc {
path_buf.push(format!("plc={}", plc));
| }
if let Some(yes) = self.use_connected_msg {
path_buf.push(format!("use_connected_msg={}", yes as u8));
}
}
Protocol::ModBus => {}
}
if let Some(ref gateway) = self.gateway {
path_buf.push(format!("gateway={}", gateway));
}
if let Some(ref path) = self.path {
path_buf.push(format!("path={}", path));
}
if let Some(ref name) = self.name {
path_buf.push(format!("name={}", name));
}
if let Some(elem_count) = self.elem_count {
path_buf.push(format!("elem_count={}", elem_count));
}
if let Some(elem_size) = self.elem_size {
path_buf.push(format!("elem_size={}", elem_size));
}
if let Some(read_cache_ms) = self.read_cache_ms {
path_buf.push(format!("read_cache_ms={}", read_cache_ms));
}
if let Some(debug) = self.debug {
let level: u8 = debug.into();
path_buf.push(format!("debug={}", level));
}
let buf = path_buf.join("&");
Ok(buf.to_owned())
}
}
/// library supported protocols
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Protocol {
/// EIP protocol
EIP,
/// Modbus protocol
ModBus,
}
impl fmt::Display for Protocol {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Protocol::EIP => write!(f, "ab-eip"),
Protocol::ModBus => write!(f, "modbus-tcp"),
}
}
}
///modbus supported register
pub enum Register {
///coil registers
Coil(u16),
///discrete inputs
Discrete(u16),
///holding registers
Holding(u16),
///input registers
Input(u16),
}
impl fmt::Display for Register {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Register::Coil(v) => write!(f, "co{}", v),
Register::Discrete(v) => write!(f, "di{}", v),
Register::Holding(v) => write!(f, "hr{}", v),
Register::Input(v) => write!(f, "ir{}", v),
}
}
}
/// plc kind, required for EIP protocol
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum PlcKind {
/// Tell the library that this tag is in a Control Logix-class PLC
ControlLogix,
/// Tell the library that this tag is in a PLC/5 PLC
PLC5,
/// Tell the library that this tag is in a SLC 500 PLC
SLC500,
/// Tell the library that this tag is in a Control Logix-class PLC using the PLC/5 protocol
LogixPCCC,
/// Tell the library that this tag is in a Micro800-class PLC
Micro800,
/// Tell the library that this tag is in a Micrologix PLC
MicroLogix,
}
impl fmt::Display for PlcKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PlcKind::ControlLogix => write!(f, "controllogix"),
PlcKind::PLC5 => write!(f, "plc5"),
PlcKind::SLC500 => write!(f, "slc500"),
PlcKind::LogixPCCC => write!(f, "logixpccc"),
PlcKind::Micro800 => write!(f, "micro800"),
PlcKind::MicroLogix => write!(f, "micrologix"),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_eip_builder() {
let path = PathBuilder::default()
.protocol(Protocol::EIP)
.gateway("192.168.1.120")
.plc(PlcKind::ControlLogix)
.name("MyTag1")
.element_size(16)
.element_count(1)
.path("1,0")
.read_cache_ms(0)
.build()
.unwrap();
assert_eq!(path, "protocol=ab-eip&plc=controllogix&gateway=192.168.1.120&path=1,0&name=MyTag1&elem_count=1&elem_size=16&read_cache_ms=0");
}
#[test]
fn test_modbus_builder() {
let path = PathBuilder::default()
.protocol(Protocol::ModBus)
.gateway("192.168.1.120:502")
.path("0")
.register(Register::Coil(42))
.element_size(16)
.element_count(1)
.read_cache_ms(0)
.build()
.unwrap();
assert_eq!(path, "protocol=modbus-tcp&gateway=192.168.1.120:502&path=0&name=co42&elem_count=1&elem_size=16&read_cache_ms=0");
}
} | random_line_split |
|
lib.rs | //! <img src="https://raw.githubusercontent.com/maciejhirsz/logos/master/logos.svg?sanitize=true" alt="Logos logo" width="250" align="right">
//!
//! # Logos
//!
//! This is a `#[derive]` macro crate, [for documentation go to main crate](https://docs.rs/logos).
// The `quote!` macro requires deep recursion.
#![recursion_limit = "196"]
mod generator;
mod error;
mod graph;
mod util;
mod leaf;
use error::Error;
use generator::Generator;
use graph::{Graph, Fork, Rope};
use leaf::Leaf;
use util::{Literal, Definition};
use proc_macro::TokenStream;
use quote::quote;
use syn::{Ident, Fields, ItemEnum, GenericParam, Attribute};
use syn::spanned::Spanned;
enum Mode {
Utf8,
Binary,
}
#[proc_macro_derive(
Logos,
attributes(logos, extras, error, end, token, regex, extras)
)]
pub fn logos(input: TokenStream) -> TokenStream | let span = item.generics.span();
errors.push(Error::new("Logos currently supports permits a single lifetime generic.").span(span));
None
}
};
let mut parse_attr = |attr: &Attribute| -> Result<(), error::SpannedError> {
if attr.path.is_ident("logos") {
if let Some(nested) = util::read_attr("logos", attr)? {
let span = nested.span();
if let Some(ext) = util::value_from_nested("extras", &nested)? {
if extras.replace(ext).is_some() {
return Err(Error::new("Extras can be defined only once.").span(span));
}
}
if let Err(_) = util::value_from_nested("trivia", &nested) {
const ERR: &str = "\
trivia are no longer supported.\n\n\
For help with migration see release notes: https://github.com/maciejhirsz/logos/releases";
return Err(Error::new(ERR).span(span));
}
}
}
if attr.path.is_ident("extras") {
const ERR: &str = "\
#[extras] attribute is deprecated. Use #[logos(extras = Type)] instead.\n\n\
For help with migration see release notes: https://github.com/maciejhirsz/logos/releases";
return Err(Error::new(ERR).span(attr.span()));
}
Ok(())
};
for attr in &item.attrs {
if let Err(err) = parse_attr(attr) {
errors.push(err);
}
}
let mut variants = Vec::new();
let mut ropes = Vec::new();
let mut regex_ids = Vec::new();
let mut graph = Graph::new();
for variant in &item.variants {
variants.push(&variant.ident);
let span = variant.span();
if let Some((_, value)) = &variant.discriminant {
let span = value.span();
let value = util::unpack_int(value).unwrap_or(usize::max_value());
if value >= size {
errors.push(Error::new(
format!(
"Discriminant value for `{}` is invalid. Expected integer in range 0..={}.",
variant.ident,
size,
),
).span(span));
}
}
let field = match &variant.fields {
Fields::Unit => None,
Fields::Unnamed(ref fields) => {
if fields.unnamed.len()!= 1 {
errors.push(Error::new(
format!(
"Logos currently only supports variants with one field, found {}",
fields.unnamed.len(),
)
).span(fields.span()))
}
let field = fields.unnamed.first().expect("Already checked len; qed").ty.clone();
Some(field)
}
Fields::Named(_) => {
errors.push(Error::new("Logos doesn't support named fields yet.").span(span));
None
}
};
for attr in &variant.attrs {
let variant = &variant.ident;
let mut with_definition = |definition: Definition<Literal>| {
if let Literal::Bytes(..) = definition.value {
mode = Mode::Binary;
}
(
Leaf::token(variant).field(field.clone()).callback(definition.callback),
definition.value,
)
};
if attr.path.is_ident("error") {
if let Some(previous) = error.replace(variant) {
errors.extend(vec![
Error::new("Only one #[error] variant can be declared.").span(span),
Error::new("Previously declared #[error]:").span(previous.span()),
]);
}
} else if attr.path.is_ident("end") {
errors.push(
Error::new(
"Since 0.11 Logos no longer requires the #[end] variant.\n\n\
For help with migration see release notes: https://github.com/maciejhirsz/logos/releases"
).span(attr.span())
);
} else if attr.path.is_ident("token") {
match util::value_from_attr("token", attr) {
Ok(Some(definition)) => {
let (token, value) = with_definition(definition);
let value = value.into_bytes();
let then = graph.push(token.priority(value.len()));
ropes.push(Rope::new(value, then));
},
Err(err) => errors.push(err),
_ => (),
}
} else if attr.path.is_ident("regex") {
match util::value_from_attr("regex", attr) {
Ok(Some(definition)) => {
let (token, value) = with_definition(definition);
let then = graph.reserve();
let (utf8, regex, span) = match value {
Literal::Utf8(string, span) => (true, string, span),
Literal::Bytes(bytes, span) => {
mode = Mode::Binary;
(false, util::bytes_to_regex_string(&bytes), span)
}
};
match graph.regex(utf8, ®ex, then.get()) {
Ok((len, mut id)) => {
let then = graph.insert(then, token.priority(len));
regex_ids.push(id);
// Drain recursive miss values.
// We need the root node to have straight branches.
while let Some(miss) = graph[id].miss() {
if miss == then {
errors.push(
Error::new("#[regex]: expression can match empty string.\n\n\
hint: consider changing * to +").span(span)
);
break;
} else {
regex_ids.push(miss);
id = miss;
}
}
},
Err(err) => errors.push(err.span(span)),
}
},
Err(err) => errors.push(err),
_ => (),
}
}
}
}
let mut root = Fork::new();
let extras = match extras {
Some(ext) => quote!(#ext),
None => quote!(()),
};
let source = match mode {
Mode::Utf8 => quote!(str),
Mode::Binary => quote!([u8]),
};
let error_def = match error {
Some(error) => Some(quote!(const ERROR: Self = #name::#error;)),
None => {
errors.push(Error::new("missing #[error] token variant.").span(super_span));
None
},
};
let this = quote!(#name #generics);
let impl_logos = |body| {
quote! {
impl<'s> ::logos::Logos<'s> for #this {
type Extras = #extras;
type Source = #source;
const SIZE: usize = #size;
#error_def
fn lex(lex: &mut ::logos::Lexer<'s, Self>) {
#body
}
}
}
};
if errors.len() > 0 {
return impl_logos(quote! {
fn _logos_derive_compile_errors() {
#(#errors)*
}
}).into()
}
for id in regex_ids {
let fork = graph.fork_off(id);
root.merge(fork, &mut graph);
}
for rope in ropes {
root.merge(rope.into_fork(&mut graph), &mut graph)
}
while let Some(id) = root.miss.take() {
let fork = graph.fork_off(id);
if fork.branches().next().is_some() {
root.merge(fork, &mut graph);
} else {
break;
}
}
let root = graph.push(root);
graph.shake(root);
// panic!("{:#?}\n\n{} nodes", graph, graph.nodes().iter().filter_map(|n| n.as_ref()).count());
let generator = Generator::new(name, &this, root, &graph);
let body = generator.generate();
let tokens = impl_logos(quote! {
use ::logos::internal::{LexerInternal, CallbackResult};
type Lexer<'s> = ::logos::Lexer<'s, #name #generics>;
fn _end<'s>(lex: &mut Lexer<'s>) {
lex.end()
}
fn _error<'s>(lex: &mut Lexer<'s>) {
lex.bump_unchecked(1);
lex.set(#name::#error);
}
#body
});
// panic!("{}", tokens);
TokenStream::from(tokens)
}
| {
let item: ItemEnum = syn::parse(input).expect("#[token] can be only applied to enums");
let super_span = item.span();
let size = item.variants.len();
let name = &item.ident;
let mut extras: Option<Ident> = None;
let mut error = None;
let mut mode = Mode::Utf8;
let mut errors = Vec::new();
let generics = match item.generics.params.len() {
0 => {
None
},
1 if matches!(item.generics.params.first(), Some(GenericParam::Lifetime(..))) => {
Some(quote!(<'s>))
},
_ => { | identifier_body |
lib.rs | //! <img src="https://raw.githubusercontent.com/maciejhirsz/logos/master/logos.svg?sanitize=true" alt="Logos logo" width="250" align="right">
//!
//! # Logos
//!
//! This is a `#[derive]` macro crate, [for documentation go to main crate](https://docs.rs/logos).
// The `quote!` macro requires deep recursion.
#![recursion_limit = "196"]
mod generator;
mod error;
mod graph;
mod util;
mod leaf;
use error::Error;
use generator::Generator;
use graph::{Graph, Fork, Rope};
use leaf::Leaf;
use util::{Literal, Definition};
use proc_macro::TokenStream;
use quote::quote;
use syn::{Ident, Fields, ItemEnum, GenericParam, Attribute};
use syn::spanned::Spanned;
enum Mode {
Utf8,
Binary,
}
#[proc_macro_derive(
Logos,
attributes(logos, extras, error, end, token, regex, extras)
)]
pub fn | (input: TokenStream) -> TokenStream {
let item: ItemEnum = syn::parse(input).expect("#[token] can be only applied to enums");
let super_span = item.span();
let size = item.variants.len();
let name = &item.ident;
let mut extras: Option<Ident> = None;
let mut error = None;
let mut mode = Mode::Utf8;
let mut errors = Vec::new();
let generics = match item.generics.params.len() {
0 => {
None
},
1 if matches!(item.generics.params.first(), Some(GenericParam::Lifetime(..))) => {
Some(quote!(<'s>))
},
_ => {
let span = item.generics.span();
errors.push(Error::new("Logos currently supports permits a single lifetime generic.").span(span));
None
}
};
let mut parse_attr = |attr: &Attribute| -> Result<(), error::SpannedError> {
if attr.path.is_ident("logos") {
if let Some(nested) = util::read_attr("logos", attr)? {
let span = nested.span();
if let Some(ext) = util::value_from_nested("extras", &nested)? {
if extras.replace(ext).is_some() {
return Err(Error::new("Extras can be defined only once.").span(span));
}
}
if let Err(_) = util::value_from_nested("trivia", &nested) {
const ERR: &str = "\
trivia are no longer supported.\n\n\
For help with migration see release notes: https://github.com/maciejhirsz/logos/releases";
return Err(Error::new(ERR).span(span));
}
}
}
if attr.path.is_ident("extras") {
const ERR: &str = "\
#[extras] attribute is deprecated. Use #[logos(extras = Type)] instead.\n\n\
For help with migration see release notes: https://github.com/maciejhirsz/logos/releases";
return Err(Error::new(ERR).span(attr.span()));
}
Ok(())
};
for attr in &item.attrs {
if let Err(err) = parse_attr(attr) {
errors.push(err);
}
}
let mut variants = Vec::new();
let mut ropes = Vec::new();
let mut regex_ids = Vec::new();
let mut graph = Graph::new();
for variant in &item.variants {
variants.push(&variant.ident);
let span = variant.span();
if let Some((_, value)) = &variant.discriminant {
let span = value.span();
let value = util::unpack_int(value).unwrap_or(usize::max_value());
if value >= size {
errors.push(Error::new(
format!(
"Discriminant value for `{}` is invalid. Expected integer in range 0..={}.",
variant.ident,
size,
),
).span(span));
}
}
let field = match &variant.fields {
Fields::Unit => None,
Fields::Unnamed(ref fields) => {
if fields.unnamed.len()!= 1 {
errors.push(Error::new(
format!(
"Logos currently only supports variants with one field, found {}",
fields.unnamed.len(),
)
).span(fields.span()))
}
let field = fields.unnamed.first().expect("Already checked len; qed").ty.clone();
Some(field)
}
Fields::Named(_) => {
errors.push(Error::new("Logos doesn't support named fields yet.").span(span));
None
}
};
for attr in &variant.attrs {
let variant = &variant.ident;
let mut with_definition = |definition: Definition<Literal>| {
if let Literal::Bytes(..) = definition.value {
mode = Mode::Binary;
}
(
Leaf::token(variant).field(field.clone()).callback(definition.callback),
definition.value,
)
};
if attr.path.is_ident("error") {
if let Some(previous) = error.replace(variant) {
errors.extend(vec![
Error::new("Only one #[error] variant can be declared.").span(span),
Error::new("Previously declared #[error]:").span(previous.span()),
]);
}
} else if attr.path.is_ident("end") {
errors.push(
Error::new(
"Since 0.11 Logos no longer requires the #[end] variant.\n\n\
For help with migration see release notes: https://github.com/maciejhirsz/logos/releases"
).span(attr.span())
);
} else if attr.path.is_ident("token") {
match util::value_from_attr("token", attr) {
Ok(Some(definition)) => {
let (token, value) = with_definition(definition);
let value = value.into_bytes();
let then = graph.push(token.priority(value.len()));
ropes.push(Rope::new(value, then));
},
Err(err) => errors.push(err),
_ => (),
}
} else if attr.path.is_ident("regex") {
match util::value_from_attr("regex", attr) {
Ok(Some(definition)) => {
let (token, value) = with_definition(definition);
let then = graph.reserve();
let (utf8, regex, span) = match value {
Literal::Utf8(string, span) => (true, string, span),
Literal::Bytes(bytes, span) => {
mode = Mode::Binary;
(false, util::bytes_to_regex_string(&bytes), span)
}
};
match graph.regex(utf8, ®ex, then.get()) {
Ok((len, mut id)) => {
let then = graph.insert(then, token.priority(len));
regex_ids.push(id);
// Drain recursive miss values.
// We need the root node to have straight branches.
while let Some(miss) = graph[id].miss() {
if miss == then {
errors.push(
Error::new("#[regex]: expression can match empty string.\n\n\
hint: consider changing * to +").span(span)
);
break;
} else {
regex_ids.push(miss);
id = miss;
}
}
},
Err(err) => errors.push(err.span(span)),
}
},
Err(err) => errors.push(err),
_ => (),
}
}
}
}
let mut root = Fork::new();
let extras = match extras {
Some(ext) => quote!(#ext),
None => quote!(()),
};
let source = match mode {
Mode::Utf8 => quote!(str),
Mode::Binary => quote!([u8]),
};
let error_def = match error {
Some(error) => Some(quote!(const ERROR: Self = #name::#error;)),
None => {
errors.push(Error::new("missing #[error] token variant.").span(super_span));
None
},
};
let this = quote!(#name #generics);
let impl_logos = |body| {
quote! {
impl<'s> ::logos::Logos<'s> for #this {
type Extras = #extras;
type Source = #source;
const SIZE: usize = #size;
#error_def
fn lex(lex: &mut ::logos::Lexer<'s, Self>) {
#body
}
}
}
};
if errors.len() > 0 {
return impl_logos(quote! {
fn _logos_derive_compile_errors() {
#(#errors)*
}
}).into()
}
for id in regex_ids {
let fork = graph.fork_off(id);
root.merge(fork, &mut graph);
}
for rope in ropes {
root.merge(rope.into_fork(&mut graph), &mut graph)
}
while let Some(id) = root.miss.take() {
let fork = graph.fork_off(id);
if fork.branches().next().is_some() {
root.merge(fork, &mut graph);
} else {
break;
}
}
let root = graph.push(root);
graph.shake(root);
// panic!("{:#?}\n\n{} nodes", graph, graph.nodes().iter().filter_map(|n| n.as_ref()).count());
let generator = Generator::new(name, &this, root, &graph);
let body = generator.generate();
let tokens = impl_logos(quote! {
use ::logos::internal::{LexerInternal, CallbackResult};
type Lexer<'s> = ::logos::Lexer<'s, #name #generics>;
fn _end<'s>(lex: &mut Lexer<'s>) {
lex.end()
}
fn _error<'s>(lex: &mut Lexer<'s>) {
lex.bump_unchecked(1);
lex.set(#name::#error);
}
#body
});
// panic!("{}", tokens);
TokenStream::from(tokens)
}
| logos | identifier_name |
lib.rs | //! <img src="https://raw.githubusercontent.com/maciejhirsz/logos/master/logos.svg?sanitize=true" alt="Logos logo" width="250" align="right">
//!
//! # Logos
//!
//! This is a `#[derive]` macro crate, [for documentation go to main crate](https://docs.rs/logos).
// The `quote!` macro requires deep recursion.
#![recursion_limit = "196"]
mod generator;
mod error;
mod graph;
mod util;
mod leaf;
use error::Error;
use generator::Generator;
use graph::{Graph, Fork, Rope};
use leaf::Leaf;
use util::{Literal, Definition};
use proc_macro::TokenStream;
use quote::quote;
use syn::{Ident, Fields, ItemEnum, GenericParam, Attribute};
use syn::spanned::Spanned;
enum Mode {
Utf8,
Binary,
}
#[proc_macro_derive(
Logos,
attributes(logos, extras, error, end, token, regex, extras)
)]
pub fn logos(input: TokenStream) -> TokenStream {
let item: ItemEnum = syn::parse(input).expect("#[token] can be only applied to enums");
let super_span = item.span();
let size = item.variants.len();
let name = &item.ident;
let mut extras: Option<Ident> = None;
let mut error = None;
let mut mode = Mode::Utf8;
let mut errors = Vec::new();
let generics = match item.generics.params.len() {
0 => {
None
},
1 if matches!(item.generics.params.first(), Some(GenericParam::Lifetime(..))) => {
Some(quote!(<'s>))
},
_ => {
let span = item.generics.span();
errors.push(Error::new("Logos currently supports permits a single lifetime generic.").span(span));
None
}
};
let mut parse_attr = |attr: &Attribute| -> Result<(), error::SpannedError> {
if attr.path.is_ident("logos") {
if let Some(nested) = util::read_attr("logos", attr)? {
let span = nested.span();
if let Some(ext) = util::value_from_nested("extras", &nested)? {
if extras.replace(ext).is_some() {
return Err(Error::new("Extras can be defined only once.").span(span));
}
}
if let Err(_) = util::value_from_nested("trivia", &nested) {
const ERR: &str = "\
trivia are no longer supported.\n\n\
For help with migration see release notes: https://github.com/maciejhirsz/logos/releases";
return Err(Error::new(ERR).span(span));
}
}
}
if attr.path.is_ident("extras") { | #[extras] attribute is deprecated. Use #[logos(extras = Type)] instead.\n\n\
For help with migration see release notes: https://github.com/maciejhirsz/logos/releases";
return Err(Error::new(ERR).span(attr.span()));
}
Ok(())
};
for attr in &item.attrs {
if let Err(err) = parse_attr(attr) {
errors.push(err);
}
}
let mut variants = Vec::new();
let mut ropes = Vec::new();
let mut regex_ids = Vec::new();
let mut graph = Graph::new();
for variant in &item.variants {
variants.push(&variant.ident);
let span = variant.span();
if let Some((_, value)) = &variant.discriminant {
let span = value.span();
let value = util::unpack_int(value).unwrap_or(usize::max_value());
if value >= size {
errors.push(Error::new(
format!(
"Discriminant value for `{}` is invalid. Expected integer in range 0..={}.",
variant.ident,
size,
),
).span(span));
}
}
let field = match &variant.fields {
Fields::Unit => None,
Fields::Unnamed(ref fields) => {
if fields.unnamed.len()!= 1 {
errors.push(Error::new(
format!(
"Logos currently only supports variants with one field, found {}",
fields.unnamed.len(),
)
).span(fields.span()))
}
let field = fields.unnamed.first().expect("Already checked len; qed").ty.clone();
Some(field)
}
Fields::Named(_) => {
errors.push(Error::new("Logos doesn't support named fields yet.").span(span));
None
}
};
for attr in &variant.attrs {
let variant = &variant.ident;
let mut with_definition = |definition: Definition<Literal>| {
if let Literal::Bytes(..) = definition.value {
mode = Mode::Binary;
}
(
Leaf::token(variant).field(field.clone()).callback(definition.callback),
definition.value,
)
};
if attr.path.is_ident("error") {
if let Some(previous) = error.replace(variant) {
errors.extend(vec![
Error::new("Only one #[error] variant can be declared.").span(span),
Error::new("Previously declared #[error]:").span(previous.span()),
]);
}
} else if attr.path.is_ident("end") {
errors.push(
Error::new(
"Since 0.11 Logos no longer requires the #[end] variant.\n\n\
For help with migration see release notes: https://github.com/maciejhirsz/logos/releases"
).span(attr.span())
);
} else if attr.path.is_ident("token") {
match util::value_from_attr("token", attr) {
Ok(Some(definition)) => {
let (token, value) = with_definition(definition);
let value = value.into_bytes();
let then = graph.push(token.priority(value.len()));
ropes.push(Rope::new(value, then));
},
Err(err) => errors.push(err),
_ => (),
}
} else if attr.path.is_ident("regex") {
match util::value_from_attr("regex", attr) {
Ok(Some(definition)) => {
let (token, value) = with_definition(definition);
let then = graph.reserve();
let (utf8, regex, span) = match value {
Literal::Utf8(string, span) => (true, string, span),
Literal::Bytes(bytes, span) => {
mode = Mode::Binary;
(false, util::bytes_to_regex_string(&bytes), span)
}
};
match graph.regex(utf8, ®ex, then.get()) {
Ok((len, mut id)) => {
let then = graph.insert(then, token.priority(len));
regex_ids.push(id);
// Drain recursive miss values.
// We need the root node to have straight branches.
while let Some(miss) = graph[id].miss() {
if miss == then {
errors.push(
Error::new("#[regex]: expression can match empty string.\n\n\
hint: consider changing * to +").span(span)
);
break;
} else {
regex_ids.push(miss);
id = miss;
}
}
},
Err(err) => errors.push(err.span(span)),
}
},
Err(err) => errors.push(err),
_ => (),
}
}
}
}
let mut root = Fork::new();
let extras = match extras {
Some(ext) => quote!(#ext),
None => quote!(()),
};
let source = match mode {
Mode::Utf8 => quote!(str),
Mode::Binary => quote!([u8]),
};
let error_def = match error {
Some(error) => Some(quote!(const ERROR: Self = #name::#error;)),
None => {
errors.push(Error::new("missing #[error] token variant.").span(super_span));
None
},
};
let this = quote!(#name #generics);
let impl_logos = |body| {
quote! {
impl<'s> ::logos::Logos<'s> for #this {
type Extras = #extras;
type Source = #source;
const SIZE: usize = #size;
#error_def
fn lex(lex: &mut ::logos::Lexer<'s, Self>) {
#body
}
}
}
};
if errors.len() > 0 {
return impl_logos(quote! {
fn _logos_derive_compile_errors() {
#(#errors)*
}
}).into()
}
for id in regex_ids {
let fork = graph.fork_off(id);
root.merge(fork, &mut graph);
}
for rope in ropes {
root.merge(rope.into_fork(&mut graph), &mut graph)
}
while let Some(id) = root.miss.take() {
let fork = graph.fork_off(id);
if fork.branches().next().is_some() {
root.merge(fork, &mut graph);
} else {
break;
}
}
let root = graph.push(root);
graph.shake(root);
// panic!("{:#?}\n\n{} nodes", graph, graph.nodes().iter().filter_map(|n| n.as_ref()).count());
let generator = Generator::new(name, &this, root, &graph);
let body = generator.generate();
let tokens = impl_logos(quote! {
use ::logos::internal::{LexerInternal, CallbackResult};
type Lexer<'s> = ::logos::Lexer<'s, #name #generics>;
fn _end<'s>(lex: &mut Lexer<'s>) {
lex.end()
}
fn _error<'s>(lex: &mut Lexer<'s>) {
lex.bump_unchecked(1);
lex.set(#name::#error);
}
#body
});
// panic!("{}", tokens);
TokenStream::from(tokens)
} | const ERR: &str = "\ | random_line_split |
lib.rs | //! <img src="https://raw.githubusercontent.com/maciejhirsz/logos/master/logos.svg?sanitize=true" alt="Logos logo" width="250" align="right">
//!
//! # Logos
//!
//! This is a `#[derive]` macro crate, [for documentation go to main crate](https://docs.rs/logos).
// The `quote!` macro requires deep recursion.
#![recursion_limit = "196"]
mod generator;
mod error;
mod graph;
mod util;
mod leaf;
use error::Error;
use generator::Generator;
use graph::{Graph, Fork, Rope};
use leaf::Leaf;
use util::{Literal, Definition};
use proc_macro::TokenStream;
use quote::quote;
use syn::{Ident, Fields, ItemEnum, GenericParam, Attribute};
use syn::spanned::Spanned;
enum Mode {
Utf8,
Binary,
}
#[proc_macro_derive(
Logos,
attributes(logos, extras, error, end, token, regex, extras)
)]
pub fn logos(input: TokenStream) -> TokenStream {
let item: ItemEnum = syn::parse(input).expect("#[token] can be only applied to enums");
let super_span = item.span();
let size = item.variants.len();
let name = &item.ident;
let mut extras: Option<Ident> = None;
let mut error = None;
let mut mode = Mode::Utf8;
let mut errors = Vec::new();
let generics = match item.generics.params.len() {
0 => {
None
},
1 if matches!(item.generics.params.first(), Some(GenericParam::Lifetime(..))) => {
Some(quote!(<'s>))
},
_ => {
let span = item.generics.span();
errors.push(Error::new("Logos currently supports permits a single lifetime generic.").span(span));
None
}
};
let mut parse_attr = |attr: &Attribute| -> Result<(), error::SpannedError> {
if attr.path.is_ident("logos") {
if let Some(nested) = util::read_attr("logos", attr)? {
let span = nested.span();
if let Some(ext) = util::value_from_nested("extras", &nested)? {
if extras.replace(ext).is_some() {
return Err(Error::new("Extras can be defined only once.").span(span));
}
}
if let Err(_) = util::value_from_nested("trivia", &nested) {
const ERR: &str = "\
trivia are no longer supported.\n\n\
For help with migration see release notes: https://github.com/maciejhirsz/logos/releases";
return Err(Error::new(ERR).span(span));
}
}
}
if attr.path.is_ident("extras") {
const ERR: &str = "\
#[extras] attribute is deprecated. Use #[logos(extras = Type)] instead.\n\n\
For help with migration see release notes: https://github.com/maciejhirsz/logos/releases";
return Err(Error::new(ERR).span(attr.span()));
}
Ok(())
};
for attr in &item.attrs {
if let Err(err) = parse_attr(attr) {
errors.push(err);
}
}
let mut variants = Vec::new();
let mut ropes = Vec::new();
let mut regex_ids = Vec::new();
let mut graph = Graph::new();
for variant in &item.variants {
variants.push(&variant.ident);
let span = variant.span();
if let Some((_, value)) = &variant.discriminant {
let span = value.span();
let value = util::unpack_int(value).unwrap_or(usize::max_value());
if value >= size {
errors.push(Error::new(
format!(
"Discriminant value for `{}` is invalid. Expected integer in range 0..={}.",
variant.ident,
size,
),
).span(span));
}
}
let field = match &variant.fields {
Fields::Unit => None,
Fields::Unnamed(ref fields) => {
if fields.unnamed.len()!= 1 {
errors.push(Error::new(
format!(
"Logos currently only supports variants with one field, found {}",
fields.unnamed.len(),
)
).span(fields.span()))
}
let field = fields.unnamed.first().expect("Already checked len; qed").ty.clone();
Some(field)
}
Fields::Named(_) => {
errors.push(Error::new("Logos doesn't support named fields yet.").span(span));
None
}
};
for attr in &variant.attrs {
let variant = &variant.ident;
let mut with_definition = |definition: Definition<Literal>| {
if let Literal::Bytes(..) = definition.value {
mode = Mode::Binary;
}
(
Leaf::token(variant).field(field.clone()).callback(definition.callback),
definition.value,
)
};
if attr.path.is_ident("error") {
if let Some(previous) = error.replace(variant) {
errors.extend(vec![
Error::new("Only one #[error] variant can be declared.").span(span),
Error::new("Previously declared #[error]:").span(previous.span()),
]);
}
} else if attr.path.is_ident("end") {
errors.push(
Error::new(
"Since 0.11 Logos no longer requires the #[end] variant.\n\n\
For help with migration see release notes: https://github.com/maciejhirsz/logos/releases"
).span(attr.span())
);
} else if attr.path.is_ident("token") {
match util::value_from_attr("token", attr) {
Ok(Some(definition)) => {
let (token, value) = with_definition(definition);
let value = value.into_bytes();
let then = graph.push(token.priority(value.len()));
ropes.push(Rope::new(value, then));
},
Err(err) => errors.push(err),
_ => (),
}
} else if attr.path.is_ident("regex") {
match util::value_from_attr("regex", attr) {
Ok(Some(definition)) => {
let (token, value) = with_definition(definition);
let then = graph.reserve();
let (utf8, regex, span) = match value {
Literal::Utf8(string, span) => (true, string, span),
Literal::Bytes(bytes, span) => {
mode = Mode::Binary;
(false, util::bytes_to_regex_string(&bytes), span)
}
};
match graph.regex(utf8, ®ex, then.get()) {
Ok((len, mut id)) => {
let then = graph.insert(then, token.priority(len));
regex_ids.push(id);
// Drain recursive miss values.
// We need the root node to have straight branches.
while let Some(miss) = graph[id].miss() {
if miss == then {
errors.push(
Error::new("#[regex]: expression can match empty string.\n\n\
hint: consider changing * to +").span(span)
);
break;
} else {
regex_ids.push(miss);
id = miss;
}
}
},
Err(err) => errors.push(err.span(span)),
}
},
Err(err) => errors.push(err),
_ => (),
}
}
}
}
let mut root = Fork::new();
let extras = match extras {
Some(ext) => quote!(#ext),
None => quote!(()),
};
let source = match mode {
Mode::Utf8 => quote!(str),
Mode::Binary => quote!([u8]),
};
let error_def = match error {
Some(error) => Some(quote!(const ERROR: Self = #name::#error;)),
None => {
errors.push(Error::new("missing #[error] token variant.").span(super_span));
None
},
};
let this = quote!(#name #generics);
let impl_logos = |body| {
quote! {
impl<'s> ::logos::Logos<'s> for #this {
type Extras = #extras;
type Source = #source;
const SIZE: usize = #size;
#error_def
fn lex(lex: &mut ::logos::Lexer<'s, Self>) {
#body
}
}
}
};
if errors.len() > 0 {
return impl_logos(quote! {
fn _logos_derive_compile_errors() {
#(#errors)*
}
}).into()
}
for id in regex_ids {
let fork = graph.fork_off(id);
root.merge(fork, &mut graph);
}
for rope in ropes {
root.merge(rope.into_fork(&mut graph), &mut graph)
}
while let Some(id) = root.miss.take() {
let fork = graph.fork_off(id);
if fork.branches().next().is_some() {
root.merge(fork, &mut graph);
} else |
}
let root = graph.push(root);
graph.shake(root);
// panic!("{:#?}\n\n{} nodes", graph, graph.nodes().iter().filter_map(|n| n.as_ref()).count());
let generator = Generator::new(name, &this, root, &graph);
let body = generator.generate();
let tokens = impl_logos(quote! {
use ::logos::internal::{LexerInternal, CallbackResult};
type Lexer<'s> = ::logos::Lexer<'s, #name #generics>;
fn _end<'s>(lex: &mut Lexer<'s>) {
lex.end()
}
fn _error<'s>(lex: &mut Lexer<'s>) {
lex.bump_unchecked(1);
lex.set(#name::#error);
}
#body
});
// panic!("{}", tokens);
TokenStream::from(tokens)
}
| {
break;
} | conditional_block |
substrate_like.rs | // Copyright 2017, 2021 Parity Technologies
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Codec and layout configuration similar to upstream default substrate one.
use super::{CodecError as Error, NodeCodec as NodeCodecT, *};
use trie_db::node::Value;
/// No extension trie with no hashed value.
pub struct HashedValueNoExt;
/// No extension trie which stores value above a static size
/// as external node.
pub struct HashedValueNoExtThreshold<const C: u32>;
impl TrieLayout for HashedValueNoExt {
const USE_EXTENSION: bool = false;
const ALLOW_EMPTY: bool = false;
const MAX_INLINE_VALUE: Option<u32> = None;
type Hash = RefHasher;
type Codec = ReferenceNodeCodecNoExtMeta<RefHasher>;
}
impl<const C: u32> TrieLayout for HashedValueNoExtThreshold<C> {
const USE_EXTENSION: bool = false;
const ALLOW_EMPTY: bool = false;
const MAX_INLINE_VALUE: Option<u32> = Some(C);
type Hash = RefHasher;
type Codec = ReferenceNodeCodecNoExtMeta<RefHasher>;
}
/// Constants specific to encoding with external value node support.
pub mod trie_constants {
const FIRST_PREFIX: u8 = 0b_00 << 6;
pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize;
pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6;
pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6;
pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6;
pub const EMPTY_TRIE: u8 = FIRST_PREFIX | (0b_00 << 4);
pub const ALT_HASHING_LEAF_PREFIX_MASK: u8 = FIRST_PREFIX | (0b_1 << 5);
pub const ALT_HASHING_BRANCH_WITH_MASK: u8 = FIRST_PREFIX | (0b_01 << 4);
pub const ESCAPE_COMPACT_HEADER: u8 = EMPTY_TRIE | 0b_00_01;
}
#[derive(Default, Clone)]
pub struct NodeCodec<H>(PhantomData<H>);
impl<H: Hasher> NodeCodec<H> {
    /// Decode the node header and lay out a `NodePlan` over `data`.
    ///
    /// Handles the no-extension layout: empty node, nibbled branch and leaf,
    /// where the value is either stored inline (compact length + bytes) or as
    /// a fixed-width hash of the value (`HashedValue*` headers).
    fn decode_plan_inner_hashed(data: &[u8]) -> Result<NodePlan, Error> {
        let mut input = ByteSliceInput::new(data);
        let header = NodeHeader::decode(&mut input)?;
        let contains_hash = header.contains_hash_of_value();
        // Plain branch headers carry an explicit "has value" flag; the
        // alt-hashing branch header implies a (hashed) value is present.
        let branch_has_value = if let NodeHeader::Branch(has_value, _) = &header {
            *has_value
        } else {
            // alt_hash_branch
            true
        };
        match header {
            NodeHeader::Null => Ok(NodePlan::Empty),
            NodeHeader::HashedValueBranch(nibble_count) | NodeHeader::Branch(_, nibble_count) => {
                let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0;
                // check that the padding is valid (if any)
                if padding && nibble_ops::pad_left(data[input.offset]) != 0 {
                    return Err(CodecError::from("Bad format"))
                }
                // Partial key: nibble count rounded up to whole bytes.
                let partial = input.take(
                    (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) /
                        nibble_ops::NIBBLE_PER_BYTE,
                )?;
                let partial_padding = nibble_ops::number_padding(nibble_count);
                let bitmap_range = input.take(BITMAP_LENGTH)?;
                let bitmap = Bitmap::decode(&data[bitmap_range])?;
                let value = if branch_has_value {
                    Some(if contains_hash {
                        // Hashed value: fixed-width hash follows the bitmap.
                        ValuePlan::Node(input.take(H::LENGTH)?)
                    } else {
                        // Inline value: SCALE compact length, then the bytes.
                        let count = <Compact<u32>>::decode(&mut input)?.0 as usize;
                        ValuePlan::Inline(input.take(count)?)
                    })
                } else {
                    None
                };
                let mut children = [
                    None, None, None, None, None, None, None, None, None, None, None, None, None,
                    None, None, None,
                ];
                // One child slot per nibble; the bitmap says which are occupied.
                for i in 0..nibble_ops::NIBBLE_LENGTH {
                    if bitmap.value_at(i) {
                        let count = <Compact<u32>>::decode(&mut input)?.0 as usize;
                        let range = input.take(count)?;
                        // A hash-length child payload is a hash reference;
                        // anything shorter is an inline child node.
                        children[i] = Some(if count == H::LENGTH {
                            NodeHandlePlan::Hash(range)
                        } else {
                            NodeHandlePlan::Inline(range)
                        });
                    }
                }
                Ok(NodePlan::NibbledBranch {
                    partial: NibbleSlicePlan::new(partial, partial_padding),
                    value,
                    children,
                })
            },
            NodeHeader::HashedValueLeaf(nibble_count) | NodeHeader::Leaf(nibble_count) => {
                let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0;
                // check that the padding is valid (if any)
                if padding && nibble_ops::pad_left(data[input.offset]) != 0 {
                    return Err(CodecError::from("Bad format"))
                }
                let partial = input.take(
                    (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) /
                        nibble_ops::NIBBLE_PER_BYTE,
                )?;
                let partial_padding = nibble_ops::number_padding(nibble_count);
                // Same value layout as the branch case above.
                let value = if contains_hash {
                    ValuePlan::Node(input.take(H::LENGTH)?)
                } else {
                    let count = <Compact<u32>>::decode(&mut input)?.0 as usize;
                    ValuePlan::Inline(input.take(count)?)
                };
                Ok(NodePlan::Leaf {
                    partial: NibbleSlicePlan::new(partial, partial_padding),
                    value,
                })
            },
        }
    }
}
impl<H> NodeCodecT for NodeCodec<H>
where
H: Hasher,
{
const ESCAPE_HEADER: Option<u8> = Some(trie_constants::ESCAPE_COMPACT_HEADER);
type Error = Error;
type HashOut = H::Out;
fn hashed_null_node() -> <H as Hasher>::Out {
H::hash(<Self as NodeCodecT>::empty_node())
}
fn decode_plan(data: &[u8]) -> Result<NodePlan, Self::Error> {
Self::decode_plan_inner_hashed(data)
}
fn is_empty_node(data: &[u8]) -> bool {
data == <Self as NodeCodecT>::empty_node()
}
fn empty_node() -> &'static [u8] {
&[trie_constants::EMPTY_TRIE]
}
fn leaf_node(partial: impl Iterator<Item = u8>, number_nibble: usize, value: Value) -> Vec<u8> {
let contains_hash = matches!(&value, Value::Node(..));
let mut output = if contains_hash {
partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueLeaf)
} else {
partial_from_iterator_encode(partial, number_nibble, NodeKind::Leaf)
};
match value {
Value::Inline(value) => {
Compact(value.len() as u32).encode_to(&mut output);
output.extend_from_slice(value);
},
Value::Node(hash) => {
debug_assert!(hash.len() == H::LENGTH);
output.extend_from_slice(hash);
},
} | fn extension_node(
_partial: impl Iterator<Item = u8>,
_nbnibble: usize,
_child: ChildReference<<H as Hasher>::Out>,
) -> Vec<u8> {
unreachable!("Codec without extension.")
}
fn branch_node(
_children: impl Iterator<Item = impl Borrow<Option<ChildReference<<H as Hasher>::Out>>>>,
_maybe_value: Option<Value>,
) -> Vec<u8> {
unreachable!("Codec without extension.")
}
fn branch_node_nibbled(
partial: impl Iterator<Item = u8>,
number_nibble: usize,
children: impl Iterator<Item = impl Borrow<Option<ChildReference<<H as Hasher>::Out>>>>,
value: Option<Value>,
) -> Vec<u8> {
let contains_hash = matches!(&value, Some(Value::Node(..)));
let mut output = match (&value, contains_hash) {
(&None, _) =>
partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue),
(_, false) =>
partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue),
(_, true) =>
partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueBranch),
};
let bitmap_index = output.len();
let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH];
(0..BITMAP_LENGTH).for_each(|_| output.push(0));
match value {
Some(Value::Inline(value)) => {
Compact(value.len() as u32).encode_to(&mut output);
output.extend_from_slice(value);
},
Some(Value::Node(hash)) => {
debug_assert!(hash.len() == H::LENGTH);
output.extend_from_slice(hash);
},
None => (),
}
Bitmap::encode(
children.map(|maybe_child| match maybe_child.borrow() {
Some(ChildReference::Hash(h)) => {
h.as_ref().encode_to(&mut output);
true
},
&Some(ChildReference::Inline(inline_data, len)) => {
inline_data.as_ref()[..len].encode_to(&mut output);
true
},
None => false,
}),
bitmap.as_mut(),
);
output[bitmap_index..bitmap_index + BITMAP_LENGTH]
.copy_from_slice(&bitmap[..BITMAP_LENGTH]);
output
}
}
// utils
/// Encode and allocate node type header (type and size), and partial value.
/// It uses an iterator over encoded partial bytes as input.
fn partial_from_iterator_encode<I: Iterator<Item = u8>>(
partial: I,
nibble_count: usize,
node_kind: NodeKind,
) -> Vec<u8> {
let nibble_count = std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count);
let mut output = Vec::with_capacity(4 + (nibble_count / nibble_ops::NIBBLE_PER_BYTE));
match node_kind {
NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output),
NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output),
NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output),
NodeKind::HashedValueLeaf =>
NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output),
NodeKind::HashedValueBranch =>
NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output),
};
output.extend(partial);
output
}
/// A node header.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub(crate) enum NodeHeader {
Null,
// contains wether there is a value and nibble count
Branch(bool, usize),
// contains nibble count
Leaf(usize),
// contains nibble count.
HashedValueBranch(usize),
// contains nibble count.
HashedValueLeaf(usize),
}
impl NodeHeader {
fn contains_hash_of_value(&self) -> bool {
match self {
NodeHeader::HashedValueBranch(_) | NodeHeader::HashedValueLeaf(_) => true,
_ => false,
}
}
}
/// NodeHeader without content
pub(crate) enum NodeKind {
Leaf,
BranchNoValue,
BranchWithValue,
HashedValueLeaf,
HashedValueBranch,
}
impl Encode for NodeHeader {
fn encode_to<T: Output +?Sized>(&self, output: &mut T) {
match self {
NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE),
NodeHeader::Branch(true, nibble_count) =>
encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, 2, output),
NodeHeader::Branch(false, nibble_count) => encode_size_and_prefix(
*nibble_count,
trie_constants::BRANCH_WITHOUT_MASK,
2,
output,
),
NodeHeader::Leaf(nibble_count) =>
encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, 2, output),
NodeHeader::HashedValueBranch(nibble_count) => encode_size_and_prefix(
*nibble_count,
trie_constants::ALT_HASHING_BRANCH_WITH_MASK,
4,
output,
),
NodeHeader::HashedValueLeaf(nibble_count) => encode_size_and_prefix(
*nibble_count,
trie_constants::ALT_HASHING_LEAF_PREFIX_MASK,
3,
output,
),
}
}
}
impl parity_scale_codec::EncodeLike for NodeHeader {}
impl Decode for NodeHeader {
fn decode<I: Input>(input: &mut I) -> Result<Self, Error> {
let i = input.read_byte()?;
if i == trie_constants::EMPTY_TRIE {
return Ok(NodeHeader::Null)
}
match i & (0b11 << 6) {
trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input, 2)?)),
trie_constants::BRANCH_WITH_MASK =>
Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)),
trie_constants::BRANCH_WITHOUT_MASK =>
Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)),
trie_constants::EMPTY_TRIE => {
if i & (0b111 << 5) == trie_constants::ALT_HASHING_LEAF_PREFIX_MASK {
Ok(NodeHeader::HashedValueLeaf(decode_size(i, input, 3)?))
} else if i & (0b1111 << 4) == trie_constants::ALT_HASHING_BRANCH_WITH_MASK {
Ok(NodeHeader::HashedValueBranch(decode_size(i, input, 4)?))
} else {
// do not allow any special encoding
Err("Unallowed encoding".into())
}
},
_ => unreachable!(),
}
}
}
/// Returns an iterator over encoded bytes for node header and size.
/// Size encoding allows unlimited, length inefficient, representation, but
/// is bounded to 16 bit maximum value to avoid possible DOS.
pub(crate) fn size_and_prefix_iterator(
size: usize,
prefix: u8,
prefix_mask: usize,
) -> impl Iterator<Item = u8> {
let size = std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size);
let max_value = 255u8 >> prefix_mask;
let l1 = std::cmp::min(max_value as usize - 1, size);
let (first_byte, mut rem) = if size == l1 {
(once(prefix + l1 as u8), 0)
} else {
(once(prefix + max_value as u8), size - l1)
};
let next_bytes = move || {
if rem > 0 {
if rem < 256 {
let result = rem - 1;
rem = 0;
Some(result as u8)
} else {
rem = rem.saturating_sub(255);
Some(255)
}
} else {
None
}
};
first_byte.chain(std::iter::from_fn(next_bytes))
}
/// Encodes size and prefix to a stream output (prefix on 2 first bit only).
fn encode_size_and_prefix<W>(size: usize, prefix: u8, prefix_mask: usize, out: &mut W)
where
W: Output +?Sized,
{
for b in size_and_prefix_iterator(size, prefix, prefix_mask) {
out.push_byte(b)
}
}
/// Decode size only from stream input and header byte.
fn decode_size(first: u8, input: &mut impl Input, prefix_mask: usize) -> Result<usize, Error> {
let max_value = 255u8 >> prefix_mask;
let mut result = (first & max_value) as usize;
if result < max_value as usize {
return Ok(result)
}
result -= 1;
while result <= trie_constants::NIBBLE_SIZE_BOUND {
let n = input.read_byte()? as usize;
if n < 255 {
return Ok(result + n + 1)
}
result += 255;
}
Ok(trie_constants::NIBBLE_SIZE_BOUND)
}
/// Reference implementation of a `TrieStream` without extension.
#[derive(Default, Clone)]
pub struct ReferenceTrieStreamNoExt {
/// Current node buffer.
buffer: Vec<u8>,
}
/// Create a leaf/branch node, encoding a number of nibbles.
fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator<Item = u8> + 'a {
let size = std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibbles.len());
let iter_start = match kind {
NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK, 2),
NodeKind::BranchNoValue =>
size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2),
NodeKind::BranchWithValue =>
size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK, 2),
NodeKind::HashedValueLeaf =>
size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3),
NodeKind::HashedValueBranch =>
size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4),
};
iter_start
.chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None })
.chain(nibbles[nibbles.len() % 2..].chunks(2).map(|ch| ch[0] << 4 | ch[1]))
}
use trie_root::Value as TrieStreamValue;
impl TrieStream for ReferenceTrieStreamNoExt {
fn new() -> Self {
Self { buffer: Vec::new() }
}
fn append_empty_data(&mut self) {
self.buffer.push(trie_constants::EMPTY_TRIE);
}
fn append_leaf(&mut self, key: &[u8], value: TrieStreamValue) {
let kind = match &value {
TrieStreamValue::Inline(..) => NodeKind::Leaf,
TrieStreamValue::Node(..) => NodeKind::HashedValueLeaf,
};
self.buffer.extend(fuse_nibbles_node(key, kind));
match &value {
TrieStreamValue::Inline(value) => {
Compact(value.len() as u32).encode_to(&mut self.buffer);
self.buffer.extend_from_slice(value);
},
TrieStreamValue::Node(hash) => {
self.buffer.extend_from_slice(hash.as_slice());
},
};
}
fn begin_branch(
&mut self,
maybe_partial: Option<&[u8]>,
maybe_value: Option<TrieStreamValue>,
has_children: impl Iterator<Item = bool>,
) {
if let Some(partial) = maybe_partial {
let kind = match &maybe_value {
None => NodeKind::BranchNoValue,
Some(TrieStreamValue::Inline(..)) => NodeKind::BranchWithValue,
Some(TrieStreamValue::Node(..)) => NodeKind::HashedValueBranch,
};
self.buffer.extend(fuse_nibbles_node(partial, kind));
let bm = branch_node_bit_mask(has_children);
self.buffer.extend([bm.0, bm.1].iter());
} else {
unreachable!("trie stream codec only for no extension trie");
}
match maybe_value {
None => (),
Some(TrieStreamValue::Inline(value)) => {
Compact(value.len() as u32).encode_to(&mut self.buffer);
self.buffer.extend_from_slice(value);
},
Some(TrieStreamValue::Node(hash)) => {
self.buffer.extend_from_slice(hash.as_slice());
},
}
}
fn append_extension(&mut self, _key: &[u8]) {
unreachable!("trie stream codec only for no extension trie");
}
fn append_substream<H: Hasher>(&mut self, other: Self) {
let data = other.out();
match data.len() {
0..=31 => data.encode_to(&mut self.buffer),
_ => H::hash(&data).as_ref().encode_to(&mut self.buffer),
}
}
fn out(self) -> Vec<u8> {
self.buffer
}
} | output
}
| random_line_split |
substrate_like.rs | // Copyright 2017, 2021 Parity Technologies
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Codec and layout configuration similar to upstream default substrate one.
use super::{CodecError as Error, NodeCodec as NodeCodecT, *};
use trie_db::node::Value;
/// No extension trie with no hashed value.
pub struct HashedValueNoExt;
/// No extension trie which stores value above a static size
/// as external node.
pub struct HashedValueNoExtThreshold<const C: u32>;
impl TrieLayout for HashedValueNoExt {
const USE_EXTENSION: bool = false;
const ALLOW_EMPTY: bool = false;
const MAX_INLINE_VALUE: Option<u32> = None;
type Hash = RefHasher;
type Codec = ReferenceNodeCodecNoExtMeta<RefHasher>;
}
impl<const C: u32> TrieLayout for HashedValueNoExtThreshold<C> {
const USE_EXTENSION: bool = false;
const ALLOW_EMPTY: bool = false;
const MAX_INLINE_VALUE: Option<u32> = Some(C);
type Hash = RefHasher;
type Codec = ReferenceNodeCodecNoExtMeta<RefHasher>;
}
/// Constants specific to encoding with external value node support.
pub mod trie_constants {
    /// Top two bits `00`: shared prefix of the empty-trie byte and the
    /// alt-hashing headers.
    const FIRST_PREFIX: u8 = 0b_00 << 6;
    /// Nibble counts are capped at 16 bits to avoid a possible DOS via
    /// unbounded size encodings (see `size_and_prefix_iterator`).
    pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize;
    /// Top two bits `01`: leaf node.
    pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6;
    /// Top two bits `10`: branch without value.
    pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6;
    /// Top two bits `11`: branch with value.
    pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6;
    /// Single-byte encoding of the empty trie.
    pub const EMPTY_TRIE: u8 = FIRST_PREFIX | (0b_00 << 4);
    /// Three-bit prefix `001`: leaf storing the hash of its value.
    pub const ALT_HASHING_LEAF_PREFIX_MASK: u8 = FIRST_PREFIX | (0b_1 << 5);
    /// Four-bit prefix `0001`: branch storing the hash of its value.
    pub const ALT_HASHING_BRANCH_WITH_MASK: u8 = FIRST_PREFIX | (0b_01 << 4);
    /// Header byte used as escape marker (exposed as `NodeCodecT::ESCAPE_HEADER`).
    pub const ESCAPE_COMPACT_HEADER: u8 = EMPTY_TRIE | 0b_00_01;
}
#[derive(Default, Clone)]
pub struct NodeCodec<H>(PhantomData<H>);
impl<H: Hasher> NodeCodec<H> {
fn decode_plan_inner_hashed(data: &[u8]) -> Result<NodePlan, Error> {
let mut input = ByteSliceInput::new(data);
let header = NodeHeader::decode(&mut input)?;
let contains_hash = header.contains_hash_of_value();
let branch_has_value = if let NodeHeader::Branch(has_value, _) = &header {
*has_value
} else {
// alt_hash_branch
true
};
match header {
NodeHeader::Null => Ok(NodePlan::Empty),
NodeHeader::HashedValueBranch(nibble_count) | NodeHeader::Branch(_, nibble_count) => {
let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE!= 0;
// check that the padding is valid (if any)
if padding && nibble_ops::pad_left(data[input.offset])!= 0 {
return Err(CodecError::from("Bad format"))
}
let partial = input.take(
(nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) /
nibble_ops::NIBBLE_PER_BYTE,
)?;
let partial_padding = nibble_ops::number_padding(nibble_count);
let bitmap_range = input.take(BITMAP_LENGTH)?;
let bitmap = Bitmap::decode(&data[bitmap_range])?;
let value = if branch_has_value {
Some(if contains_hash {
ValuePlan::Node(input.take(H::LENGTH)?)
} else {
let count = <Compact<u32>>::decode(&mut input)?.0 as usize;
ValuePlan::Inline(input.take(count)?)
})
} else {
None
};
let mut children = [
None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None,
];
for i in 0..nibble_ops::NIBBLE_LENGTH {
if bitmap.value_at(i) {
let count = <Compact<u32>>::decode(&mut input)?.0 as usize;
let range = input.take(count)?;
children[i] = Some(if count == H::LENGTH {
NodeHandlePlan::Hash(range)
} else {
NodeHandlePlan::Inline(range)
});
}
}
Ok(NodePlan::NibbledBranch {
partial: NibbleSlicePlan::new(partial, partial_padding),
value,
children,
})
},
NodeHeader::HashedValueLeaf(nibble_count) | NodeHeader::Leaf(nibble_count) => | value,
})
}
,
}
}
}
impl<H> NodeCodecT for NodeCodec<H>
where
    H: Hasher,
{
    const ESCAPE_HEADER: Option<u8> = Some(trie_constants::ESCAPE_COMPACT_HEADER);
    type Error = Error;
    type HashOut = H::Out;

    /// Hash of the canonical empty-node encoding.
    fn hashed_null_node() -> <H as Hasher>::Out {
        H::hash(<Self as NodeCodecT>::empty_node())
    }

    fn decode_plan(data: &[u8]) -> Result<NodePlan, Self::Error> {
        Self::decode_plan_inner_hashed(data)
    }

    fn is_empty_node(data: &[u8]) -> bool {
        data == <Self as NodeCodecT>::empty_node()
    }

    /// Single-byte encoding of the empty trie node.
    fn empty_node() -> &'static [u8] {
        &[trie_constants::EMPTY_TRIE]
    }

    /// Encode a leaf: header + partial key, then either a compact-length
    /// inline value or the fixed-width value hash.
    fn leaf_node(partial: impl Iterator<Item = u8>, number_nibble: usize, value: Value) -> Vec<u8> {
        let contains_hash = matches!(&value, Value::Node(..));
        let mut output = if contains_hash {
            partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueLeaf)
        } else {
            partial_from_iterator_encode(partial, number_nibble, NodeKind::Leaf)
        };
        match value {
            Value::Inline(value) => {
                Compact(value.len() as u32).encode_to(&mut output);
                output.extend_from_slice(value);
            },
            Value::Node(hash) => {
                debug_assert!(hash.len() == H::LENGTH);
                output.extend_from_slice(hash);
            },
        }
        output
    }

    /// This codec has no extension nodes.
    fn extension_node(
        _partial: impl Iterator<Item = u8>,
        _nbnibble: usize,
        _child: ChildReference<<H as Hasher>::Out>,
    ) -> Vec<u8> {
        unreachable!("Codec without extension.")
    }

    /// Non-nibbled branches only exist in the extension layout.
    fn branch_node(
        _children: impl Iterator<Item = impl Borrow<Option<ChildReference<<H as Hasher>::Out>>>>,
        _maybe_value: Option<Value>,
    ) -> Vec<u8> {
        unreachable!("Codec without extension.")
    }

    /// Encode a branch with partial key: header + partial, a children bitmap,
    /// the optional value, then each occupied child in nibble order.
    fn branch_node_nibbled(
        partial: impl Iterator<Item = u8>,
        number_nibble: usize,
        children: impl Iterator<Item = impl Borrow<Option<ChildReference<<H as Hasher>::Out>>>>,
        value: Option<Value>,
    ) -> Vec<u8> {
        let contains_hash = matches!(&value, Some(Value::Node(..)));
        let mut output = match (&value, contains_hash) {
            (&None, _) =>
                partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue),
            (_, false) =>
                partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue),
            (_, true) =>
                partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueBranch),
        };
        // Reserve space for the bitmap now; it is back-patched after the
        // children iterator has told us which slots are occupied.
        let bitmap_index = output.len();
        let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH];
        (0..BITMAP_LENGTH).for_each(|_| output.push(0));
        match value {
            Some(Value::Inline(value)) => {
                Compact(value.len() as u32).encode_to(&mut output);
                output.extend_from_slice(value);
            },
            Some(Value::Node(hash)) => {
                debug_assert!(hash.len() == H::LENGTH);
                output.extend_from_slice(hash);
            },
            None => (),
        }
        // Appends each present child to `output` as a side effect while
        // collecting the occupancy bits.
        Bitmap::encode(
            children.map(|maybe_child| match maybe_child.borrow() {
                Some(ChildReference::Hash(h)) => {
                    h.as_ref().encode_to(&mut output);
                    true
                },
                &Some(ChildReference::Inline(inline_data, len)) => {
                    inline_data.as_ref()[..len].encode_to(&mut output);
                    true
                },
                None => false,
            }),
            bitmap.as_mut(),
        );
        output[bitmap_index..bitmap_index + BITMAP_LENGTH]
            .copy_from_slice(&bitmap[..BITMAP_LENGTH]);
        output
    }
}
// utils
/// Encode and allocate node type header (type and size), and partial value.
/// It uses an iterator over encoded partial bytes as input.
/// Encode and allocate node type header (type and size), and partial value.
/// It uses an iterator over encoded partial bytes as input.
fn partial_from_iterator_encode<I: Iterator<Item = u8>>(
    partial: I,
    nibble_count: usize,
    node_kind: NodeKind,
) -> Vec<u8> {
    // Clamp the nibble count to the DOS-protection bound.
    let nibble_count = std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count);
    // Build the header value first, then serialize it in one place.
    let header = match node_kind {
        NodeKind::Leaf => NodeHeader::Leaf(nibble_count),
        NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count),
        NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count),
        NodeKind::HashedValueLeaf => NodeHeader::HashedValueLeaf(nibble_count),
        NodeKind::HashedValueBranch => NodeHeader::HashedValueBranch(nibble_count),
    };
    let mut encoded = Vec::with_capacity(4 + (nibble_count / nibble_ops::NIBBLE_PER_BYTE));
    header.encode_to(&mut encoded);
    encoded.extend(partial);
    encoded
}
/// A node header.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub(crate) enum NodeHeader {
    /// Empty trie node.
    Null,
    /// Branch node: whether it carries a value, and the partial-key nibble count.
    Branch(bool, usize),
    /// Leaf node: partial-key nibble count.
    Leaf(usize),
    /// Branch whose value is stored as a hash: partial-key nibble count.
    HashedValueBranch(usize),
    /// Leaf whose value is stored as a hash: partial-key nibble count.
    HashedValueLeaf(usize),
}
impl NodeHeader {
    /// True when this header denotes a node storing the hash of its value
    /// rather than the value itself.
    fn contains_hash_of_value(&self) -> bool {
        matches!(self, NodeHeader::HashedValueBranch(_) | NodeHeader::HashedValueLeaf(_))
    }
}
/// NodeHeader without content
pub(crate) enum NodeKind {
Leaf,
BranchNoValue,
BranchWithValue,
HashedValueLeaf,
HashedValueBranch,
}
impl Encode for NodeHeader {
    /// Serialize the header: `Null` is a single marker byte, every other
    /// variant is a bit prefix followed by a variable-length nibble count.
    fn encode_to<T: Output + ?Sized>(&self, output: &mut T) {
        // Resolve the (nibble count, bit prefix, prefix width) triple once,
        // then share a single call into the size encoder.
        let (count, prefix, prefix_bits) = match self {
            NodeHeader::Null => {
                output.push_byte(trie_constants::EMPTY_TRIE);
                return
            },
            NodeHeader::Branch(true, count) => (*count, trie_constants::BRANCH_WITH_MASK, 2),
            NodeHeader::Branch(false, count) => (*count, trie_constants::BRANCH_WITHOUT_MASK, 2),
            NodeHeader::Leaf(count) => (*count, trie_constants::LEAF_PREFIX_MASK, 2),
            NodeHeader::HashedValueBranch(count) =>
                (*count, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4),
            NodeHeader::HashedValueLeaf(count) =>
                (*count, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3),
        };
        encode_size_and_prefix(count, prefix, prefix_bits, output)
    }
}
impl parity_scale_codec::EncodeLike for NodeHeader {}
impl Decode for NodeHeader {
    /// Inverse of `encode_to`: classify the first byte by its bit prefix and
    /// read any remaining nibble-count bytes from `input`.
    fn decode<I: Input>(input: &mut I) -> Result<Self, Error> {
        let i = input.read_byte()?;
        if i == trie_constants::EMPTY_TRIE {
            return Ok(NodeHeader::Null)
        }
        // Dispatch on the top two bits first.
        match i & (0b11 << 6) {
            trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input, 2)?)),
            trie_constants::BRANCH_WITH_MASK =>
                Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)),
            trie_constants::BRANCH_WITHOUT_MASK =>
                Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)),
            trie_constants::EMPTY_TRIE => {
                // Top bits `00`: the alt-hashing headers use longer prefixes
                // (3 bits for leaf, 4 bits for branch).
                if i & (0b111 << 5) == trie_constants::ALT_HASHING_LEAF_PREFIX_MASK {
                    Ok(NodeHeader::HashedValueLeaf(decode_size(i, input, 3)?))
                } else if i & (0b1111 << 4) == trie_constants::ALT_HASHING_BRANCH_WITH_MASK {
                    Ok(NodeHeader::HashedValueBranch(decode_size(i, input, 4)?))
                } else {
                    // do not allow any special encoding
                    Err("Unallowed encoding".into())
                }
            },
            // The two-bit mask above only has the four values matched.
            _ => unreachable!(),
        }
    }
}
/// Returns an iterator over encoded bytes for node header and size.
/// Size encoding allows unlimited, length inefficient, representation, but
/// is bounded to 16 bit maximum value to avoid possible DOS.
pub(crate) fn size_and_prefix_iterator(
    size: usize,
    prefix: u8,
    prefix_mask: usize,
) -> impl Iterator<Item = u8> {
    let size = std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size);
    // Largest size value that fits in the bits left over by the prefix.
    let max_value = 255u8 >> prefix_mask;
    let l1 = std::cmp::min(max_value as usize - 1, size);
    let (first_byte, mut rem) = if size == l1 {
        // Size fits entirely in the first (prefix) byte.
        (once(prefix + l1 as u8), 0)
    } else {
        // Saturate the first byte; spill the remainder into follow-up bytes.
        (once(prefix + max_value as u8), size - l1)
    };
    // Continuation bytes: 255 means "more follows"; a final byte < 255 holds
    // the last remainder minus one (mirrored by `decode_size`).
    let next_bytes = move || {
        if rem > 0 {
            if rem < 256 {
                let result = rem - 1;
                rem = 0;
                Some(result as u8)
            } else {
                rem = rem.saturating_sub(255);
                Some(255)
            }
        } else {
            None
        }
    };
    first_byte.chain(std::iter::from_fn(next_bytes))
}
/// Encodes size and prefix to a stream output (prefix on 2 first bit only).
fn encode_size_and_prefix<W>(size: usize, prefix: u8, prefix_mask: usize, out: &mut W)
where
    W: Output + ?Sized,
{
    // Drain the header-byte iterator straight into the output stream.
    size_and_prefix_iterator(size, prefix, prefix_mask).for_each(|byte| out.push_byte(byte));
}
/// Decode size only from stream input and header byte.
fn decode_size(first: u8, input: &mut impl Input, prefix_mask: usize) -> Result<usize, Error> {
    let max_value = 255u8 >> prefix_mask;
    let mut result = (first & max_value) as usize;
    if result < max_value as usize {
        // Size fitted entirely in the header byte.
        return Ok(result)
    }
    result -= 1;
    // Accumulate continuation bytes; 255 means "keep reading", a smaller byte
    // terminates (mirrors `size_and_prefix_iterator`).
    while result <= trie_constants::NIBBLE_SIZE_BOUND {
        let n = input.read_byte()? as usize;
        if n < 255 {
            return Ok(result + n + 1)
        }
        result += 255;
    }
    // Saturate at the bound instead of growing without limit (DOS protection).
    Ok(trie_constants::NIBBLE_SIZE_BOUND)
}
/// Reference implementation of a `TrieStream` without extension.
#[derive(Default, Clone)]
pub struct ReferenceTrieStreamNoExt {
/// Current node buffer.
buffer: Vec<u8>,
}
/// Create a leaf/branch node, encoding a number of nibbles.
fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator<Item = u8> + 'a {
    let size = std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibbles.len());
    // Header bytes (prefix + size) for the requested node kind.
    let iter_start = match kind {
        NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK, 2),
        NodeKind::BranchNoValue =>
            size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2),
        NodeKind::BranchWithValue =>
            size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK, 2),
        NodeKind::HashedValueLeaf =>
            size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3),
        NodeKind::HashedValueBranch =>
            size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4),
    };
    // For an odd nibble count the first nibble gets its own byte, then the
    // remaining nibbles are packed two per byte, high nibble first.
    iter_start
        .chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None })
        .chain(nibbles[nibbles.len() % 2..].chunks(2).map(|ch| ch[0] << 4 | ch[1]))
}
use trie_root::Value as TrieStreamValue;
impl TrieStream for ReferenceTrieStreamNoExt {
    fn new() -> Self {
        Self { buffer: Vec::new() }
    }

    /// Append the single-byte empty-trie encoding.
    fn append_empty_data(&mut self) {
        self.buffer.push(trie_constants::EMPTY_TRIE);
    }

    /// Stream a leaf: fused header/partial-key bytes, then either the inline
    /// value (compact length + bytes) or the value hash.
    fn append_leaf(&mut self, key: &[u8], value: TrieStreamValue) {
        let kind = match &value {
            TrieStreamValue::Inline(..) => NodeKind::Leaf,
            TrieStreamValue::Node(..) => NodeKind::HashedValueLeaf,
        };
        self.buffer.extend(fuse_nibbles_node(key, kind));
        match &value {
            TrieStreamValue::Inline(value) => {
                Compact(value.len() as u32).encode_to(&mut self.buffer);
                self.buffer.extend_from_slice(value);
            },
            TrieStreamValue::Node(hash) => {
                self.buffer.extend_from_slice(hash.as_slice());
            },
        };
    }

    /// Stream a branch header: fused partial key, the two-byte children
    /// bitmap, then the optional value.
    fn begin_branch(
        &mut self,
        maybe_partial: Option<&[u8]>,
        maybe_value: Option<TrieStreamValue>,
        has_children: impl Iterator<Item = bool>,
    ) {
        if let Some(partial) = maybe_partial {
            let kind = match &maybe_value {
                None => NodeKind::BranchNoValue,
                Some(TrieStreamValue::Inline(..)) => NodeKind::BranchWithValue,
                Some(TrieStreamValue::Node(..)) => NodeKind::HashedValueBranch,
            };
            self.buffer.extend(fuse_nibbles_node(partial, kind));
            let bm = branch_node_bit_mask(has_children);
            self.buffer.extend([bm.0, bm.1].iter());
        } else {
            // This layout always stores the partial key in the branch itself.
            unreachable!("trie stream codec only for no extension trie");
        }
        match maybe_value {
            None => (),
            Some(TrieStreamValue::Inline(value)) => {
                Compact(value.len() as u32).encode_to(&mut self.buffer);
                self.buffer.extend_from_slice(value);
            },
            Some(TrieStreamValue::Node(hash)) => {
                self.buffer.extend_from_slice(hash.as_slice());
            },
        }
    }

    /// Extensions are unsupported in this layout.
    fn append_extension(&mut self, _key: &[u8]) {
        unreachable!("trie stream codec only for no extension trie");
    }

    /// Append a child node: inlined when its encoding is short (<= 31 bytes),
    /// otherwise referenced by its hash.
    fn append_substream<H: Hasher>(&mut self, other: Self) {
        let data = other.out();
        match data.len() {
            0..=31 => data.encode_to(&mut self.buffer),
            _ => H::hash(&data).as_ref().encode_to(&mut self.buffer),
        }
    }

    fn out(self) -> Vec<u8> {
        self.buffer
    }
}
| {
let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0;
// check that the padding is valid (if any)
if padding && nibble_ops::pad_left(data[input.offset]) != 0 {
return Err(CodecError::from("Bad format"))
}
let partial = input.take(
(nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) /
nibble_ops::NIBBLE_PER_BYTE,
)?;
let partial_padding = nibble_ops::number_padding(nibble_count);
let value = if contains_hash {
ValuePlan::Node(input.take(H::LENGTH)?)
} else {
let count = <Compact<u32>>::decode(&mut input)?.0 as usize;
ValuePlan::Inline(input.take(count)?)
};
Ok(NodePlan::Leaf {
partial: NibbleSlicePlan::new(partial, partial_padding), | conditional_block |
substrate_like.rs | // Copyright 2017, 2021 Parity Technologies
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Codec and layout configuration similar to upstream default substrate one.
use super::{CodecError as Error, NodeCodec as NodeCodecT, *};
use trie_db::node::Value;
/// No extension trie with no hashed value.
pub struct HashedValueNoExt;
/// No extension trie which stores value above a static size
/// as external node.
pub struct HashedValueNoExtThreshold<const C: u32>;
impl TrieLayout for HashedValueNoExt {
const USE_EXTENSION: bool = false;
const ALLOW_EMPTY: bool = false;
const MAX_INLINE_VALUE: Option<u32> = None;
type Hash = RefHasher;
type Codec = ReferenceNodeCodecNoExtMeta<RefHasher>;
}
impl<const C: u32> TrieLayout for HashedValueNoExtThreshold<C> {
const USE_EXTENSION: bool = false;
const ALLOW_EMPTY: bool = false;
const MAX_INLINE_VALUE: Option<u32> = Some(C);
type Hash = RefHasher;
type Codec = ReferenceNodeCodecNoExtMeta<RefHasher>;
}
/// Constants specific to encoding with external value node support.
pub mod trie_constants {
const FIRST_PREFIX: u8 = 0b_00 << 6;
pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize;
pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6;
pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6;
pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6;
pub const EMPTY_TRIE: u8 = FIRST_PREFIX | (0b_00 << 4);
pub const ALT_HASHING_LEAF_PREFIX_MASK: u8 = FIRST_PREFIX | (0b_1 << 5);
pub const ALT_HASHING_BRANCH_WITH_MASK: u8 = FIRST_PREFIX | (0b_01 << 4);
pub const ESCAPE_COMPACT_HEADER: u8 = EMPTY_TRIE | 0b_00_01;
}
#[derive(Default, Clone)]
pub struct NodeCodec<H>(PhantomData<H>);
impl<H: Hasher> NodeCodec<H> {
fn decode_plan_inner_hashed(data: &[u8]) -> Result<NodePlan, Error> {
let mut input = ByteSliceInput::new(data);
let header = NodeHeader::decode(&mut input)?;
let contains_hash = header.contains_hash_of_value();
let branch_has_value = if let NodeHeader::Branch(has_value, _) = &header {
*has_value
} else {
// alt_hash_branch
true
};
match header {
NodeHeader::Null => Ok(NodePlan::Empty),
NodeHeader::HashedValueBranch(nibble_count) | NodeHeader::Branch(_, nibble_count) => {
let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE!= 0;
// check that the padding is valid (if any)
if padding && nibble_ops::pad_left(data[input.offset])!= 0 {
return Err(CodecError::from("Bad format"))
}
let partial = input.take(
(nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) /
nibble_ops::NIBBLE_PER_BYTE,
)?;
let partial_padding = nibble_ops::number_padding(nibble_count);
let bitmap_range = input.take(BITMAP_LENGTH)?;
let bitmap = Bitmap::decode(&data[bitmap_range])?;
let value = if branch_has_value {
Some(if contains_hash {
ValuePlan::Node(input.take(H::LENGTH)?)
} else {
let count = <Compact<u32>>::decode(&mut input)?.0 as usize;
ValuePlan::Inline(input.take(count)?)
})
} else {
None
};
let mut children = [
None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None,
];
for i in 0..nibble_ops::NIBBLE_LENGTH {
if bitmap.value_at(i) {
let count = <Compact<u32>>::decode(&mut input)?.0 as usize;
let range = input.take(count)?;
children[i] = Some(if count == H::LENGTH {
NodeHandlePlan::Hash(range)
} else {
NodeHandlePlan::Inline(range)
});
}
}
Ok(NodePlan::NibbledBranch {
partial: NibbleSlicePlan::new(partial, partial_padding),
value,
children,
})
},
NodeHeader::HashedValueLeaf(nibble_count) | NodeHeader::Leaf(nibble_count) => {
let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE!= 0;
// check that the padding is valid (if any)
if padding && nibble_ops::pad_left(data[input.offset])!= 0 {
return Err(CodecError::from("Bad format"))
}
let partial = input.take(
(nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) /
nibble_ops::NIBBLE_PER_BYTE,
)?;
let partial_padding = nibble_ops::number_padding(nibble_count);
let value = if contains_hash {
ValuePlan::Node(input.take(H::LENGTH)?)
} else {
let count = <Compact<u32>>::decode(&mut input)?.0 as usize;
ValuePlan::Inline(input.take(count)?)
};
Ok(NodePlan::Leaf {
partial: NibbleSlicePlan::new(partial, partial_padding),
value,
})
},
}
}
}
impl<H> NodeCodecT for NodeCodec<H>
where
H: Hasher,
{
const ESCAPE_HEADER: Option<u8> = Some(trie_constants::ESCAPE_COMPACT_HEADER);
type Error = Error;
type HashOut = H::Out;
fn hashed_null_node() -> <H as Hasher>::Out {
H::hash(<Self as NodeCodecT>::empty_node())
}
fn decode_plan(data: &[u8]) -> Result<NodePlan, Self::Error> {
Self::decode_plan_inner_hashed(data)
}
fn is_empty_node(data: &[u8]) -> bool {
data == <Self as NodeCodecT>::empty_node()
}
fn empty_node() -> &'static [u8] {
&[trie_constants::EMPTY_TRIE]
}
fn leaf_node(partial: impl Iterator<Item = u8>, number_nibble: usize, value: Value) -> Vec<u8> {
let contains_hash = matches!(&value, Value::Node(..));
let mut output = if contains_hash {
partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueLeaf)
} else {
partial_from_iterator_encode(partial, number_nibble, NodeKind::Leaf)
};
match value {
Value::Inline(value) => {
Compact(value.len() as u32).encode_to(&mut output);
output.extend_from_slice(value);
},
Value::Node(hash) => {
debug_assert!(hash.len() == H::LENGTH);
output.extend_from_slice(hash);
},
}
output
}
fn extension_node(
_partial: impl Iterator<Item = u8>,
_nbnibble: usize,
_child: ChildReference<<H as Hasher>::Out>,
) -> Vec<u8> {
unreachable!("Codec without extension.")
}
fn branch_node(
_children: impl Iterator<Item = impl Borrow<Option<ChildReference<<H as Hasher>::Out>>>>,
_maybe_value: Option<Value>,
) -> Vec<u8> {
unreachable!("Codec without extension.")
}
fn branch_node_nibbled(
partial: impl Iterator<Item = u8>,
number_nibble: usize,
children: impl Iterator<Item = impl Borrow<Option<ChildReference<<H as Hasher>::Out>>>>,
value: Option<Value>,
) -> Vec<u8> {
let contains_hash = matches!(&value, Some(Value::Node(..)));
let mut output = match (&value, contains_hash) {
(&None, _) =>
partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue),
(_, false) =>
partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue),
(_, true) =>
partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueBranch),
};
let bitmap_index = output.len();
let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH];
(0..BITMAP_LENGTH).for_each(|_| output.push(0));
match value {
Some(Value::Inline(value)) => {
Compact(value.len() as u32).encode_to(&mut output);
output.extend_from_slice(value);
},
Some(Value::Node(hash)) => {
debug_assert!(hash.len() == H::LENGTH);
output.extend_from_slice(hash);
},
None => (),
}
Bitmap::encode(
children.map(|maybe_child| match maybe_child.borrow() {
Some(ChildReference::Hash(h)) => {
h.as_ref().encode_to(&mut output);
true
},
&Some(ChildReference::Inline(inline_data, len)) => {
inline_data.as_ref()[..len].encode_to(&mut output);
true
},
None => false,
}),
bitmap.as_mut(),
);
output[bitmap_index..bitmap_index + BITMAP_LENGTH]
.copy_from_slice(&bitmap[..BITMAP_LENGTH]);
output
}
}
// utils
/// Encode and allocate node type header (type and size), and partial value.
/// It uses an iterator over encoded partial bytes as input.
fn partial_from_iterator_encode<I: Iterator<Item = u8>>(
partial: I,
nibble_count: usize,
node_kind: NodeKind,
) -> Vec<u8> {
let nibble_count = std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count);
let mut output = Vec::with_capacity(4 + (nibble_count / nibble_ops::NIBBLE_PER_BYTE));
match node_kind {
NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output),
NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output),
NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output),
NodeKind::HashedValueLeaf =>
NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output),
NodeKind::HashedValueBranch =>
NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output),
};
output.extend(partial);
output
}
/// A node header.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub(crate) enum NodeHeader {
Null,
// contains wether there is a value and nibble count
Branch(bool, usize),
// contains nibble count
Leaf(usize),
// contains nibble count.
HashedValueBranch(usize),
// contains nibble count.
HashedValueLeaf(usize),
}
impl NodeHeader {
fn contains_hash_of_value(&self) -> bool {
match self {
NodeHeader::HashedValueBranch(_) | NodeHeader::HashedValueLeaf(_) => true,
_ => false,
}
}
}
/// NodeHeader without content
pub(crate) enum | {
Leaf,
BranchNoValue,
BranchWithValue,
HashedValueLeaf,
HashedValueBranch,
}
impl Encode for NodeHeader {
fn encode_to<T: Output +?Sized>(&self, output: &mut T) {
match self {
NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE),
NodeHeader::Branch(true, nibble_count) =>
encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, 2, output),
NodeHeader::Branch(false, nibble_count) => encode_size_and_prefix(
*nibble_count,
trie_constants::BRANCH_WITHOUT_MASK,
2,
output,
),
NodeHeader::Leaf(nibble_count) =>
encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, 2, output),
NodeHeader::HashedValueBranch(nibble_count) => encode_size_and_prefix(
*nibble_count,
trie_constants::ALT_HASHING_BRANCH_WITH_MASK,
4,
output,
),
NodeHeader::HashedValueLeaf(nibble_count) => encode_size_and_prefix(
*nibble_count,
trie_constants::ALT_HASHING_LEAF_PREFIX_MASK,
3,
output,
),
}
}
}
impl parity_scale_codec::EncodeLike for NodeHeader {}
impl Decode for NodeHeader {
fn decode<I: Input>(input: &mut I) -> Result<Self, Error> {
let i = input.read_byte()?;
if i == trie_constants::EMPTY_TRIE {
return Ok(NodeHeader::Null)
}
match i & (0b11 << 6) {
trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input, 2)?)),
trie_constants::BRANCH_WITH_MASK =>
Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)),
trie_constants::BRANCH_WITHOUT_MASK =>
Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)),
trie_constants::EMPTY_TRIE => {
if i & (0b111 << 5) == trie_constants::ALT_HASHING_LEAF_PREFIX_MASK {
Ok(NodeHeader::HashedValueLeaf(decode_size(i, input, 3)?))
} else if i & (0b1111 << 4) == trie_constants::ALT_HASHING_BRANCH_WITH_MASK {
Ok(NodeHeader::HashedValueBranch(decode_size(i, input, 4)?))
} else {
// do not allow any special encoding
Err("Unallowed encoding".into())
}
},
_ => unreachable!(),
}
}
}
/// Returns an iterator over encoded bytes for node header and size.
/// Size encoding allows unlimited, length inefficient, representation, but
/// is bounded to 16 bit maximum value to avoid possible DOS.
pub(crate) fn size_and_prefix_iterator(
size: usize,
prefix: u8,
prefix_mask: usize,
) -> impl Iterator<Item = u8> {
let size = std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size);
let max_value = 255u8 >> prefix_mask;
let l1 = std::cmp::min(max_value as usize - 1, size);
let (first_byte, mut rem) = if size == l1 {
(once(prefix + l1 as u8), 0)
} else {
(once(prefix + max_value as u8), size - l1)
};
let next_bytes = move || {
if rem > 0 {
if rem < 256 {
let result = rem - 1;
rem = 0;
Some(result as u8)
} else {
rem = rem.saturating_sub(255);
Some(255)
}
} else {
None
}
};
first_byte.chain(std::iter::from_fn(next_bytes))
}
/// Encodes size and prefix to a stream output (prefix on 2 first bit only).
fn encode_size_and_prefix<W>(size: usize, prefix: u8, prefix_mask: usize, out: &mut W)
where
W: Output +?Sized,
{
for b in size_and_prefix_iterator(size, prefix, prefix_mask) {
out.push_byte(b)
}
}
/// Decode size only from stream input and header byte.
fn decode_size(first: u8, input: &mut impl Input, prefix_mask: usize) -> Result<usize, Error> {
let max_value = 255u8 >> prefix_mask;
let mut result = (first & max_value) as usize;
if result < max_value as usize {
return Ok(result)
}
result -= 1;
while result <= trie_constants::NIBBLE_SIZE_BOUND {
let n = input.read_byte()? as usize;
if n < 255 {
return Ok(result + n + 1)
}
result += 255;
}
Ok(trie_constants::NIBBLE_SIZE_BOUND)
}
/// Reference implementation of a `TrieStream` without extension.
#[derive(Default, Clone)]
pub struct ReferenceTrieStreamNoExt {
/// Current node buffer.
buffer: Vec<u8>,
}
/// Create a leaf/branch node, encoding a number of nibbles.
fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator<Item = u8> + 'a {
let size = std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibbles.len());
let iter_start = match kind {
NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK, 2),
NodeKind::BranchNoValue =>
size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2),
NodeKind::BranchWithValue =>
size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK, 2),
NodeKind::HashedValueLeaf =>
size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3),
NodeKind::HashedValueBranch =>
size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4),
};
iter_start
.chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None })
.chain(nibbles[nibbles.len() % 2..].chunks(2).map(|ch| ch[0] << 4 | ch[1]))
}
use trie_root::Value as TrieStreamValue;
impl TrieStream for ReferenceTrieStreamNoExt {
fn new() -> Self {
Self { buffer: Vec::new() }
}
fn append_empty_data(&mut self) {
self.buffer.push(trie_constants::EMPTY_TRIE);
}
fn append_leaf(&mut self, key: &[u8], value: TrieStreamValue) {
let kind = match &value {
TrieStreamValue::Inline(..) => NodeKind::Leaf,
TrieStreamValue::Node(..) => NodeKind::HashedValueLeaf,
};
self.buffer.extend(fuse_nibbles_node(key, kind));
match &value {
TrieStreamValue::Inline(value) => {
Compact(value.len() as u32).encode_to(&mut self.buffer);
self.buffer.extend_from_slice(value);
},
TrieStreamValue::Node(hash) => {
self.buffer.extend_from_slice(hash.as_slice());
},
};
}
fn begin_branch(
&mut self,
maybe_partial: Option<&[u8]>,
maybe_value: Option<TrieStreamValue>,
has_children: impl Iterator<Item = bool>,
) {
if let Some(partial) = maybe_partial {
let kind = match &maybe_value {
None => NodeKind::BranchNoValue,
Some(TrieStreamValue::Inline(..)) => NodeKind::BranchWithValue,
Some(TrieStreamValue::Node(..)) => NodeKind::HashedValueBranch,
};
self.buffer.extend(fuse_nibbles_node(partial, kind));
let bm = branch_node_bit_mask(has_children);
self.buffer.extend([bm.0, bm.1].iter());
} else {
unreachable!("trie stream codec only for no extension trie");
}
match maybe_value {
None => (),
Some(TrieStreamValue::Inline(value)) => {
Compact(value.len() as u32).encode_to(&mut self.buffer);
self.buffer.extend_from_slice(value);
},
Some(TrieStreamValue::Node(hash)) => {
self.buffer.extend_from_slice(hash.as_slice());
},
}
}
fn append_extension(&mut self, _key: &[u8]) {
unreachable!("trie stream codec only for no extension trie");
}
fn append_substream<H: Hasher>(&mut self, other: Self) {
let data = other.out();
match data.len() {
0..=31 => data.encode_to(&mut self.buffer),
_ => H::hash(&data).as_ref().encode_to(&mut self.buffer),
}
}
fn out(self) -> Vec<u8> {
self.buffer
}
}
| NodeKind | identifier_name |
substrate_like.rs | // Copyright 2017, 2021 Parity Technologies
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Codec and layout configuration similar to upstream default substrate one.
use super::{CodecError as Error, NodeCodec as NodeCodecT, *};
use trie_db::node::Value;
/// No extension trie with no hashed value.
pub struct HashedValueNoExt;
/// No extension trie which stores value above a static size
/// as external node.
pub struct HashedValueNoExtThreshold<const C: u32>;
impl TrieLayout for HashedValueNoExt {
const USE_EXTENSION: bool = false;
const ALLOW_EMPTY: bool = false;
const MAX_INLINE_VALUE: Option<u32> = None;
type Hash = RefHasher;
type Codec = ReferenceNodeCodecNoExtMeta<RefHasher>;
}
impl<const C: u32> TrieLayout for HashedValueNoExtThreshold<C> {
const USE_EXTENSION: bool = false;
const ALLOW_EMPTY: bool = false;
const MAX_INLINE_VALUE: Option<u32> = Some(C);
type Hash = RefHasher;
type Codec = ReferenceNodeCodecNoExtMeta<RefHasher>;
}
/// Constants specific to encoding with external value node support.
pub mod trie_constants {
const FIRST_PREFIX: u8 = 0b_00 << 6;
pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize;
pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6;
pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6;
pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6;
pub const EMPTY_TRIE: u8 = FIRST_PREFIX | (0b_00 << 4);
pub const ALT_HASHING_LEAF_PREFIX_MASK: u8 = FIRST_PREFIX | (0b_1 << 5);
pub const ALT_HASHING_BRANCH_WITH_MASK: u8 = FIRST_PREFIX | (0b_01 << 4);
pub const ESCAPE_COMPACT_HEADER: u8 = EMPTY_TRIE | 0b_00_01;
}
#[derive(Default, Clone)]
pub struct NodeCodec<H>(PhantomData<H>);
impl<H: Hasher> NodeCodec<H> {
fn decode_plan_inner_hashed(data: &[u8]) -> Result<NodePlan, Error> {
let mut input = ByteSliceInput::new(data);
let header = NodeHeader::decode(&mut input)?;
let contains_hash = header.contains_hash_of_value();
let branch_has_value = if let NodeHeader::Branch(has_value, _) = &header {
*has_value
} else {
// alt_hash_branch
true
};
match header {
NodeHeader::Null => Ok(NodePlan::Empty),
NodeHeader::HashedValueBranch(nibble_count) | NodeHeader::Branch(_, nibble_count) => {
let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE!= 0;
// check that the padding is valid (if any)
if padding && nibble_ops::pad_left(data[input.offset])!= 0 {
return Err(CodecError::from("Bad format"))
}
let partial = input.take(
(nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) /
nibble_ops::NIBBLE_PER_BYTE,
)?;
let partial_padding = nibble_ops::number_padding(nibble_count);
let bitmap_range = input.take(BITMAP_LENGTH)?;
let bitmap = Bitmap::decode(&data[bitmap_range])?;
let value = if branch_has_value {
Some(if contains_hash {
ValuePlan::Node(input.take(H::LENGTH)?)
} else {
let count = <Compact<u32>>::decode(&mut input)?.0 as usize;
ValuePlan::Inline(input.take(count)?)
})
} else {
None
};
let mut children = [
None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None,
];
for i in 0..nibble_ops::NIBBLE_LENGTH {
if bitmap.value_at(i) {
let count = <Compact<u32>>::decode(&mut input)?.0 as usize;
let range = input.take(count)?;
children[i] = Some(if count == H::LENGTH {
NodeHandlePlan::Hash(range)
} else {
NodeHandlePlan::Inline(range)
});
}
}
Ok(NodePlan::NibbledBranch {
partial: NibbleSlicePlan::new(partial, partial_padding),
value,
children,
})
},
NodeHeader::HashedValueLeaf(nibble_count) | NodeHeader::Leaf(nibble_count) => {
let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE!= 0;
// check that the padding is valid (if any)
if padding && nibble_ops::pad_left(data[input.offset])!= 0 {
return Err(CodecError::from("Bad format"))
}
let partial = input.take(
(nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) /
nibble_ops::NIBBLE_PER_BYTE,
)?;
let partial_padding = nibble_ops::number_padding(nibble_count);
let value = if contains_hash {
ValuePlan::Node(input.take(H::LENGTH)?)
} else {
let count = <Compact<u32>>::decode(&mut input)?.0 as usize;
ValuePlan::Inline(input.take(count)?)
};
Ok(NodePlan::Leaf {
partial: NibbleSlicePlan::new(partial, partial_padding),
value,
})
},
}
}
}
impl<H> NodeCodecT for NodeCodec<H>
where
H: Hasher,
{
const ESCAPE_HEADER: Option<u8> = Some(trie_constants::ESCAPE_COMPACT_HEADER);
type Error = Error;
type HashOut = H::Out;
fn hashed_null_node() -> <H as Hasher>::Out {
H::hash(<Self as NodeCodecT>::empty_node())
}
fn decode_plan(data: &[u8]) -> Result<NodePlan, Self::Error> {
Self::decode_plan_inner_hashed(data)
}
fn is_empty_node(data: &[u8]) -> bool {
data == <Self as NodeCodecT>::empty_node()
}
fn empty_node() -> &'static [u8] {
&[trie_constants::EMPTY_TRIE]
}
fn leaf_node(partial: impl Iterator<Item = u8>, number_nibble: usize, value: Value) -> Vec<u8> {
let contains_hash = matches!(&value, Value::Node(..));
let mut output = if contains_hash {
partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueLeaf)
} else {
partial_from_iterator_encode(partial, number_nibble, NodeKind::Leaf)
};
match value {
Value::Inline(value) => {
Compact(value.len() as u32).encode_to(&mut output);
output.extend_from_slice(value);
},
Value::Node(hash) => {
debug_assert!(hash.len() == H::LENGTH);
output.extend_from_slice(hash);
},
}
output
}
fn extension_node(
_partial: impl Iterator<Item = u8>,
_nbnibble: usize,
_child: ChildReference<<H as Hasher>::Out>,
) -> Vec<u8> {
unreachable!("Codec without extension.")
}
fn branch_node(
_children: impl Iterator<Item = impl Borrow<Option<ChildReference<<H as Hasher>::Out>>>>,
_maybe_value: Option<Value>,
) -> Vec<u8> {
unreachable!("Codec without extension.")
}
fn branch_node_nibbled(
partial: impl Iterator<Item = u8>,
number_nibble: usize,
children: impl Iterator<Item = impl Borrow<Option<ChildReference<<H as Hasher>::Out>>>>,
value: Option<Value>,
) -> Vec<u8> {
let contains_hash = matches!(&value, Some(Value::Node(..)));
let mut output = match (&value, contains_hash) {
(&None, _) =>
partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue),
(_, false) =>
partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue),
(_, true) =>
partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueBranch),
};
let bitmap_index = output.len();
let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH];
(0..BITMAP_LENGTH).for_each(|_| output.push(0));
match value {
Some(Value::Inline(value)) => {
Compact(value.len() as u32).encode_to(&mut output);
output.extend_from_slice(value);
},
Some(Value::Node(hash)) => {
debug_assert!(hash.len() == H::LENGTH);
output.extend_from_slice(hash);
},
None => (),
}
Bitmap::encode(
children.map(|maybe_child| match maybe_child.borrow() {
Some(ChildReference::Hash(h)) => {
h.as_ref().encode_to(&mut output);
true
},
&Some(ChildReference::Inline(inline_data, len)) => {
inline_data.as_ref()[..len].encode_to(&mut output);
true
},
None => false,
}),
bitmap.as_mut(),
);
output[bitmap_index..bitmap_index + BITMAP_LENGTH]
.copy_from_slice(&bitmap[..BITMAP_LENGTH]);
output
}
}
// utils
/// Encode and allocate node type header (type and size), and partial value.
/// It uses an iterator over encoded partial bytes as input.
fn partial_from_iterator_encode<I: Iterator<Item = u8>>(
partial: I,
nibble_count: usize,
node_kind: NodeKind,
) -> Vec<u8> {
let nibble_count = std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count);
let mut output = Vec::with_capacity(4 + (nibble_count / nibble_ops::NIBBLE_PER_BYTE));
match node_kind {
NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output),
NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output),
NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output),
NodeKind::HashedValueLeaf =>
NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output),
NodeKind::HashedValueBranch =>
NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output),
};
output.extend(partial);
output
}
/// A node header.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub(crate) enum NodeHeader {
Null,
// contains wether there is a value and nibble count
Branch(bool, usize),
// contains nibble count
Leaf(usize),
// contains nibble count.
HashedValueBranch(usize),
// contains nibble count.
HashedValueLeaf(usize),
}
impl NodeHeader {
fn contains_hash_of_value(&self) -> bool {
match self {
NodeHeader::HashedValueBranch(_) | NodeHeader::HashedValueLeaf(_) => true,
_ => false,
}
}
}
/// NodeHeader without content
pub(crate) enum NodeKind {
Leaf,
BranchNoValue,
BranchWithValue,
HashedValueLeaf,
HashedValueBranch,
}
impl Encode for NodeHeader {
fn encode_to<T: Output +?Sized>(&self, output: &mut T) {
match self {
NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE),
NodeHeader::Branch(true, nibble_count) =>
encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, 2, output),
NodeHeader::Branch(false, nibble_count) => encode_size_and_prefix(
*nibble_count,
trie_constants::BRANCH_WITHOUT_MASK,
2,
output,
),
NodeHeader::Leaf(nibble_count) =>
encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, 2, output),
NodeHeader::HashedValueBranch(nibble_count) => encode_size_and_prefix(
*nibble_count,
trie_constants::ALT_HASHING_BRANCH_WITH_MASK,
4,
output,
),
NodeHeader::HashedValueLeaf(nibble_count) => encode_size_and_prefix(
*nibble_count,
trie_constants::ALT_HASHING_LEAF_PREFIX_MASK,
3,
output,
),
}
}
}
impl parity_scale_codec::EncodeLike for NodeHeader {}
impl Decode for NodeHeader {
fn decode<I: Input>(input: &mut I) -> Result<Self, Error> {
let i = input.read_byte()?;
if i == trie_constants::EMPTY_TRIE {
return Ok(NodeHeader::Null)
}
match i & (0b11 << 6) {
trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input, 2)?)),
trie_constants::BRANCH_WITH_MASK =>
Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)),
trie_constants::BRANCH_WITHOUT_MASK =>
Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)),
trie_constants::EMPTY_TRIE => {
if i & (0b111 << 5) == trie_constants::ALT_HASHING_LEAF_PREFIX_MASK {
Ok(NodeHeader::HashedValueLeaf(decode_size(i, input, 3)?))
} else if i & (0b1111 << 4) == trie_constants::ALT_HASHING_BRANCH_WITH_MASK {
Ok(NodeHeader::HashedValueBranch(decode_size(i, input, 4)?))
} else {
// do not allow any special encoding
Err("Unallowed encoding".into())
}
},
_ => unreachable!(),
}
}
}
/// Returns an iterator over encoded bytes for node header and size.
/// Size encoding allows unlimited, length inefficient, representation, but
/// is bounded to 16 bit maximum value to avoid possible DOS.
pub(crate) fn size_and_prefix_iterator(
size: usize,
prefix: u8,
prefix_mask: usize,
) -> impl Iterator<Item = u8> {
let size = std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size);
let max_value = 255u8 >> prefix_mask;
let l1 = std::cmp::min(max_value as usize - 1, size);
let (first_byte, mut rem) = if size == l1 {
(once(prefix + l1 as u8), 0)
} else {
(once(prefix + max_value as u8), size - l1)
};
let next_bytes = move || {
if rem > 0 {
if rem < 256 {
let result = rem - 1;
rem = 0;
Some(result as u8)
} else {
rem = rem.saturating_sub(255);
Some(255)
}
} else {
None
}
};
first_byte.chain(std::iter::from_fn(next_bytes))
}
/// Encodes size and prefix to a stream output (prefix on 2 first bit only).
fn encode_size_and_prefix<W>(size: usize, prefix: u8, prefix_mask: usize, out: &mut W)
where
W: Output +?Sized,
|
/// Decode size only from stream input and header byte.
fn decode_size(first: u8, input: &mut impl Input, prefix_mask: usize) -> Result<usize, Error> {
let max_value = 255u8 >> prefix_mask;
let mut result = (first & max_value) as usize;
if result < max_value as usize {
return Ok(result)
}
result -= 1;
while result <= trie_constants::NIBBLE_SIZE_BOUND {
let n = input.read_byte()? as usize;
if n < 255 {
return Ok(result + n + 1)
}
result += 255;
}
Ok(trie_constants::NIBBLE_SIZE_BOUND)
}
/// Reference implementation of a `TrieStream` without extension.
#[derive(Default, Clone)]
pub struct ReferenceTrieStreamNoExt {
/// Current node buffer.
buffer: Vec<u8>,
}
/// Create a leaf/branch node, encoding a number of nibbles.
fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator<Item = u8> + 'a {
let size = std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibbles.len());
let iter_start = match kind {
NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK, 2),
NodeKind::BranchNoValue =>
size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2),
NodeKind::BranchWithValue =>
size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK, 2),
NodeKind::HashedValueLeaf =>
size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3),
NodeKind::HashedValueBranch =>
size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4),
};
iter_start
.chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None })
.chain(nibbles[nibbles.len() % 2..].chunks(2).map(|ch| ch[0] << 4 | ch[1]))
}
use trie_root::Value as TrieStreamValue;
impl TrieStream for ReferenceTrieStreamNoExt {
fn new() -> Self {
Self { buffer: Vec::new() }
}
fn append_empty_data(&mut self) {
self.buffer.push(trie_constants::EMPTY_TRIE);
}
fn append_leaf(&mut self, key: &[u8], value: TrieStreamValue) {
let kind = match &value {
TrieStreamValue::Inline(..) => NodeKind::Leaf,
TrieStreamValue::Node(..) => NodeKind::HashedValueLeaf,
};
self.buffer.extend(fuse_nibbles_node(key, kind));
match &value {
TrieStreamValue::Inline(value) => {
Compact(value.len() as u32).encode_to(&mut self.buffer);
self.buffer.extend_from_slice(value);
},
TrieStreamValue::Node(hash) => {
self.buffer.extend_from_slice(hash.as_slice());
},
};
}
fn begin_branch(
&mut self,
maybe_partial: Option<&[u8]>,
maybe_value: Option<TrieStreamValue>,
has_children: impl Iterator<Item = bool>,
) {
if let Some(partial) = maybe_partial {
let kind = match &maybe_value {
None => NodeKind::BranchNoValue,
Some(TrieStreamValue::Inline(..)) => NodeKind::BranchWithValue,
Some(TrieStreamValue::Node(..)) => NodeKind::HashedValueBranch,
};
self.buffer.extend(fuse_nibbles_node(partial, kind));
let bm = branch_node_bit_mask(has_children);
self.buffer.extend([bm.0, bm.1].iter());
} else {
unreachable!("trie stream codec only for no extension trie");
}
match maybe_value {
None => (),
Some(TrieStreamValue::Inline(value)) => {
Compact(value.len() as u32).encode_to(&mut self.buffer);
self.buffer.extend_from_slice(value);
},
Some(TrieStreamValue::Node(hash)) => {
self.buffer.extend_from_slice(hash.as_slice());
},
}
}
fn append_extension(&mut self, _key: &[u8]) {
unreachable!("trie stream codec only for no extension trie");
}
fn append_substream<H: Hasher>(&mut self, other: Self) {
let data = other.out();
match data.len() {
0..=31 => data.encode_to(&mut self.buffer),
_ => H::hash(&data).as_ref().encode_to(&mut self.buffer),
}
}
fn out(self) -> Vec<u8> {
self.buffer
}
}
| {
for b in size_and_prefix_iterator(size, prefix, prefix_mask) {
out.push_byte(b)
}
} | identifier_body |
metrics.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the metrics system.
//!
//! # Design
//! The main design goals of this system are:
//! * Use lockless operations, preferably ones that don't require anything other than
//! simple reads/writes being atomic.
//! * Exploit interior mutability and atomics being Sync to allow all methods (including the ones
//! which are effectively mutable) to be callable on a global non-mut static.
//! * Rely on `serde` to provide the actual serialization for logging the metrics.
//! * Since all metrics start at 0, we implement the `Default` trait via derive for all of them,
//! to avoid having to initialize everything by hand.
//!
//! Moreover, the value of a metric is currently NOT reset to 0 each time it's being logged. The
//! current approach is to store two values (current and previous) and compute the delta between
//! them each time we do a flush (i.e by serialization). There are a number of advantages
//! to this approach, including:
//! * We don't have to introduce an additional write (to reset the value) from the thread which
//! does to actual logging, so less synchronization effort is required.
//! * We don't have to worry at all that much about losing some data if logging fails for a while
//! (this could be a concern, I guess).
//! If if turns out this approach is not really what we want, it's pretty easy to resort to
//! something else, while working behind the same interface.
use std::sync::atomic::{AtomicUsize, Ordering};
use chrono;
use serde::{Serialize, Serializer};
const SYSCALL_MAX: usize = 350;
/// Used for defining new types of metrics that can be either incremented with an unit
/// or an arbitrary amount of units.
// This trait helps with writing less code. It has to be in scope (via an use directive) in order
// for its methods to be available to call on structs that implement it.
pub trait Metric {
/// Adds `value` to the current counter.
fn add(&self, value: usize);
/// Increments by 1 unit the current counter.
fn inc(&self) {
self.add(1);
}
/// Returns current value of the counter.
fn count(&self) -> usize;
}
/// Representation of a metric that is expected to be incremented from a single thread, so it
/// can use simple loads and stores with no additional synchronization necessities.
// Loads are currently Relaxed everywhere, because we don't do anything besides
// logging the retrieved value (their outcome os not used to modify some memory location in a
// potentially inconsistent manner). There's no way currently to make sure a SimpleMetric is only
// incremented by a single thread, this has to be enforced via judicious use (although, every
// non-vCPU related metric is associated with a particular thread, so it shouldn't be that easy
// to misuse SimpleMetric fields).
#[derive(Default)]
pub struct SimpleMetric(AtomicUsize);
impl Metric for SimpleMetric {
fn add(&self, value: usize) {
let ref count = self.0;
count.store(count.load(Ordering::Relaxed) + value, Ordering::Relaxed);
}
fn count(&self) -> usize {
self.0.load(Ordering::Relaxed)
}
}
impl Serialize for SimpleMetric {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
// There's no serializer.serialize_usize().
serializer.serialize_u64(self.0.load(Ordering::Relaxed) as u64)
}
}
/// Representation of a metric that is expected to be incremented from more than one thread, so more
/// synchronization is necessary.
// It's currently used for vCPU metrics. An alternative here would be
// to have one instance of every metric for each thread (like a per-thread SimpleMetric), and to
// aggregate them when logging. However this probably overkill unless we have a lot of vCPUs
// incrementing metrics very often. Still, it's there if we ever need it :-s
#[derive(Default)]
// We will be keeping two values for each metric for being able to reset
// counters on each metric.
// 1st member - current value being updated
// 2nd member - old value that gets the current value whenever metrics is flushed to disk
pub struct SharedMetric(AtomicUsize, AtomicUsize);
impl Metric for SharedMetric {
// While the order specified for this operation is still Relaxed, the actual instruction will
// be an asm "LOCK; something" and thus atomic across multiple threads, simply because of the
// fetch_and_add (as opposed to "store(load() + 1)") implementation for atomics.
// TODO: would a stronger ordering make a difference here?
fn add(&self, value: usize) {
self.0.fetch_add(value, Ordering::Relaxed);
}
fn count(&self) -> usize {
self.0.load(Ordering::Relaxed)
}
}
impl Serialize for SharedMetric {
/// Reset counters of each metrics. Here we suppose that Serialize's goal is to help with the
/// flushing of metrics.
///!!! Any print of the metrics will also reset them. Use with caution!!!
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
// There's no serializer.serialize_usize() for some reason :(
let snapshot = self.0.load(Ordering::Relaxed);
let res = serializer.serialize_u64(snapshot as u64 - self.1.load(Ordering::Relaxed) as u64);
if res.is_ok() {
self.1.store(snapshot, Ordering::Relaxed);
}
res
}
}
// The following structs are used to define a certain organization for the set of metrics we
// are interested in. Whenever the name of a field differs from its ideal textual representation
// in the serialized form, we can use the #[serde(rename = "name")] attribute to, well, rename it.
/// Metrics related to the internal API server.
#[derive(Default, Serialize)]
pub struct ApiServerMetrics {
/// Measures the process's startup time in microseconds.
pub process_startup_time_us: SharedMetric,
/// Measures the cpu's startup time in microseconds.
pub process_startup_time_cpu_us: SharedMetric,
/// Number of failures on API requests triggered by internal errors.
pub sync_outcome_fails: SharedMetric,
/// Number of timeouts during communication with the VMM.
pub sync_vmm_send_timeout_count: SharedMetric,
}
/// Metrics specific to GET API Requests for counting user triggered actions and/or failures.
#[derive(Default, Serialize)]
pub struct GetRequestsMetrics {
/// Number of GETs for getting information on the instance.
pub instance_info_count: SharedMetric,
/// Number of failures when obtaining information on the current instance.
pub instance_info_fails: SharedMetric,
/// Number of GETs for getting status on attaching machine configuration.
pub machine_cfg_count: SharedMetric,
/// Number of failures during GETs for getting information on the instance.
pub machine_cfg_fails: SharedMetric,
}
/// Metrics specific to PUT API Requests for counting user triggered actions and/or failures.
#[derive(Default, Serialize)]
pub struct PutRequestsMetrics {
/// Number of PUTs triggering an action on the VM.
pub actions_count: SharedMetric,
/// Number of failures in triggering an action on the VM.
pub actions_fails: SharedMetric,
/// Number of PUTs for attaching source of boot.
pub boot_source_count: SharedMetric,
/// Number of failures during attaching source of boot.
pub boot_source_fails: SharedMetric,
/// Number of PUTs triggering a block attach.
pub drive_count: SharedMetric,
/// Number of failures in attaching a block device.
pub drive_fails: SharedMetric,
/// Number of PUTs for initializing the logging system.
pub logger_count: SharedMetric,
/// Number of failures in initializing the logging system.
pub logger_fails: SharedMetric,
/// Number of PUTs for configuring the machine.
pub machine_cfg_count: SharedMetric,
/// Number of failures in configuring the machine.
pub machine_cfg_fails: SharedMetric,
/// Number of PUTs for creating a new network interface.
pub network_count: SharedMetric,
/// Number of failures in creating a new network interface.
pub network_fails: SharedMetric,
}
/// Metrics specific to PATCH API Requests for counting user triggered actions and/or failures.
#[derive(Default, Serialize)]
pub struct PatchRequestsMetrics {
/// Number of tries to PATCH a block device.
pub drive_count: SharedMetric,
/// Number of failures in PATCHing a block device.
pub drive_fails: SharedMetric,
}
/// Block Device associated metrics.
#[derive(Default, Serialize)]
pub struct BlockDeviceMetrics {
/// Number of times when activate failed on a block device.
pub activate_fails: SharedMetric,
/// Number of times when interacting with the space config of a block device failed.
pub cfg_fails: SharedMetric,
/// Number of times when handling events on a block device failed.
pub event_fails: SharedMetric,
/// Number of failures in executing a request on a block device.
pub execute_fails: SharedMetric,
/// Number of invalid requests received for this block device.
pub invalid_reqs_count: SharedMetric,
/// Number of flushes operation triggered on this block device.
pub flush_count: SharedMetric,
/// Number of events triggerd on the queue of this block device.
pub queue_event_count: SharedMetric,
/// Number of events ratelimiter-related.
pub rate_limiter_event_count: SharedMetric,
/// Number of update operation triggered on this block device.
pub update_count: SharedMetric,
/// Number of failures while doing update on this block device.
pub update_fails: SharedMetric,
/// Number of bytes read by this block device.
pub read_count: SharedMetric,
/// Number of bytes written by this block device.
pub write_count: SharedMetric,
}
/// Metrics specific to the i8042 device.
#[derive(Default, Serialize)]
pub struct I8042DeviceMetrics {
/// Errors triggered while using the i8042 device.
pub error_count: SharedMetric,
/// Number of superfluous read intents on this i8042 device.
pub missed_read_count: SharedMetric,
/// Number of superfluous read intents on this i8042 device.
pub missed_write_count: SharedMetric,
/// Bytes read by this device.
pub read_count: SharedMetric,
/// Number of resets done by this device.
pub reset_count: SharedMetric,
/// Bytes written by this device.
pub write_count: SharedMetric,
}
/// Metrics for the logging subsystem.
#[derive(Default, Serialize)]
pub struct LoggerSystemMetrics {
/// Number of misses on flushing metrics.
pub missed_metrics_count: SharedMetric,
/// Number of errors during metrics handling.
pub metrics_fails: SharedMetric,
/// Number of misses on logging human readable content.
pub missed_log_count: SharedMetric,
/// Number of errors while trying to log human readable content.
pub log_fails: SharedMetric,
}
/// Metrics for the MMDS functionality.
#[derive(Default, Serialize)]
pub struct MmdsMetrics {
/// Number of frames rerouted to MMDS.
pub rx_accepted: SharedMetric,
/// Number of errors while handling a frame through MMDS.
pub rx_accepted_err: SharedMetric,
/// Number of uncommon events encountered while processing packets through MMDS.
pub rx_accepted_unusual: SharedMetric,
/// The number of buffers which couldn't be parsed as valid Ethernet frames by the MMDS.
pub rx_bad_eth: SharedMetric,
/// The total number of bytes sent by the MMDS.
pub tx_bytes: SharedMetric,
/// The number of errors raised by the MMDS while attempting to send frames/packets/segments.
pub tx_errors: SharedMetric,
/// The number of frames sent by the MMDS.
pub tx_frames: SharedMetric,
/// The number of connections successfully accepted by the MMDS TCP handler.
pub connections_created: SharedMetric,
/// The number of connections cleaned up by the MMDS TCP handler.
pub connections_destroyed: SharedMetric,
}
/// Network-related metrics.
#[derive(Default, Serialize)]
pub struct NetDeviceMetrics {
/// Number of times when activate failed on a network device.
pub activate_fails: SharedMetric,
/// Number of times when interacting with the space config of a network device failed.
pub cfg_fails: SharedMetric,
/// Number of times when handling events on a network device failed.
pub event_fails: SharedMetric,
/// Number of events associated with the receiving queue.
pub rx_queue_event_count: SharedMetric,
/// Number of events associated with the rate limiter installed on the receiving path.
pub rx_event_rate_limiter_count: SharedMetric,
/// Number of events received on the associated tap.
pub rx_tap_event_count: SharedMetric,
/// Number of bytes received.
pub rx_bytes_count: SharedMetric,
/// Number of packets received.
pub rx_packets_count: SharedMetric,
/// Number of errors while receiving data.
pub rx_fails: SharedMetric,
/// Number of transmitted bytes.
pub tx_bytes_count: SharedMetric,
/// Number of errors while transmitting data.
pub tx_fails: SharedMetric,
/// Number of transmitted packets.
pub tx_packets_count: SharedMetric,
/// Number of events associated with the transmitting queue.
pub tx_queue_event_count: SharedMetric,
/// Number of events associated with the rate limiter installed on the transmitting path.
pub tx_rate_limiter_event_count: SharedMetric,
}
/// Metrics for the seccomp filtering.
#[derive(Serialize)]
pub struct SeccompMetrics {
/// Number of black listed syscalls.
pub bad_syscalls: Vec<SharedMetric>,
/// Number of errors inside the seccomp filtering.
pub num_faults: SharedMetric,
}
impl Default for SeccompMetrics {
fn default() -> SeccompMetrics {
let mut def_syscalls = vec![];
for _syscall in 0..SYSCALL_MAX {
def_syscalls.push(SharedMetric::default());
}
SeccompMetrics {
num_faults: SharedMetric::default(),
bad_syscalls: def_syscalls,
}
}
}
/// Metrics specific to the UART device.
#[derive(Default, Serialize)]
pub struct SerialDeviceMetrics {
/// Errors triggered while using the UART device.
pub error_count: SharedMetric,
/// Number of flush operations.
pub flush_count: SharedMetric,
/// Number of read calls that did not trigger a read.
pub missed_read_count: SharedMetric,
/// Number of write calls that did not trigger a write.
pub missed_write_count: SharedMetric,
/// Number of succeeded read calls.
pub read_count: SharedMetric,
/// Number of succeeded write calls.
pub write_count: SharedMetric,
}
/// Metrics specific to VCPUs' mode of functioning.
#[derive(Default, Serialize)]
pub struct VcpuMetrics {
/// Number of KVM exits for handling input IO.
pub exit_io_in: SharedMetric,
/// Number of KVM exits for handling output IO.
pub exit_io_out: SharedMetric,
/// Number of KVM exits for handling MMIO reads.
pub exit_mmio_read: SharedMetric,
/// Number of KVM exits for handling MMIO writes.
pub exit_mmio_write: SharedMetric,
/// Number of errors during this VCPU's run.
pub failures: SharedMetric,
/// Failures in configuring the CPUID.
pub fitler_cpuid: SharedMetric,
}
/// Metrics specific to the machine manager as a whole.
#[derive(Default, Serialize)]
pub struct VmmMetrics {
/// Number of device related events received for a VM.
pub device_events: SharedMetric,
/// Metric for signaling a panic has occurred.
pub panic_count: SharedMetric,
}
/// Memory usage metrics.
#[derive(Default, Serialize)]
pub struct MemoryMetrics {
/// Number of pages dirtied since the last call to `KVM_GET_DIRTY_LOG`.
pub dirty_pages: SharedMetric,
}
// The sole purpose of this struct is to produce an UTC timestamp when an instance is serialized.
#[derive(Default)]
struct SerializeToUtcTimestampMs;
impl Serialize for SerializeToUtcTimestampMs {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> |
}
/// Structure storing all metrics while enforcing serialization support on them.
#[derive(Default, Serialize)]
pub struct FirecrackerMetrics {
utc_timestamp_ms: SerializeToUtcTimestampMs,
/// API Server related metrics.
pub api_server: ApiServerMetrics,
/// A block device's related metrics.
pub block: BlockDeviceMetrics,
/// Metrics related to API GET requests.
pub get_api_requests: GetRequestsMetrics,
/// Metrics relaetd to the i8042 device.
pub i8042: I8042DeviceMetrics,
/// Logging related metrics.
pub logger: LoggerSystemMetrics,
/// Metrics specific to MMDS functionality.
pub mmds: MmdsMetrics,
/// A network device's related metrics.
pub net: NetDeviceMetrics,
/// Metrics related to API PATCH requests.
pub patch_api_requests: PatchRequestsMetrics,
/// Metrics related to API PUT requests.
pub put_api_requests: PutRequestsMetrics,
/// Metrics related to seccomp filtering.
pub seccomp: SeccompMetrics,
/// Metrics related to a vcpu's functioning.
pub vcpu: VcpuMetrics,
/// Metrics related to the virtual machine manager.
pub vmm: VmmMetrics,
/// Metrics related to the UART device.
pub uart: SerialDeviceMetrics,
/// Memory usage metrics.
pub memory: MemoryMetrics,
}
lazy_static! {
/// Static instance used for handling metrics.
///
pub static ref METRICS: FirecrackerMetrics = FirecrackerMetrics::default();
}
#[cfg(test)]
mod tests {
extern crate serde_json;
use super::*;
use std::sync::Arc;
use std::thread;
#[test]
fn test_metric() {
let m1 = SimpleMetric::default();
m1.inc();
m1.inc();
m1.add(5);
m1.inc();
assert_eq!(m1.count(), 8);
let m2 = Arc::new(SharedMetric::default());
// We're going to create a number of threads that will attempt to increase this metric
// in parallel. If everything goes fine we still can't be sure the synchronization works,
// but it something fails, then we definitely have a problem :-s
const NUM_THREADS_TO_SPAWN: usize = 4;
const NUM_INCREMENTS_PER_THREAD: usize = 100000;
const M2_INITIAL_COUNT: usize = 123;
m2.add(M2_INITIAL_COUNT);
let mut v = Vec::with_capacity(NUM_THREADS_TO_SPAWN);
for _ in 0..NUM_THREADS_TO_SPAWN {
let r = m2.clone();
v.push(thread::spawn(move || {
for _ in 0..NUM_INCREMENTS_PER_THREAD {
r.inc();
}
}));
}
for handle in v {
handle.join().unwrap();
}
assert_eq!(
m2.count(),
M2_INITIAL_COUNT + NUM_THREADS_TO_SPAWN * NUM_INCREMENTS_PER_THREAD
);
}
#[test]
fn test_serialize() {
let s = serde_json::to_string(&FirecrackerMetrics::default());
assert!(s.is_ok());
}
}
| {
serializer.serialize_i64(chrono::Utc::now().timestamp_millis())
} | identifier_body |
metrics.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the metrics system.
//!
//! # Design
//! The main design goals of this system are:
//! * Use lockless operations, preferably ones that don't require anything other than
//! simple reads/writes being atomic.
//! * Exploit interior mutability and atomics being Sync to allow all methods (including the ones
//! which are effectively mutable) to be callable on a global non-mut static.
//! * Rely on `serde` to provide the actual serialization for logging the metrics.
//! * Since all metrics start at 0, we implement the `Default` trait via derive for all of them,
//! to avoid having to initialize everything by hand.
//!
//! Moreover, the value of a metric is currently NOT reset to 0 each time it's being logged. The
//! current approach is to store two values (current and previous) and compute the delta between
//! them each time we do a flush (i.e by serialization). There are a number of advantages
//! to this approach, including:
//! * We don't have to introduce an additional write (to reset the value) from the thread which
//! does to actual logging, so less synchronization effort is required.
//! * We don't have to worry at all that much about losing some data if logging fails for a while
//! (this could be a concern, I guess).
//! If if turns out this approach is not really what we want, it's pretty easy to resort to
//! something else, while working behind the same interface.
use std::sync::atomic::{AtomicUsize, Ordering};
use chrono;
use serde::{Serialize, Serializer};
const SYSCALL_MAX: usize = 350;
/// Used for defining new types of metrics that can be either incremented with an unit
/// or an arbitrary amount of units.
// This trait helps with writing less code. It has to be in scope (via an use directive) in order
// for its methods to be available to call on structs that implement it.
pub trait Metric {
/// Adds `value` to the current counter.
fn add(&self, value: usize);
/// Increments by 1 unit the current counter.
fn inc(&self) {
self.add(1);
}
/// Returns current value of the counter.
fn count(&self) -> usize;
}
/// Representation of a metric that is expected to be incremented from a single thread, so it
/// can use simple loads and stores with no additional synchronization necessities.
// Loads are currently Relaxed everywhere, because we don't do anything besides
// logging the retrieved value (their outcome os not used to modify some memory location in a
// potentially inconsistent manner). There's no way currently to make sure a SimpleMetric is only
// incremented by a single thread, this has to be enforced via judicious use (although, every
// non-vCPU related metric is associated with a particular thread, so it shouldn't be that easy
// to misuse SimpleMetric fields).
#[derive(Default)]
pub struct SimpleMetric(AtomicUsize);
impl Metric for SimpleMetric {
fn add(&self, value: usize) {
let ref count = self.0;
count.store(count.load(Ordering::Relaxed) + value, Ordering::Relaxed);
}
fn count(&self) -> usize {
self.0.load(Ordering::Relaxed)
}
}
impl Serialize for SimpleMetric {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
// There's no serializer.serialize_usize().
serializer.serialize_u64(self.0.load(Ordering::Relaxed) as u64)
}
}
/// Representation of a metric that is expected to be incremented from more than one thread, so more
/// synchronization is necessary.
// It's currently used for vCPU metrics. An alternative here would be
// to have one instance of every metric for each thread (like a per-thread SimpleMetric), and to
// aggregate them when logging. However this probably overkill unless we have a lot of vCPUs
// incrementing metrics very often. Still, it's there if we ever need it :-s
#[derive(Default)]
// We will be keeping two values for each metric for being able to reset
// counters on each metric.
// 1st member - current value being updated
// 2nd member - old value that gets the current value whenever metrics is flushed to disk
pub struct SharedMetric(AtomicUsize, AtomicUsize);
impl Metric for SharedMetric {
// While the order specified for this operation is still Relaxed, the actual instruction will
// be an asm "LOCK; something" and thus atomic across multiple threads, simply because of the
// fetch_and_add (as opposed to "store(load() + 1)") implementation for atomics.
// TODO: would a stronger ordering make a difference here?
fn add(&self, value: usize) {
self.0.fetch_add(value, Ordering::Relaxed);
}
fn count(&self) -> usize {
self.0.load(Ordering::Relaxed)
}
}
impl Serialize for SharedMetric {
/// Reset counters of each metrics. Here we suppose that Serialize's goal is to help with the
/// flushing of metrics.
///!!! Any print of the metrics will also reset them. Use with caution!!!
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
// There's no serializer.serialize_usize() for some reason :(
let snapshot = self.0.load(Ordering::Relaxed);
let res = serializer.serialize_u64(snapshot as u64 - self.1.load(Ordering::Relaxed) as u64);
if res.is_ok() {
self.1.store(snapshot, Ordering::Relaxed);
}
res
}
}
// The following structs are used to define a certain organization for the set of metrics we
// are interested in. Whenever the name of a field differs from its ideal textual representation
// in the serialized form, we can use the #[serde(rename = "name")] attribute to, well, rename it.
/// Metrics related to the internal API server.
#[derive(Default, Serialize)]
pub struct ApiServerMetrics {
/// Measures the process's startup time in microseconds.
pub process_startup_time_us: SharedMetric,
/// Measures the cpu's startup time in microseconds.
pub process_startup_time_cpu_us: SharedMetric,
/// Number of failures on API requests triggered by internal errors.
pub sync_outcome_fails: SharedMetric,
/// Number of timeouts during communication with the VMM.
pub sync_vmm_send_timeout_count: SharedMetric,
}
/// Metrics specific to GET API Requests for counting user triggered actions and/or failures.
#[derive(Default, Serialize)]
pub struct GetRequestsMetrics {
/// Number of GETs for getting information on the instance.
pub instance_info_count: SharedMetric,
/// Number of failures when obtaining information on the current instance.
pub instance_info_fails: SharedMetric,
/// Number of GETs for getting status on attaching machine configuration.
pub machine_cfg_count: SharedMetric,
/// Number of failures during GETs for getting information on the instance.
pub machine_cfg_fails: SharedMetric,
}
/// Metrics specific to PUT API Requests for counting user triggered actions and/or failures.
#[derive(Default, Serialize)]
pub struct PutRequestsMetrics {
/// Number of PUTs triggering an action on the VM.
pub actions_count: SharedMetric,
/// Number of failures in triggering an action on the VM.
pub actions_fails: SharedMetric,
/// Number of PUTs for attaching source of boot.
pub boot_source_count: SharedMetric,
/// Number of failures during attaching source of boot.
pub boot_source_fails: SharedMetric,
/// Number of PUTs triggering a block attach.
pub drive_count: SharedMetric,
/// Number of failures in attaching a block device.
pub drive_fails: SharedMetric,
/// Number of PUTs for initializing the logging system.
pub logger_count: SharedMetric,
/// Number of failures in initializing the logging system.
pub logger_fails: SharedMetric,
/// Number of PUTs for configuring the machine.
pub machine_cfg_count: SharedMetric,
/// Number of failures in configuring the machine.
pub machine_cfg_fails: SharedMetric,
/// Number of PUTs for creating a new network interface.
pub network_count: SharedMetric,
/// Number of failures in creating a new network interface.
pub network_fails: SharedMetric,
}
/// Metrics specific to PATCH API Requests for counting user triggered actions and/or failures.
#[derive(Default, Serialize)]
pub struct PatchRequestsMetrics {
/// Number of tries to PATCH a block device.
pub drive_count: SharedMetric,
/// Number of failures in PATCHing a block device.
pub drive_fails: SharedMetric,
}
/// Block Device associated metrics.
#[derive(Default, Serialize)]
pub struct BlockDeviceMetrics {
/// Number of times when activate failed on a block device.
pub activate_fails: SharedMetric,
/// Number of times when interacting with the space config of a block device failed.
pub cfg_fails: SharedMetric,
/// Number of times when handling events on a block device failed.
pub event_fails: SharedMetric,
/// Number of failures in executing a request on a block device.
pub execute_fails: SharedMetric,
/// Number of invalid requests received for this block device.
pub invalid_reqs_count: SharedMetric,
/// Number of flushes operation triggered on this block device.
pub flush_count: SharedMetric,
/// Number of events triggerd on the queue of this block device.
pub queue_event_count: SharedMetric,
/// Number of events ratelimiter-related.
pub rate_limiter_event_count: SharedMetric,
/// Number of update operation triggered on this block device.
pub update_count: SharedMetric,
/// Number of failures while doing update on this block device.
pub update_fails: SharedMetric,
/// Number of bytes read by this block device.
pub read_count: SharedMetric,
/// Number of bytes written by this block device.
pub write_count: SharedMetric,
}
/// Metrics specific to the i8042 device.
#[derive(Default, Serialize)]
pub struct I8042DeviceMetrics {
/// Errors triggered while using the i8042 device.
pub error_count: SharedMetric,
/// Number of superfluous read intents on this i8042 device.
pub missed_read_count: SharedMetric,
/// Number of superfluous read intents on this i8042 device.
pub missed_write_count: SharedMetric,
/// Bytes read by this device.
pub read_count: SharedMetric,
/// Number of resets done by this device.
pub reset_count: SharedMetric,
/// Bytes written by this device.
pub write_count: SharedMetric,
}
/// Metrics for the logging subsystem.
#[derive(Default, Serialize)]
pub struct LoggerSystemMetrics {
/// Number of misses on flushing metrics.
pub missed_metrics_count: SharedMetric,
/// Number of errors during metrics handling.
pub metrics_fails: SharedMetric,
/// Number of misses on logging human readable content.
pub missed_log_count: SharedMetric,
/// Number of errors while trying to log human readable content.
pub log_fails: SharedMetric,
}
/// Metrics for the MMDS functionality.
#[derive(Default, Serialize)]
pub struct MmdsMetrics {
/// Number of frames rerouted to MMDS.
pub rx_accepted: SharedMetric,
/// Number of errors while handling a frame through MMDS.
pub rx_accepted_err: SharedMetric,
/// Number of uncommon events encountered while processing packets through MMDS.
pub rx_accepted_unusual: SharedMetric,
/// The number of buffers which couldn't be parsed as valid Ethernet frames by the MMDS.
pub rx_bad_eth: SharedMetric,
/// The total number of bytes sent by the MMDS.
pub tx_bytes: SharedMetric,
/// The number of errors raised by the MMDS while attempting to send frames/packets/segments.
pub tx_errors: SharedMetric,
/// The number of frames sent by the MMDS.
pub tx_frames: SharedMetric,
/// The number of connections successfully accepted by the MMDS TCP handler.
pub connections_created: SharedMetric,
/// The number of connections cleaned up by the MMDS TCP handler.
pub connections_destroyed: SharedMetric,
}
/// Network-related metrics.
#[derive(Default, Serialize)]
pub struct NetDeviceMetrics {
/// Number of times when activate failed on a network device.
pub activate_fails: SharedMetric,
/// Number of times when interacting with the space config of a network device failed.
pub cfg_fails: SharedMetric,
/// Number of times when handling events on a network device failed.
pub event_fails: SharedMetric,
/// Number of events associated with the receiving queue.
pub rx_queue_event_count: SharedMetric,
/// Number of events associated with the rate limiter installed on the receiving path.
pub rx_event_rate_limiter_count: SharedMetric,
/// Number of events received on the associated tap.
pub rx_tap_event_count: SharedMetric,
/// Number of bytes received.
pub rx_bytes_count: SharedMetric,
/// Number of packets received.
pub rx_packets_count: SharedMetric,
/// Number of errors while receiving data.
pub rx_fails: SharedMetric,
/// Number of transmitted bytes.
pub tx_bytes_count: SharedMetric,
/// Number of errors while transmitting data.
pub tx_fails: SharedMetric,
/// Number of transmitted packets.
pub tx_packets_count: SharedMetric,
/// Number of events associated with the transmitting queue.
pub tx_queue_event_count: SharedMetric,
/// Number of events associated with the rate limiter installed on the transmitting path.
pub tx_rate_limiter_event_count: SharedMetric,
}
/// Metrics for the seccomp filtering.
#[derive(Serialize)]
pub struct SeccompMetrics {
/// Number of black listed syscalls.
pub bad_syscalls: Vec<SharedMetric>,
/// Number of errors inside the seccomp filtering.
pub num_faults: SharedMetric,
}
impl Default for SeccompMetrics {
fn default() -> SeccompMetrics {
let mut def_syscalls = vec![];
for _syscall in 0..SYSCALL_MAX {
def_syscalls.push(SharedMetric::default());
}
SeccompMetrics {
num_faults: SharedMetric::default(),
bad_syscalls: def_syscalls,
}
}
}
/// Metrics specific to the UART device.
#[derive(Default, Serialize)]
pub struct SerialDeviceMetrics {
/// Errors triggered while using the UART device.
pub error_count: SharedMetric,
/// Number of flush operations.
pub flush_count: SharedMetric,
/// Number of read calls that did not trigger a read.
pub missed_read_count: SharedMetric,
/// Number of write calls that did not trigger a write.
pub missed_write_count: SharedMetric,
/// Number of succeeded read calls.
pub read_count: SharedMetric,
/// Number of succeeded write calls.
pub write_count: SharedMetric,
}
/// Metrics specific to VCPUs' mode of functioning.
#[derive(Default, Serialize)]
pub struct VcpuMetrics {
/// Number of KVM exits for handling input IO.
pub exit_io_in: SharedMetric,
/// Number of KVM exits for handling output IO.
pub exit_io_out: SharedMetric,
/// Number of KVM exits for handling MMIO reads.
pub exit_mmio_read: SharedMetric,
/// Number of KVM exits for handling MMIO writes.
pub exit_mmio_write: SharedMetric,
/// Number of errors during this VCPU's run.
pub failures: SharedMetric,
/// Failures in configuring the CPUID.
pub fitler_cpuid: SharedMetric,
}
/// Metrics specific to the machine manager as a whole.
#[derive(Default, Serialize)]
pub struct VmmMetrics {
/// Number of device related events received for a VM.
pub device_events: SharedMetric,
/// Metric for signaling a panic has occurred.
pub panic_count: SharedMetric,
}
/// Memory usage metrics.
#[derive(Default, Serialize)]
pub struct MemoryMetrics {
/// Number of pages dirtied since the last call to `KVM_GET_DIRTY_LOG`.
pub dirty_pages: SharedMetric,
}
// The sole purpose of this struct is to produce an UTC timestamp when an instance is serialized.
#[derive(Default)]
struct SerializeToUtcTimestampMs;
impl Serialize for SerializeToUtcTimestampMs {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
serializer.serialize_i64(chrono::Utc::now().timestamp_millis())
}
}
/// Structure storing all metrics while enforcing serialization support on them.
#[derive(Default, Serialize)]
pub struct FirecrackerMetrics {
    // Private on purpose: every serialization stamps the flush time automatically,
    // so callers never set this field.
    utc_timestamp_ms: SerializeToUtcTimestampMs,
    /// API Server related metrics.
    pub api_server: ApiServerMetrics,
    /// A block device's related metrics.
    pub block: BlockDeviceMetrics,
    /// Metrics related to API GET requests.
    pub get_api_requests: GetRequestsMetrics,
    /// Metrics related to the i8042 device.
    pub i8042: I8042DeviceMetrics,
    /// Logging related metrics.
    pub logger: LoggerSystemMetrics,
    /// Metrics specific to MMDS functionality.
    pub mmds: MmdsMetrics,
    /// A network device's related metrics.
    pub net: NetDeviceMetrics,
    /// Metrics related to API PATCH requests.
    pub patch_api_requests: PatchRequestsMetrics,
    /// Metrics related to API PUT requests.
    pub put_api_requests: PutRequestsMetrics,
    /// Metrics related to seccomp filtering.
    pub seccomp: SeccompMetrics,
    /// Metrics related to a vcpu's functioning.
    pub vcpu: VcpuMetrics,
    /// Metrics related to the virtual machine manager.
    pub vmm: VmmMetrics,
    /// Metrics related to the UART device.
    pub uart: SerialDeviceMetrics,
    /// Memory usage metrics.
    pub memory: MemoryMetrics,
}
lazy_static! {
/// Static instance used for handling metrics.
///
pub static ref METRICS: FirecrackerMetrics = FirecrackerMetrics::default();
}
#[cfg(test)]
mod tests {
extern crate serde_json;
use super::*;
use std::sync::Arc;
use std::thread;
#[test]
fn test_metric() {
let m1 = SimpleMetric::default();
m1.inc();
m1.inc();
m1.add(5);
m1.inc();
assert_eq!(m1.count(), 8);
let m2 = Arc::new(SharedMetric::default());
// We're going to create a number of threads that will attempt to increase this metric
// in parallel. If everything goes fine we still can't be sure the synchronization works,
// but it something fails, then we definitely have a problem :-s
const NUM_THREADS_TO_SPAWN: usize = 4;
const NUM_INCREMENTS_PER_THREAD: usize = 100000;
const M2_INITIAL_COUNT: usize = 123;
m2.add(M2_INITIAL_COUNT);
let mut v = Vec::with_capacity(NUM_THREADS_TO_SPAWN);
for _ in 0..NUM_THREADS_TO_SPAWN {
let r = m2.clone();
v.push(thread::spawn(move || {
for _ in 0..NUM_INCREMENTS_PER_THREAD {
r.inc();
}
}));
}
for handle in v {
handle.join().unwrap();
}
assert_eq!(
m2.count(), | #[test]
fn test_serialize() {
let s = serde_json::to_string(&FirecrackerMetrics::default());
assert!(s.is_ok());
}
} | M2_INITIAL_COUNT + NUM_THREADS_TO_SPAWN * NUM_INCREMENTS_PER_THREAD
);
}
| random_line_split |
metrics.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the metrics system.
//!
//! # Design
//! The main design goals of this system are:
//! * Use lockless operations, preferably ones that don't require anything other than
//! simple reads/writes being atomic.
//! * Exploit interior mutability and atomics being Sync to allow all methods (including the ones
//! which are effectively mutable) to be callable on a global non-mut static.
//! * Rely on `serde` to provide the actual serialization for logging the metrics.
//! * Since all metrics start at 0, we implement the `Default` trait via derive for all of them,
//! to avoid having to initialize everything by hand.
//!
//! Moreover, the value of a metric is currently NOT reset to 0 each time it's being logged. The
//! current approach is to store two values (current and previous) and compute the delta between
//! them each time we do a flush (i.e by serialization). There are a number of advantages
//! to this approach, including:
//! * We don't have to introduce an additional write (to reset the value) from the thread which
//! does to actual logging, so less synchronization effort is required.
//! * We don't have to worry at all that much about losing some data if logging fails for a while
//! (this could be a concern, I guess).
//! If if turns out this approach is not really what we want, it's pretty easy to resort to
//! something else, while working behind the same interface.
use std::sync::atomic::{AtomicUsize, Ordering};
use chrono;
use serde::{Serialize, Serializer};
const SYSCALL_MAX: usize = 350;
/// Used for defining new types of metrics that can be either incremented with an unit
/// or an arbitrary amount of units.
// This trait helps with writing less code. It has to be in scope (via an use directive) in order
// for its methods to be available to call on structs that implement it.
pub trait Metric {
    /// Adds `value` to the current counter.
    fn add(&self, value: usize);
    /// Increments by 1 unit the current counter.
    ///
    /// Provided method: forwards to `add(1)`, so implementors only need to
    /// supply `add` and `count`.
    fn inc(&self) {
        self.add(1);
    }
    /// Returns current value of the counter.
    fn count(&self) -> usize;
}
/// Representation of a metric that is expected to be incremented from a single thread, so it
/// can use simple loads and stores with no additional synchronization necessities.
// Loads are currently Relaxed everywhere, because we don't do anything besides
// logging the retrieved value (their outcome os not used to modify some memory location in a
// potentially inconsistent manner). There's no way currently to make sure a SimpleMetric is only
// incremented by a single thread, this has to be enforced via judicious use (although, every
// non-vCPU related metric is associated with a particular thread, so it shouldn't be that easy
// to misuse SimpleMetric fields).
#[derive(Default)]
pub struct SimpleMetric(AtomicUsize);
impl Metric for SimpleMetric {
    // Deliberately a plain load + store rather than `fetch_add`: cheaper than an
    // atomic read-modify-write, and sound here only because a SimpleMetric is
    // expected to be written from a single thread (see the type-level comment).
    fn add(&self, value: usize) {
        let counter = &self.0;
        let updated = counter.load(Ordering::Relaxed) + value;
        counter.store(updated, Ordering::Relaxed);
    }

    /// Current value of the counter; a relaxed load is enough for logging.
    fn count(&self) -> usize {
        self.0.load(Ordering::Relaxed)
    }
}
impl Serialize for SimpleMetric {
    /// Serializes the current counter value, widened to `u64`.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // There's no serializer.serialize_usize(), so the usize is cast to u64.
        serializer.serialize_u64(self.0.load(Ordering::Relaxed) as u64)
    }
}
/// Representation of a metric that is expected to be incremented from more than one thread, so more
/// synchronization is necessary.
// It's currently used for vCPU metrics. An alternative here would be
// to have one instance of every metric for each thread (like a per-thread SimpleMetric), and to
// aggregate them when logging. However this probably overkill unless we have a lot of vCPUs
// incrementing metrics very often. Still, it's there if we ever need it :-s
#[derive(Default)]
// We will be keeping two values for each metric for being able to reset
// counters on each metric.
// 1st member - current value being updated
// 2nd member - old value that gets the current value whenever metrics is flushed to disk
pub struct SharedMetric(AtomicUsize, AtomicUsize);
impl Metric for SharedMetric {
    // While the order specified for this operation is still Relaxed, the actual instruction will
    // be an asm "LOCK; something" and thus atomic across multiple threads, simply because of the
    // fetch_and_add (as opposed to "store(load() + 1)") implementation for atomics.
    // TODO: would a stronger ordering make a difference here?
    fn add(&self, value: usize) {
        self.0.fetch_add(value, Ordering::Relaxed);
    }
    /// Returns the current (not-yet-flushed) total; only the first member of the
    /// pair holds the live value, the second is the flush watermark.
    fn count(&self) -> usize {
        self.0.load(Ordering::Relaxed)
    }
}
impl Serialize for SharedMetric {
    /// Serializes the delta accumulated since the previous flush, then resets it.
    ///
    /// On success, the "old" slot (`self.1`) is overwritten with the snapshot so the
    /// next serialization reports only what was added in between.
    /// !!! Any print of the metrics will also reset them. Use with caution !!!
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // There's no serializer.serialize_usize() for some reason :(
        let snapshot = self.0.load(Ordering::Relaxed);
        let res = serializer.serialize_u64(snapshot as u64 - self.1.load(Ordering::Relaxed) as u64);
        // Only advance the flush watermark if the value was actually written out,
        // so a failed flush does not lose the delta.
        if res.is_ok() {
            self.1.store(snapshot, Ordering::Relaxed);
        }
        res
    }
}
// The following structs are used to define a certain organization for the set of metrics we
// are interested in. Whenever the name of a field differs from its ideal textual representation
// in the serialized form, we can use the #[serde(rename = "name")] attribute to, well, rename it.
/// Metrics related to the internal API server.
#[derive(Default, Serialize)]
pub struct ApiServerMetrics {
/// Measures the process's startup time in microseconds.
pub process_startup_time_us: SharedMetric,
/// Measures the cpu's startup time in microseconds.
pub process_startup_time_cpu_us: SharedMetric,
/// Number of failures on API requests triggered by internal errors.
pub sync_outcome_fails: SharedMetric,
/// Number of timeouts during communication with the VMM.
pub sync_vmm_send_timeout_count: SharedMetric,
}
/// Metrics specific to GET API Requests for counting user triggered actions and/or failures.
#[derive(Default, Serialize)]
pub struct GetRequestsMetrics {
/// Number of GETs for getting information on the instance.
pub instance_info_count: SharedMetric,
/// Number of failures when obtaining information on the current instance.
pub instance_info_fails: SharedMetric,
/// Number of GETs for getting status on attaching machine configuration.
pub machine_cfg_count: SharedMetric,
/// Number of failures during GETs for getting information on the instance.
pub machine_cfg_fails: SharedMetric,
}
/// Metrics specific to PUT API Requests for counting user triggered actions and/or failures.
#[derive(Default, Serialize)]
pub struct PutRequestsMetrics {
/// Number of PUTs triggering an action on the VM.
pub actions_count: SharedMetric,
/// Number of failures in triggering an action on the VM.
pub actions_fails: SharedMetric,
/// Number of PUTs for attaching source of boot.
pub boot_source_count: SharedMetric,
/// Number of failures during attaching source of boot.
pub boot_source_fails: SharedMetric,
/// Number of PUTs triggering a block attach.
pub drive_count: SharedMetric,
/// Number of failures in attaching a block device.
pub drive_fails: SharedMetric,
/// Number of PUTs for initializing the logging system.
pub logger_count: SharedMetric,
/// Number of failures in initializing the logging system.
pub logger_fails: SharedMetric,
/// Number of PUTs for configuring the machine.
pub machine_cfg_count: SharedMetric,
/// Number of failures in configuring the machine.
pub machine_cfg_fails: SharedMetric,
/// Number of PUTs for creating a new network interface.
pub network_count: SharedMetric,
/// Number of failures in creating a new network interface.
pub network_fails: SharedMetric,
}
/// Metrics specific to PATCH API Requests for counting user triggered actions and/or failures.
#[derive(Default, Serialize)]
pub struct PatchRequestsMetrics {
/// Number of tries to PATCH a block device.
pub drive_count: SharedMetric,
/// Number of failures in PATCHing a block device.
pub drive_fails: SharedMetric,
}
/// Block Device associated metrics.
#[derive(Default, Serialize)]
pub struct BlockDeviceMetrics {
    /// Number of times when activate failed on a block device.
    pub activate_fails: SharedMetric,
    /// Number of times when interacting with the space config of a block device failed.
    pub cfg_fails: SharedMetric,
    /// Number of times when handling events on a block device failed.
    pub event_fails: SharedMetric,
    /// Number of failures in executing a request on a block device.
    pub execute_fails: SharedMetric,
    /// Number of invalid requests received for this block device.
    pub invalid_reqs_count: SharedMetric,
    /// Number of flush operations triggered on this block device.
    pub flush_count: SharedMetric,
    /// Number of events triggered on the queue of this block device.
    pub queue_event_count: SharedMetric,
    /// Number of rate-limiter-related events.
    pub rate_limiter_event_count: SharedMetric,
    /// Number of update operations triggered on this block device.
    pub update_count: SharedMetric,
    /// Number of failures while doing an update on this block device.
    pub update_fails: SharedMetric,
    /// Number of bytes read by this block device.
    pub read_count: SharedMetric,
    /// Number of bytes written by this block device.
    pub write_count: SharedMetric,
}
/// Metrics specific to the i8042 device.
#[derive(Default, Serialize)]
pub struct I8042DeviceMetrics {
    /// Errors triggered while using the i8042 device.
    pub error_count: SharedMetric,
    /// Number of superfluous read intents on this i8042 device.
    pub missed_read_count: SharedMetric,
    /// Number of superfluous write intents on this i8042 device.
    pub missed_write_count: SharedMetric,
    /// Bytes read by this device.
    pub read_count: SharedMetric,
    /// Number of resets done by this device.
    pub reset_count: SharedMetric,
    /// Bytes written by this device.
    pub write_count: SharedMetric,
}
/// Metrics for the logging subsystem.
#[derive(Default, Serialize)]
pub struct LoggerSystemMetrics {
/// Number of misses on flushing metrics.
pub missed_metrics_count: SharedMetric,
/// Number of errors during metrics handling.
pub metrics_fails: SharedMetric,
/// Number of misses on logging human readable content.
pub missed_log_count: SharedMetric,
/// Number of errors while trying to log human readable content.
pub log_fails: SharedMetric,
}
/// Metrics for the MMDS functionality.
#[derive(Default, Serialize)]
pub struct MmdsMetrics {
/// Number of frames rerouted to MMDS.
pub rx_accepted: SharedMetric,
/// Number of errors while handling a frame through MMDS.
pub rx_accepted_err: SharedMetric,
/// Number of uncommon events encountered while processing packets through MMDS.
pub rx_accepted_unusual: SharedMetric,
/// The number of buffers which couldn't be parsed as valid Ethernet frames by the MMDS.
pub rx_bad_eth: SharedMetric,
/// The total number of bytes sent by the MMDS.
pub tx_bytes: SharedMetric,
/// The number of errors raised by the MMDS while attempting to send frames/packets/segments.
pub tx_errors: SharedMetric,
/// The number of frames sent by the MMDS.
pub tx_frames: SharedMetric,
/// The number of connections successfully accepted by the MMDS TCP handler.
pub connections_created: SharedMetric,
/// The number of connections cleaned up by the MMDS TCP handler.
pub connections_destroyed: SharedMetric,
}
/// Network-related metrics.
#[derive(Default, Serialize)]
pub struct NetDeviceMetrics {
/// Number of times when activate failed on a network device.
pub activate_fails: SharedMetric,
/// Number of times when interacting with the space config of a network device failed.
pub cfg_fails: SharedMetric,
/// Number of times when handling events on a network device failed.
pub event_fails: SharedMetric,
/// Number of events associated with the receiving queue.
pub rx_queue_event_count: SharedMetric,
/// Number of events associated with the rate limiter installed on the receiving path.
pub rx_event_rate_limiter_count: SharedMetric,
/// Number of events received on the associated tap.
pub rx_tap_event_count: SharedMetric,
/// Number of bytes received.
pub rx_bytes_count: SharedMetric,
/// Number of packets received.
pub rx_packets_count: SharedMetric,
/// Number of errors while receiving data.
pub rx_fails: SharedMetric,
/// Number of transmitted bytes.
pub tx_bytes_count: SharedMetric,
/// Number of errors while transmitting data.
pub tx_fails: SharedMetric,
/// Number of transmitted packets.
pub tx_packets_count: SharedMetric,
/// Number of events associated with the transmitting queue.
pub tx_queue_event_count: SharedMetric,
/// Number of events associated with the rate limiter installed on the transmitting path.
pub tx_rate_limiter_event_count: SharedMetric,
}
/// Metrics for the seccomp filtering.
#[derive(Serialize)]
pub struct SeccompMetrics {
/// Number of black listed syscalls.
pub bad_syscalls: Vec<SharedMetric>,
/// Number of errors inside the seccomp filtering.
pub num_faults: SharedMetric,
}
impl Default for SeccompMetrics {
fn | () -> SeccompMetrics {
let mut def_syscalls = vec![];
for _syscall in 0..SYSCALL_MAX {
def_syscalls.push(SharedMetric::default());
}
SeccompMetrics {
num_faults: SharedMetric::default(),
bad_syscalls: def_syscalls,
}
}
}
/// Metrics specific to the UART device.
#[derive(Default, Serialize)]
pub struct SerialDeviceMetrics {
/// Errors triggered while using the UART device.
pub error_count: SharedMetric,
/// Number of flush operations.
pub flush_count: SharedMetric,
/// Number of read calls that did not trigger a read.
pub missed_read_count: SharedMetric,
/// Number of write calls that did not trigger a write.
pub missed_write_count: SharedMetric,
/// Number of succeeded read calls.
pub read_count: SharedMetric,
/// Number of succeeded write calls.
pub write_count: SharedMetric,
}
/// Metrics specific to VCPUs' mode of functioning.
#[derive(Default, Serialize)]
pub struct VcpuMetrics {
    /// Number of KVM exits for handling input IO.
    pub exit_io_in: SharedMetric,
    /// Number of KVM exits for handling output IO.
    pub exit_io_out: SharedMetric,
    /// Number of KVM exits for handling MMIO reads.
    pub exit_mmio_read: SharedMetric,
    /// Number of KVM exits for handling MMIO writes.
    pub exit_mmio_write: SharedMetric,
    /// Number of errors during this VCPU's run.
    pub failures: SharedMetric,
    /// Failures in configuring the CPUID.
    // NOTE(review): field name has a typo ("fitler" -> "filter"), but serde emits
    // it verbatim, so renaming it would change the serialized metrics schema.
    pub fitler_cpuid: SharedMetric,
}
/// Metrics specific to the machine manager as a whole.
#[derive(Default, Serialize)]
pub struct VmmMetrics {
/// Number of device related events received for a VM.
pub device_events: SharedMetric,
/// Metric for signaling a panic has occurred.
pub panic_count: SharedMetric,
}
/// Memory usage metrics.
#[derive(Default, Serialize)]
pub struct MemoryMetrics {
/// Number of pages dirtied since the last call to `KVM_GET_DIRTY_LOG`.
pub dirty_pages: SharedMetric,
}
// The sole purpose of this struct is to produce an UTC timestamp when an instance is serialized.
#[derive(Default)]
struct SerializeToUtcTimestampMs;
impl Serialize for SerializeToUtcTimestampMs {
    /// Emits the UTC time, in milliseconds, at the moment serialization happens.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_i64(chrono::Utc::now().timestamp_millis())
    }
}
/// Structure storing all metrics while enforcing serialization support on them.
#[derive(Default, Serialize)]
pub struct FirecrackerMetrics {
    // Private on purpose: every serialization stamps the flush time automatically,
    // so callers never set this field.
    utc_timestamp_ms: SerializeToUtcTimestampMs,
    /// API Server related metrics.
    pub api_server: ApiServerMetrics,
    /// A block device's related metrics.
    pub block: BlockDeviceMetrics,
    /// Metrics related to API GET requests.
    pub get_api_requests: GetRequestsMetrics,
    /// Metrics related to the i8042 device.
    pub i8042: I8042DeviceMetrics,
    /// Logging related metrics.
    pub logger: LoggerSystemMetrics,
    /// Metrics specific to MMDS functionality.
    pub mmds: MmdsMetrics,
    /// A network device's related metrics.
    pub net: NetDeviceMetrics,
    /// Metrics related to API PATCH requests.
    pub patch_api_requests: PatchRequestsMetrics,
    /// Metrics related to API PUT requests.
    pub put_api_requests: PutRequestsMetrics,
    /// Metrics related to seccomp filtering.
    pub seccomp: SeccompMetrics,
    /// Metrics related to a vcpu's functioning.
    pub vcpu: VcpuMetrics,
    /// Metrics related to the virtual machine manager.
    pub vmm: VmmMetrics,
    /// Metrics related to the UART device.
    pub uart: SerialDeviceMetrics,
    /// Memory usage metrics.
    pub memory: MemoryMetrics,
}
lazy_static! {
/// Static instance used for handling metrics.
///
pub static ref METRICS: FirecrackerMetrics = FirecrackerMetrics::default();
}
#[cfg(test)]
mod tests {
    extern crate serde_json;
    use super::*;
    use std::sync::Arc;
    use std::thread;

    #[test]
    fn test_metric() {
        // Single-threaded metric: plain increments must accumulate exactly.
        let m1 = SimpleMetric::default();
        m1.inc();
        m1.inc();
        m1.add(5);
        m1.inc();
        assert_eq!(m1.count(), 8);
        let m2 = Arc::new(SharedMetric::default());
        // We're going to create a number of threads that will attempt to increase this metric
        // in parallel. If everything goes fine we still can't be sure the synchronization works,
        // but if something fails, then we definitely have a problem :-s
        const NUM_THREADS_TO_SPAWN: usize = 4;
        const NUM_INCREMENTS_PER_THREAD: usize = 100000;
        const M2_INITIAL_COUNT: usize = 123;
        m2.add(M2_INITIAL_COUNT);
        let mut v = Vec::with_capacity(NUM_THREADS_TO_SPAWN);
        for _ in 0..NUM_THREADS_TO_SPAWN {
            let r = m2.clone();
            v.push(thread::spawn(move || {
                for _ in 0..NUM_INCREMENTS_PER_THREAD {
                    r.inc();
                }
            }));
        }
        for handle in v {
            handle.join().unwrap();
        }
        assert_eq!(
            m2.count(),
            M2_INITIAL_COUNT + NUM_THREADS_TO_SPAWN * NUM_INCREMENTS_PER_THREAD
        );
    }

    #[test]
    fn test_serialize() {
        // Smoke test: the full metrics tree must serialize to JSON without error.
        let s = serde_json::to_string(&FirecrackerMetrics::default());
        assert!(s.is_ok());
    }
}
| default | identifier_name |
metrics.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//! Defines the metrics system.
//!
//! # Design
//! The main design goals of this system are:
//! * Use lockless operations, preferably ones that don't require anything other than
//! simple reads/writes being atomic.
//! * Exploit interior mutability and atomics being Sync to allow all methods (including the ones
//! which are effectively mutable) to be callable on a global non-mut static.
//! * Rely on `serde` to provide the actual serialization for logging the metrics.
//! * Since all metrics start at 0, we implement the `Default` trait via derive for all of them,
//! to avoid having to initialize everything by hand.
//!
//! Moreover, the value of a metric is currently NOT reset to 0 each time it's being logged. The
//! current approach is to store two values (current and previous) and compute the delta between
//! them each time we do a flush (i.e by serialization). There are a number of advantages
//! to this approach, including:
//! * We don't have to introduce an additional write (to reset the value) from the thread which
//! does to actual logging, so less synchronization effort is required.
//! * We don't have to worry at all that much about losing some data if logging fails for a while
//! (this could be a concern, I guess).
//! If if turns out this approach is not really what we want, it's pretty easy to resort to
//! something else, while working behind the same interface.
use std::sync::atomic::{AtomicUsize, Ordering};
use chrono;
use serde::{Serialize, Serializer};
const SYSCALL_MAX: usize = 350;
/// Used for defining new types of metrics that can be either incremented with an unit
/// or an arbitrary amount of units.
// This trait helps with writing less code. It has to be in scope (via an use directive) in order
// for its methods to be available to call on structs that implement it.
pub trait Metric {
/// Adds `value` to the current counter.
fn add(&self, value: usize);
/// Increments by 1 unit the current counter.
fn inc(&self) {
self.add(1);
}
/// Returns current value of the counter.
fn count(&self) -> usize;
}
/// Representation of a metric that is expected to be incremented from a single thread, so it
/// can use simple loads and stores with no additional synchronization necessities.
// Loads are currently Relaxed everywhere, because we don't do anything besides
// logging the retrieved value (their outcome os not used to modify some memory location in a
// potentially inconsistent manner). There's no way currently to make sure a SimpleMetric is only
// incremented by a single thread, this has to be enforced via judicious use (although, every
// non-vCPU related metric is associated with a particular thread, so it shouldn't be that easy
// to misuse SimpleMetric fields).
#[derive(Default)]
pub struct SimpleMetric(AtomicUsize);
impl Metric for SimpleMetric {
fn add(&self, value: usize) {
let ref count = self.0;
count.store(count.load(Ordering::Relaxed) + value, Ordering::Relaxed);
}
fn count(&self) -> usize {
self.0.load(Ordering::Relaxed)
}
}
impl Serialize for SimpleMetric {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
// There's no serializer.serialize_usize().
serializer.serialize_u64(self.0.load(Ordering::Relaxed) as u64)
}
}
/// Representation of a metric that is expected to be incremented from more than one thread, so more
/// synchronization is necessary.
// It's currently used for vCPU metrics. An alternative here would be
// to have one instance of every metric for each thread (like a per-thread SimpleMetric), and to
// aggregate them when logging. However this probably overkill unless we have a lot of vCPUs
// incrementing metrics very often. Still, it's there if we ever need it :-s
#[derive(Default)]
// We will be keeping two values for each metric for being able to reset
// counters on each metric.
// 1st member - current value being updated
// 2nd member - old value that gets the current value whenever metrics is flushed to disk
pub struct SharedMetric(AtomicUsize, AtomicUsize);
impl Metric for SharedMetric {
// While the order specified for this operation is still Relaxed, the actual instruction will
// be an asm "LOCK; something" and thus atomic across multiple threads, simply because of the
// fetch_and_add (as opposed to "store(load() + 1)") implementation for atomics.
// TODO: would a stronger ordering make a difference here?
fn add(&self, value: usize) {
self.0.fetch_add(value, Ordering::Relaxed);
}
fn count(&self) -> usize {
self.0.load(Ordering::Relaxed)
}
}
impl Serialize for SharedMetric {
    /// Serializes the delta accumulated since the previous flush, then resets it.
    ///
    /// On success, the "old" slot (`self.1`) is overwritten with the snapshot so the
    /// next serialization reports only what was added in between.
    /// !!! Any print of the metrics will also reset them. Use with caution !!!
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // There's no serializer.serialize_usize() for some reason :(
        let snapshot = self.0.load(Ordering::Relaxed);
        let res = serializer.serialize_u64(snapshot as u64 - self.1.load(Ordering::Relaxed) as u64);
        // Only advance the flush watermark if the value was actually written out,
        // so a failed flush does not lose the delta.
        if res.is_ok() {
            self.1.store(snapshot, Ordering::Relaxed);
        }
        res
    }
}
// The following structs are used to define a certain organization for the set of metrics we
// are interested in. Whenever the name of a field differs from its ideal textual representation
// in the serialized form, we can use the #[serde(rename = "name")] attribute to, well, rename it.
/// Metrics related to the internal API server.
#[derive(Default, Serialize)]
pub struct ApiServerMetrics {
/// Measures the process's startup time in microseconds.
pub process_startup_time_us: SharedMetric,
/// Measures the cpu's startup time in microseconds.
pub process_startup_time_cpu_us: SharedMetric,
/// Number of failures on API requests triggered by internal errors.
pub sync_outcome_fails: SharedMetric,
/// Number of timeouts during communication with the VMM.
pub sync_vmm_send_timeout_count: SharedMetric,
}
/// Metrics specific to GET API Requests for counting user triggered actions and/or failures.
#[derive(Default, Serialize)]
pub struct GetRequestsMetrics {
/// Number of GETs for getting information on the instance.
pub instance_info_count: SharedMetric,
/// Number of failures when obtaining information on the current instance.
pub instance_info_fails: SharedMetric,
/// Number of GETs for getting status on attaching machine configuration.
pub machine_cfg_count: SharedMetric,
/// Number of failures during GETs for getting information on the instance.
pub machine_cfg_fails: SharedMetric,
}
/// Metrics specific to PUT API Requests for counting user triggered actions and/or failures.
#[derive(Default, Serialize)]
pub struct PutRequestsMetrics {
/// Number of PUTs triggering an action on the VM.
pub actions_count: SharedMetric,
/// Number of failures in triggering an action on the VM.
pub actions_fails: SharedMetric,
/// Number of PUTs for attaching source of boot.
pub boot_source_count: SharedMetric,
/// Number of failures during attaching source of boot.
pub boot_source_fails: SharedMetric,
/// Number of PUTs triggering a block attach.
pub drive_count: SharedMetric,
/// Number of failures in attaching a block device.
pub drive_fails: SharedMetric,
/// Number of PUTs for initializing the logging system.
pub logger_count: SharedMetric,
/// Number of failures in initializing the logging system.
pub logger_fails: SharedMetric,
/// Number of PUTs for configuring the machine.
pub machine_cfg_count: SharedMetric,
/// Number of failures in configuring the machine.
pub machine_cfg_fails: SharedMetric,
/// Number of PUTs for creating a new network interface.
pub network_count: SharedMetric,
/// Number of failures in creating a new network interface.
pub network_fails: SharedMetric,
}
/// Metrics specific to PATCH API Requests for counting user triggered actions and/or failures.
#[derive(Default, Serialize)]
pub struct PatchRequestsMetrics {
/// Number of tries to PATCH a block device.
pub drive_count: SharedMetric,
/// Number of failures in PATCHing a block device.
pub drive_fails: SharedMetric,
}
/// Block Device associated metrics.
#[derive(Default, Serialize)]
pub struct BlockDeviceMetrics {
/// Number of times when activate failed on a block device.
pub activate_fails: SharedMetric,
/// Number of times when interacting with the space config of a block device failed.
pub cfg_fails: SharedMetric,
/// Number of times when handling events on a block device failed.
pub event_fails: SharedMetric,
/// Number of failures in executing a request on a block device.
pub execute_fails: SharedMetric,
/// Number of invalid requests received for this block device.
pub invalid_reqs_count: SharedMetric,
/// Number of flushes operation triggered on this block device.
pub flush_count: SharedMetric,
/// Number of events triggerd on the queue of this block device.
pub queue_event_count: SharedMetric,
/// Number of events ratelimiter-related.
pub rate_limiter_event_count: SharedMetric,
/// Number of update operation triggered on this block device.
pub update_count: SharedMetric,
/// Number of failures while doing update on this block device.
pub update_fails: SharedMetric,
/// Number of bytes read by this block device.
pub read_count: SharedMetric,
/// Number of bytes written by this block device.
pub write_count: SharedMetric,
}
/// Metrics specific to the i8042 device.
#[derive(Default, Serialize)]
pub struct I8042DeviceMetrics {
/// Errors triggered while using the i8042 device.
pub error_count: SharedMetric,
/// Number of superfluous read intents on this i8042 device.
pub missed_read_count: SharedMetric,
/// Number of superfluous read intents on this i8042 device.
pub missed_write_count: SharedMetric,
/// Bytes read by this device.
pub read_count: SharedMetric,
/// Number of resets done by this device.
pub reset_count: SharedMetric,
/// Bytes written by this device.
pub write_count: SharedMetric,
}
/// Metrics for the logging subsystem.
#[derive(Default, Serialize)]
pub struct LoggerSystemMetrics {
/// Number of misses on flushing metrics.
pub missed_metrics_count: SharedMetric,
/// Number of errors during metrics handling.
pub metrics_fails: SharedMetric,
/// Number of misses on logging human readable content.
pub missed_log_count: SharedMetric,
/// Number of errors while trying to log human readable content.
pub log_fails: SharedMetric,
}
/// Metrics for the MMDS functionality.
#[derive(Default, Serialize)]
pub struct MmdsMetrics {
/// Number of frames rerouted to MMDS.
pub rx_accepted: SharedMetric,
/// Number of errors while handling a frame through MMDS.
pub rx_accepted_err: SharedMetric,
/// Number of uncommon events encountered while processing packets through MMDS.
pub rx_accepted_unusual: SharedMetric,
/// The number of buffers which couldn't be parsed as valid Ethernet frames by the MMDS.
pub rx_bad_eth: SharedMetric,
/// The total number of bytes sent by the MMDS.
pub tx_bytes: SharedMetric,
/// The number of errors raised by the MMDS while attempting to send frames/packets/segments.
pub tx_errors: SharedMetric,
/// The number of frames sent by the MMDS.
pub tx_frames: SharedMetric,
/// The number of connections successfully accepted by the MMDS TCP handler.
pub connections_created: SharedMetric,
/// The number of connections cleaned up by the MMDS TCP handler.
pub connections_destroyed: SharedMetric,
}
/// Network-related metrics.
#[derive(Default, Serialize)]
pub struct NetDeviceMetrics {
/// Number of times when activate failed on a network device.
pub activate_fails: SharedMetric,
/// Number of times when interacting with the space config of a network device failed.
pub cfg_fails: SharedMetric,
/// Number of times when handling events on a network device failed.
pub event_fails: SharedMetric,
/// Number of events associated with the receiving queue.
pub rx_queue_event_count: SharedMetric,
/// Number of events associated with the rate limiter installed on the receiving path.
pub rx_event_rate_limiter_count: SharedMetric,
/// Number of events received on the associated tap.
pub rx_tap_event_count: SharedMetric,
/// Number of bytes received.
pub rx_bytes_count: SharedMetric,
/// Number of packets received.
pub rx_packets_count: SharedMetric,
/// Number of errors while receiving data.
pub rx_fails: SharedMetric,
/// Number of transmitted bytes.
pub tx_bytes_count: SharedMetric,
/// Number of errors while transmitting data.
pub tx_fails: SharedMetric,
/// Number of transmitted packets.
pub tx_packets_count: SharedMetric,
/// Number of events associated with the transmitting queue.
pub tx_queue_event_count: SharedMetric,
/// Number of events associated with the rate limiter installed on the transmitting path.
pub tx_rate_limiter_event_count: SharedMetric,
}
/// Metrics for the seccomp filtering.
#[derive(Serialize)]
pub struct SeccompMetrics {
/// Number of black listed syscalls.
pub bad_syscalls: Vec<SharedMetric>,
/// Number of errors inside the seccomp filtering.
pub num_faults: SharedMetric,
}
impl Default for SeccompMetrics {
fn default() -> SeccompMetrics {
let mut def_syscalls = vec![];
for _syscall in 0..SYSCALL_MAX {
def_syscalls.push(SharedMetric::default());
}
SeccompMetrics {
num_faults: SharedMetric::default(),
bad_syscalls: def_syscalls,
}
}
}
/// Metrics specific to the UART device.
#[derive(Default, Serialize)]
pub struct SerialDeviceMetrics {
/// Errors triggered while using the UART device.
pub error_count: SharedMetric,
/// Number of flush operations.
pub flush_count: SharedMetric,
/// Number of read calls that did not trigger a read.
pub missed_read_count: SharedMetric,
/// Number of write calls that did not trigger a write.
pub missed_write_count: SharedMetric,
/// Number of succeeded read calls.
pub read_count: SharedMetric,
/// Number of succeeded write calls.
pub write_count: SharedMetric,
}
/// Metrics specific to VCPUs' mode of functioning.
#[derive(Default, Serialize)]
pub struct VcpuMetrics {
/// Number of KVM exits for handling input IO.
pub exit_io_in: SharedMetric,
/// Number of KVM exits for handling output IO.
pub exit_io_out: SharedMetric,
/// Number of KVM exits for handling MMIO reads.
pub exit_mmio_read: SharedMetric,
/// Number of KVM exits for handling MMIO writes.
pub exit_mmio_write: SharedMetric,
/// Number of errors during this VCPU's run.
pub failures: SharedMetric,
/// Failures in configuring the CPUID.
pub fitler_cpuid: SharedMetric,
}
/// Metrics specific to the machine manager as a whole.
#[derive(Default, Serialize)]
pub struct VmmMetrics {
/// Number of device related events received for a VM.
pub device_events: SharedMetric,
/// Metric for signaling a panic has occurred.
pub panic_count: SharedMetric,
}
/// Memory usage metrics.
#[derive(Default, Serialize)]
pub struct MemoryMetrics {
/// Number of pages dirtied since the last call to `KVM_GET_DIRTY_LOG`.
pub dirty_pages: SharedMetric,
}
// The sole purpose of this struct is to produce an UTC timestamp when an instance is serialized.
#[derive(Default)]
struct SerializeToUtcTimestampMs;
impl Serialize for SerializeToUtcTimestampMs {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
serializer.serialize_i64(chrono::Utc::now().timestamp_millis())
}
}
/// Structure storing all metrics while enforcing serialization support on them.
#[derive(Default, Serialize)]
pub struct FirecrackerMetrics {
utc_timestamp_ms: SerializeToUtcTimestampMs,
/// API Server related metrics.
pub api_server: ApiServerMetrics,
/// A block device's related metrics.
pub block: BlockDeviceMetrics,
/// Metrics related to API GET requests.
pub get_api_requests: GetRequestsMetrics,
/// Metrics relaetd to the i8042 device.
pub i8042: I8042DeviceMetrics,
/// Logging related metrics.
pub logger: LoggerSystemMetrics,
/// Metrics specific to MMDS functionality.
pub mmds: MmdsMetrics,
/// A network device's related metrics.
pub net: NetDeviceMetrics,
/// Metrics related to API PATCH requests.
pub patch_api_requests: PatchRequestsMetrics,
/// Metrics related to API PUT requests.
pub put_api_requests: PutRequestsMetrics,
/// Metrics related to seccomp filtering.
pub seccomp: SeccompMetrics,
/// Metrics related to a vcpu's functioning.
pub vcpu: VcpuMetrics,
/// Metrics related to the virtual machine manager.
pub vmm: VmmMetrics,
/// Metrics related to the UART device.
pub uart: SerialDeviceMetrics,
/// Memory usage metrics.
pub memory: MemoryMetrics,
}
lazy_static! {
/// Static instance used for handling metrics.
///
pub static ref METRICS: FirecrackerMetrics = FirecrackerMetrics::default();
}
#[cfg(test)]
mod tests {
extern crate serde_json;
use super::*;
use std::sync::Arc;
use std::thread;
#[test]
fn test_metric() {
let m1 = SimpleMetric::default();
m1.inc();
m1.inc();
m1.add(5);
m1.inc();
assert_eq!(m1.count(), 8);
let m2 = Arc::new(SharedMetric::default());
// We're going to create a number of threads that will attempt to increase this metric
// in parallel. If everything goes fine we still can't be sure the synchronization works,
// but it something fails, then we definitely have a problem :-s
const NUM_THREADS_TO_SPAWN: usize = 4;
const NUM_INCREMENTS_PER_THREAD: usize = 100000;
const M2_INITIAL_COUNT: usize = 123;
m2.add(M2_INITIAL_COUNT);
let mut v = Vec::with_capacity(NUM_THREADS_TO_SPAWN);
for _ in 0..NUM_THREADS_TO_SPAWN {
let r = m2.clone();
v.push(thread::spawn(move || {
for _ in 0..NUM_INCREMENTS_PER_THREAD {
r.inc();
}
}));
}
for handle in v {
handle.join().unwrap();
}
assert_eq!(
m2.count(),
M2_INITIAL_COUNT + NUM_THREADS_TO_SPAWN * NUM_INCREMENTS_PER_THREAD
);
}
#[test]
fn test_serialize() {
let s = serde_json::to_string(&FirecrackerMetrics::default());
assert!(s.is_ok());
}
}
| {
self.1.store(snapshot, Ordering::Relaxed);
} | conditional_block |
decode.rs | /* Copyright 2013 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::str::from_bytes;
use std::int::range;
use std::cast::transmute;
use encode::*;
use tools::stream::*;
static L_END: bool = true;
//Format codes
static DOUBLE: u8 = 0x01;
static STRING: u8 = 0x02;
static EMBED: u8 = 0x03;
static ARRAY: u8 = 0x04;
static BINARY: u8 = 0x05;
static OBJID: u8 = 0x07;
static BOOL: u8 = 0x08;
static UTCDATE: u8 = 0x09;
static NULL: u8 = 0x0A;
static REGEX: u8 = 0x0B;
static DBREF: u8 = 0x0C;
static JSCRIPT: u8 = 0x0D;
static JSCOPE: u8 = 0x0F;
static INT32: u8 = 0x10;
static TSTAMP: u8 = 0x11;
static INT64: u8 = 0x12;
static MINKEY: u8 = 0xFF;
static MAXKEY: u8 = 0x7F;
///Parser object for BSON. T is constrained to Stream<u8>.
pub struct BsonParser<T> {
stream: T
}
///Collects up to 8 bytes in order as a u64.
priv fn bytesum(bytes: &[u8]) -> u64 {
let mut i = 0;
let mut ret: u64 = 0;
for bytes.iter().advance |&byte| {
ret |= (byte as u64) >> (8 * i);
i += 1;
}
ret
}
impl<T:Stream<u8>> BsonParser<T> {
///Parse a byte stream into a BsonDocument. Returns an error string on parse failure.
///Initializing a BsonParser and calling document() will fully convert a ~[u8]
///into a BsonDocument if it was formatted correctly.
pub fn document(&mut self) -> Result<BsonDocument,~str> {
let size = bytesum(self.stream.aggregate(4)) as i32;
let mut elemcode = self.stream.expect(&[
DOUBLE,STRING,EMBED,ARRAY,BINARY,OBJID,
BOOL,UTCDATE,NULL,REGEX,DBREF,JSCRIPT,JSCOPE,
INT32,TSTAMP,INT64,MINKEY,MAXKEY]);
self.stream.pass(1);
let mut ret = BsonDocument::new();
while elemcode!= None {
let key = self.cstring();
let val: Document = match elemcode {
Some(DOUBLE) => self._double(),
Some(STRING) => self._string(),
Some(EMBED) => {
let doc = self._embed();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(ARRAY) => {
let doc = self._array();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(BINARY) => self._binary(),
Some(OBJID) => ObjectId(self.stream.aggregate(12)),
Some(BOOL) => self._bool(),
Some(UTCDATE) => UTCDate(bytesum(self.stream.aggregate(8)) as i64),
Some(NULL) => Null,
Some(REGEX) => self._regex(),
Some(DBREF) => {
let doc = self._dbref();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(JSCRIPT) => {
let doc = self._jscript();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(JSCOPE) => {
let doc = self._jscope();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(INT32) => Int32(bytesum(self.stream.aggregate(4)) as i32),
Some(TSTAMP) => Timestamp(bytesum(self.stream.aggregate(4)) as u32,
bytesum(self.stream.aggregate(4)) as u32),
Some(INT64) => Int64(bytesum(self.stream.aggregate(8)) as i64),
Some(MINKEY) => MinKey,
Some(MAXKEY) => MaxKey,
_ => return Err(~"an invalid element code was found")
};
ret.put(key, val);
elemcode = self.stream.expect(&[
DOUBLE,STRING,EMBED,ARRAY,BINARY,OBJID,
BOOL,UTCDATE,NULL,REGEX,DBREF,JSCRIPT,JSCOPE,
INT32,TSTAMP,INT64,MINKEY,MAXKEY]);
if self.stream.has_next() { self.stream.pass(1); }
}
ret.size = size;
Ok(ret)
}
///Parse a string without denoting its length. Mainly for keys.
fn cstring(&mut self) -> ~str {
let is_0: &fn(&u8) -> bool = |&x| x == 0x00;
let s = from_bytes(self.stream.until(is_0));
self.stream.pass(1);
s
}
///Parse a double.
fn _double(&mut self) -> Document {
let mut u: u64 = 0;
for range(0,8) |i| {
//TODO: how will this hold up on big-endian architectures?
u |= (*self.stream.first() as u64 << ((8 * i)));
self.stream.pass(1);
}
let v: &f64 = unsafe { transmute(&u) };
Double(*v)
}
///Parse a string with length.
fn _string(&mut self) -> Document {
self.stream.pass(4); //skip length
let v = self.cstring();
UString(v)
}
///Parse an embedded object. May fail.
fn _embed(&mut self) -> Result<Document,~str> {
return self.document().chain(|s| Ok(Embedded(~s)));
}
///Parse an embedded array. May fail.
fn _array(&mut self) -> Result<Document,~str> {
return self.document().chain(|s| Ok(Array(~s)));
}
///Parse generic binary data.
fn _binary(&mut self) -> Document {
let count = bytesum(self.stream.aggregate(4));
let subtype = *(self.stream.first());
self.stream.pass(1);
let data = self.stream.aggregate(count as uint);
Binary(subtype, data)
}
///Parse a boolean.
fn _bool(&mut self) -> Document {
let ret = (*self.stream.first()) as bool;
self.stream.pass(1);
Bool(ret)
}
///Parse a regex.
fn _regex(&mut self) -> Document {
let s1 = self.cstring();
let s2 = self.cstring();
Regex(s1, s2)
}
fn _dbref(&mut self) -> Result<Document, ~str> {
let s = match self._string() {
UString(rs) => rs,
_ => return Err(~"invalid string found in dbref")
};
let d = self.stream.aggregate(12);
Ok(DBRef(s, ~ObjectId(d)))
} | fn _jscript(&mut self) -> Result<Document, ~str> {
let s = self._string();
//using this to avoid irrefutable pattern error
match s {
UString(s) => Ok(JScript(s)),
_ => Err(~"invalid string found in javascript")
}
}
///Parse a scoped javascript object.
fn _jscope(&mut self) -> Result<Document,~str> {
self.stream.pass(4);
let s = self.cstring();
let doc = self.document();
return doc.chain(|d| Ok(JScriptWithScope(s.clone(),~d)));
}
///Create a new parser with a given stream.
pub fn new(stream: T) -> BsonParser<T> { BsonParser { stream: stream } }
}
///Standalone decode binding.
///This is equivalent to initializing a parser and calling document().
pub fn decode(b: ~[u8]) -> Result<BsonDocument,~str> {
let mut parser = BsonParser::new(b);
parser.document()
}
#[cfg(test)]
mod tests {
use super::*;
use encode::*;
use extra::test::BenchHarness;
#[test]
fn test_decode_size() {
let doc = decode(~[10,0,0,0,10,100,100,100,0]);
assert_eq!(doc.unwrap().size, 10);
}
#[test]
fn test_cstring_decode() {
let stream: ~[u8] = ~[104,101,108,108,111,0];
let mut parser = BsonParser::new(stream);
assert_eq!(parser.cstring(), ~"hello");
}
#[test]
fn test_double_decode() {
let stream: ~[u8] = ~[110,134,27,240,249,33,9,64];
let mut parser = BsonParser::new(stream);
let d = parser._double();
match d {
Double(d2) => {
assert!(d2.approx_eq(&3.14159f64));
}
_ => fail!("failed in a test case; how did I get here?")
}
}
#[test]
fn test_document_decode() {
let stream1: ~[u8] = ~[11,0,0,0,8,102,111,111,0,1,0];
let mut parser1 = BsonParser::new(stream1);
let mut doc1 = BsonDocument::new();
doc1.put(~"foo", Bool(true));
assert_eq!(parser1.document().unwrap(), doc1);
let stream2: ~[u8] = ~[45,0,0,0,4,102,111,111,0,22,0,0,0,2,48,0,
6,0,0,0,104,101,108,108,111,0,8,49,0,0,
0,2,98,97,122,0,4,0,0,0,113,117,120,0,0];
let mut inside = BsonDocument::new();
inside.put_all(~[(~"0", UString(~"hello")), (~"1", Bool(false))]);
let mut doc2 = BsonDocument::new();
doc2.put_all(~[(~"foo", Array(~inside.clone())), (~"baz", UString(~"qux"))]);
assert_eq!(decode(stream2).unwrap(), doc2);
}
#[test]
fn test_binary_decode() {
let stream: ~[u8] = ~[6,0,0,0,0,1,2,3,4,5,6];
let mut parser = BsonParser::new(stream);
assert_eq!(parser._binary(), Binary(0, ~[1,2,3,4,5,6]));
}
#[test]
fn test_dbref_encode() {
let mut doc = BsonDocument::new();
doc.put(~"foo", DBRef(~"bar", ~ObjectId(~[0u8,1,2,3,4,5,6,7,8,9,10,11])));
let stream: ~[u8] = ~[30,0,0,0,12,102,111,111,0,4,0,0,0,98,97,114,0,0,1,2,3,4,5,6,7,8,9,10,11,0];
assert_eq!(decode(stream).unwrap(), doc)
}
//TODO: get bson strings of torture-test objects
#[bench]
fn bench_basic_obj_decode(b: &mut BenchHarness) {
do b.iter {
let stream: ~[u8] = ~[45,0,0,0,4,102,111,
111,0,22,0,0,0,2,48,0,6,0,0,0,104,101,108,
108,111,0,8,49,0,0,0,2,98,97,122,0,4,0,0,0,
113,117,120,0,0];
decode(stream);
}
}
} | ///Parse a javascript object. | random_line_split |
decode.rs | /* Copyright 2013 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::str::from_bytes;
use std::int::range;
use std::cast::transmute;
use encode::*;
use tools::stream::*;
static L_END: bool = true;
//Format codes
static DOUBLE: u8 = 0x01;
static STRING: u8 = 0x02;
static EMBED: u8 = 0x03;
static ARRAY: u8 = 0x04;
static BINARY: u8 = 0x05;
static OBJID: u8 = 0x07;
static BOOL: u8 = 0x08;
static UTCDATE: u8 = 0x09;
static NULL: u8 = 0x0A;
static REGEX: u8 = 0x0B;
static DBREF: u8 = 0x0C;
static JSCRIPT: u8 = 0x0D;
static JSCOPE: u8 = 0x0F;
static INT32: u8 = 0x10;
static TSTAMP: u8 = 0x11;
static INT64: u8 = 0x12;
static MINKEY: u8 = 0xFF;
static MAXKEY: u8 = 0x7F;
///Parser object for BSON. T is constrained to Stream<u8>.
pub struct BsonParser<T> {
stream: T
}
///Collects up to 8 bytes in order as a u64.
priv fn bytesum(bytes: &[u8]) -> u64 {
let mut i = 0;
let mut ret: u64 = 0;
for bytes.iter().advance |&byte| {
ret |= (byte as u64) >> (8 * i);
i += 1;
}
ret
}
impl<T:Stream<u8>> BsonParser<T> {
///Parse a byte stream into a BsonDocument. Returns an error string on parse failure.
///Initializing a BsonParser and calling document() will fully convert a ~[u8]
///into a BsonDocument if it was formatted correctly.
pub fn document(&mut self) -> Result<BsonDocument,~str> {
let size = bytesum(self.stream.aggregate(4)) as i32;
let mut elemcode = self.stream.expect(&[
DOUBLE,STRING,EMBED,ARRAY,BINARY,OBJID,
BOOL,UTCDATE,NULL,REGEX,DBREF,JSCRIPT,JSCOPE,
INT32,TSTAMP,INT64,MINKEY,MAXKEY]);
self.stream.pass(1);
let mut ret = BsonDocument::new();
while elemcode!= None {
let key = self.cstring();
let val: Document = match elemcode {
Some(DOUBLE) => self._double(),
Some(STRING) => self._string(),
Some(EMBED) => {
let doc = self._embed();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(ARRAY) => {
let doc = self._array();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(BINARY) => self._binary(),
Some(OBJID) => ObjectId(self.stream.aggregate(12)),
Some(BOOL) => self._bool(),
Some(UTCDATE) => UTCDate(bytesum(self.stream.aggregate(8)) as i64),
Some(NULL) => Null,
Some(REGEX) => self._regex(),
Some(DBREF) => {
let doc = self._dbref();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(JSCRIPT) => {
let doc = self._jscript();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(JSCOPE) => {
let doc = self._jscope();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(INT32) => Int32(bytesum(self.stream.aggregate(4)) as i32),
Some(TSTAMP) => Timestamp(bytesum(self.stream.aggregate(4)) as u32,
bytesum(self.stream.aggregate(4)) as u32),
Some(INT64) => Int64(bytesum(self.stream.aggregate(8)) as i64),
Some(MINKEY) => MinKey,
Some(MAXKEY) => MaxKey,
_ => return Err(~"an invalid element code was found")
};
ret.put(key, val);
elemcode = self.stream.expect(&[
DOUBLE,STRING,EMBED,ARRAY,BINARY,OBJID,
BOOL,UTCDATE,NULL,REGEX,DBREF,JSCRIPT,JSCOPE,
INT32,TSTAMP,INT64,MINKEY,MAXKEY]);
if self.stream.has_next() { self.stream.pass(1); }
}
ret.size = size;
Ok(ret)
}
///Parse a string without denoting its length. Mainly for keys.
fn cstring(&mut self) -> ~str {
let is_0: &fn(&u8) -> bool = |&x| x == 0x00;
let s = from_bytes(self.stream.until(is_0));
self.stream.pass(1);
s
}
///Parse a double.
fn _double(&mut self) -> Document {
let mut u: u64 = 0;
for range(0,8) |i| {
//TODO: how will this hold up on big-endian architectures?
u |= (*self.stream.first() as u64 << ((8 * i)));
self.stream.pass(1);
}
let v: &f64 = unsafe { transmute(&u) };
Double(*v)
}
///Parse a string with length.
fn _string(&mut self) -> Document {
self.stream.pass(4); //skip length
let v = self.cstring();
UString(v)
}
///Parse an embedded object. May fail.
fn _embed(&mut self) -> Result<Document,~str> {
return self.document().chain(|s| Ok(Embedded(~s)));
}
///Parse an embedded array. May fail.
fn _array(&mut self) -> Result<Document,~str> {
return self.document().chain(|s| Ok(Array(~s)));
}
///Parse generic binary data.
fn _binary(&mut self) -> Document {
let count = bytesum(self.stream.aggregate(4));
let subtype = *(self.stream.first());
self.stream.pass(1);
let data = self.stream.aggregate(count as uint);
Binary(subtype, data)
}
///Parse a boolean.
fn _bool(&mut self) -> Document {
let ret = (*self.stream.first()) as bool;
self.stream.pass(1);
Bool(ret)
}
///Parse a regex.
fn _regex(&mut self) -> Document {
let s1 = self.cstring();
let s2 = self.cstring();
Regex(s1, s2)
}
fn _dbref(&mut self) -> Result<Document, ~str> |
///Parse a javascript object.
fn _jscript(&mut self) -> Result<Document, ~str> {
let s = self._string();
//using this to avoid irrefutable pattern error
match s {
UString(s) => Ok(JScript(s)),
_ => Err(~"invalid string found in javascript")
}
}
///Parse a scoped javascript object.
fn _jscope(&mut self) -> Result<Document,~str> {
self.stream.pass(4);
let s = self.cstring();
let doc = self.document();
return doc.chain(|d| Ok(JScriptWithScope(s.clone(),~d)));
}
///Create a new parser with a given stream.
pub fn new(stream: T) -> BsonParser<T> { BsonParser { stream: stream } }
}
///Standalone decode binding.
///This is equivalent to initializing a parser and calling document().
pub fn decode(b: ~[u8]) -> Result<BsonDocument,~str> {
let mut parser = BsonParser::new(b);
parser.document()
}
#[cfg(test)]
mod tests {
use super::*;
use encode::*;
use extra::test::BenchHarness;
#[test]
fn test_decode_size() {
let doc = decode(~[10,0,0,0,10,100,100,100,0]);
assert_eq!(doc.unwrap().size, 10);
}
#[test]
fn test_cstring_decode() {
let stream: ~[u8] = ~[104,101,108,108,111,0];
let mut parser = BsonParser::new(stream);
assert_eq!(parser.cstring(), ~"hello");
}
#[test]
fn test_double_decode() {
let stream: ~[u8] = ~[110,134,27,240,249,33,9,64];
let mut parser = BsonParser::new(stream);
let d = parser._double();
match d {
Double(d2) => {
assert!(d2.approx_eq(&3.14159f64));
}
_ => fail!("failed in a test case; how did I get here?")
}
}
#[test]
fn test_document_decode() {
let stream1: ~[u8] = ~[11,0,0,0,8,102,111,111,0,1,0];
let mut parser1 = BsonParser::new(stream1);
let mut doc1 = BsonDocument::new();
doc1.put(~"foo", Bool(true));
assert_eq!(parser1.document().unwrap(), doc1);
let stream2: ~[u8] = ~[45,0,0,0,4,102,111,111,0,22,0,0,0,2,48,0,
6,0,0,0,104,101,108,108,111,0,8,49,0,0,
0,2,98,97,122,0,4,0,0,0,113,117,120,0,0];
let mut inside = BsonDocument::new();
inside.put_all(~[(~"0", UString(~"hello")), (~"1", Bool(false))]);
let mut doc2 = BsonDocument::new();
doc2.put_all(~[(~"foo", Array(~inside.clone())), (~"baz", UString(~"qux"))]);
assert_eq!(decode(stream2).unwrap(), doc2);
}
#[test]
fn test_binary_decode() {
let stream: ~[u8] = ~[6,0,0,0,0,1,2,3,4,5,6];
let mut parser = BsonParser::new(stream);
assert_eq!(parser._binary(), Binary(0, ~[1,2,3,4,5,6]));
}
#[test]
fn test_dbref_encode() {
let mut doc = BsonDocument::new();
doc.put(~"foo", DBRef(~"bar", ~ObjectId(~[0u8,1,2,3,4,5,6,7,8,9,10,11])));
let stream: ~[u8] = ~[30,0,0,0,12,102,111,111,0,4,0,0,0,98,97,114,0,0,1,2,3,4,5,6,7,8,9,10,11,0];
assert_eq!(decode(stream).unwrap(), doc)
}
//TODO: get bson strings of torture-test objects
#[bench]
fn bench_basic_obj_decode(b: &mut BenchHarness) {
do b.iter {
let stream: ~[u8] = ~[45,0,0,0,4,102,111,
111,0,22,0,0,0,2,48,0,6,0,0,0,104,101,108,
108,111,0,8,49,0,0,0,2,98,97,122,0,4,0,0,0,
113,117,120,0,0];
decode(stream);
}
}
}
| {
let s = match self._string() {
UString(rs) => rs,
_ => return Err(~"invalid string found in dbref")
};
let d = self.stream.aggregate(12);
Ok(DBRef(s, ~ObjectId(d)))
} | identifier_body |
decode.rs | /* Copyright 2013 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::str::from_bytes;
use std::int::range;
use std::cast::transmute;
use encode::*;
use tools::stream::*;
static L_END: bool = true;
//Format codes
static DOUBLE: u8 = 0x01;
static STRING: u8 = 0x02;
static EMBED: u8 = 0x03;
static ARRAY: u8 = 0x04;
static BINARY: u8 = 0x05;
static OBJID: u8 = 0x07;
static BOOL: u8 = 0x08;
static UTCDATE: u8 = 0x09;
static NULL: u8 = 0x0A;
static REGEX: u8 = 0x0B;
static DBREF: u8 = 0x0C;
static JSCRIPT: u8 = 0x0D;
static JSCOPE: u8 = 0x0F;
static INT32: u8 = 0x10;
static TSTAMP: u8 = 0x11;
static INT64: u8 = 0x12;
static MINKEY: u8 = 0xFF;
static MAXKEY: u8 = 0x7F;
///Parser object for BSON. T is constrained to Stream<u8>.
pub struct BsonParser<T> {
stream: T
}
///Collects up to 8 bytes in order as a u64.
priv fn bytesum(bytes: &[u8]) -> u64 {
let mut i = 0;
let mut ret: u64 = 0;
for bytes.iter().advance |&byte| {
ret |= (byte as u64) >> (8 * i);
i += 1;
}
ret
}
impl<T:Stream<u8>> BsonParser<T> {
///Parse a byte stream into a BsonDocument. Returns an error string on parse failure.
///Initializing a BsonParser and calling document() will fully convert a ~[u8]
///into a BsonDocument if it was formatted correctly.
pub fn document(&mut self) -> Result<BsonDocument,~str> {
let size = bytesum(self.stream.aggregate(4)) as i32;
let mut elemcode = self.stream.expect(&[
DOUBLE,STRING,EMBED,ARRAY,BINARY,OBJID,
BOOL,UTCDATE,NULL,REGEX,DBREF,JSCRIPT,JSCOPE,
INT32,TSTAMP,INT64,MINKEY,MAXKEY]);
self.stream.pass(1);
let mut ret = BsonDocument::new();
while elemcode!= None {
let key = self.cstring();
let val: Document = match elemcode {
Some(DOUBLE) => self._double(),
Some(STRING) => self._string(),
Some(EMBED) => {
let doc = self._embed();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(ARRAY) => {
let doc = self._array();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(BINARY) => self._binary(),
Some(OBJID) => ObjectId(self.stream.aggregate(12)),
Some(BOOL) => self._bool(),
Some(UTCDATE) => UTCDate(bytesum(self.stream.aggregate(8)) as i64),
Some(NULL) => Null,
Some(REGEX) => self._regex(),
Some(DBREF) => {
let doc = self._dbref();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(JSCRIPT) => {
let doc = self._jscript();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(JSCOPE) => {
let doc = self._jscope();
match doc {
Ok(d) => d,
Err(e) => return Err(e)
}
}
Some(INT32) => Int32(bytesum(self.stream.aggregate(4)) as i32),
Some(TSTAMP) => Timestamp(bytesum(self.stream.aggregate(4)) as u32,
bytesum(self.stream.aggregate(4)) as u32),
Some(INT64) => Int64(bytesum(self.stream.aggregate(8)) as i64),
Some(MINKEY) => MinKey,
Some(MAXKEY) => MaxKey,
_ => return Err(~"an invalid element code was found")
};
ret.put(key, val);
elemcode = self.stream.expect(&[
DOUBLE,STRING,EMBED,ARRAY,BINARY,OBJID,
BOOL,UTCDATE,NULL,REGEX,DBREF,JSCRIPT,JSCOPE,
INT32,TSTAMP,INT64,MINKEY,MAXKEY]);
if self.stream.has_next() { self.stream.pass(1); }
}
ret.size = size;
Ok(ret)
}
///Parse a string without denoting its length. Mainly for keys.
fn cstring(&mut self) -> ~str {
let is_0: &fn(&u8) -> bool = |&x| x == 0x00;
let s = from_bytes(self.stream.until(is_0));
self.stream.pass(1);
s
}
///Parse a double.
fn _double(&mut self) -> Document {
let mut u: u64 = 0;
for range(0,8) |i| {
//TODO: how will this hold up on big-endian architectures?
u |= (*self.stream.first() as u64 << ((8 * i)));
self.stream.pass(1);
}
let v: &f64 = unsafe { transmute(&u) };
Double(*v)
}
///Parse a string with length.
fn _string(&mut self) -> Document {
self.stream.pass(4); //skip length
let v = self.cstring();
UString(v)
}
///Parse an embedded object. May fail.
fn _embed(&mut self) -> Result<Document,~str> {
return self.document().chain(|s| Ok(Embedded(~s)));
}
///Parse an embedded array. May fail.
fn _array(&mut self) -> Result<Document,~str> {
return self.document().chain(|s| Ok(Array(~s)));
}
///Parse generic binary data.
fn _binary(&mut self) -> Document {
let count = bytesum(self.stream.aggregate(4));
let subtype = *(self.stream.first());
self.stream.pass(1);
let data = self.stream.aggregate(count as uint);
Binary(subtype, data)
}
///Parse a boolean.
fn _bool(&mut self) -> Document {
let ret = (*self.stream.first()) as bool;
self.stream.pass(1);
Bool(ret)
}
///Parse a regex.
fn _regex(&mut self) -> Document {
let s1 = self.cstring();
let s2 = self.cstring();
Regex(s1, s2)
}
fn _dbref(&mut self) -> Result<Document, ~str> {
let s = match self._string() {
UString(rs) => rs,
_ => return Err(~"invalid string found in dbref")
};
let d = self.stream.aggregate(12);
Ok(DBRef(s, ~ObjectId(d)))
}
///Parse a javascript object.
fn _jscript(&mut self) -> Result<Document, ~str> {
let s = self._string();
//using this to avoid irrefutable pattern error
match s {
UString(s) => Ok(JScript(s)),
_ => Err(~"invalid string found in javascript")
}
}
///Parse a scoped javascript object.
fn _jscope(&mut self) -> Result<Document,~str> {
self.stream.pass(4);
let s = self.cstring();
let doc = self.document();
return doc.chain(|d| Ok(JScriptWithScope(s.clone(),~d)));
}
///Create a new parser with a given stream.
pub fn new(stream: T) -> BsonParser<T> { BsonParser { stream: stream } }
}
///Standalone decode binding.
///This is equivalent to initializing a parser and calling document().
pub fn decode(b: ~[u8]) -> Result<BsonDocument,~str> {
let mut parser = BsonParser::new(b);
parser.document()
}
#[cfg(test)]
mod tests {
use super::*;
use encode::*;
use extra::test::BenchHarness;
#[test]
fn test_decode_size() {
let doc = decode(~[10,0,0,0,10,100,100,100,0]);
assert_eq!(doc.unwrap().size, 10);
}
#[test]
fn test_cstring_decode() {
let stream: ~[u8] = ~[104,101,108,108,111,0];
let mut parser = BsonParser::new(stream);
assert_eq!(parser.cstring(), ~"hello");
}
#[test]
fn test_double_decode() {
let stream: ~[u8] = ~[110,134,27,240,249,33,9,64];
let mut parser = BsonParser::new(stream);
let d = parser._double();
match d {
Double(d2) => {
assert!(d2.approx_eq(&3.14159f64));
}
_ => fail!("failed in a test case; how did I get here?")
}
}
#[test]
fn test_document_decode() {
let stream1: ~[u8] = ~[11,0,0,0,8,102,111,111,0,1,0];
let mut parser1 = BsonParser::new(stream1);
let mut doc1 = BsonDocument::new();
doc1.put(~"foo", Bool(true));
assert_eq!(parser1.document().unwrap(), doc1);
let stream2: ~[u8] = ~[45,0,0,0,4,102,111,111,0,22,0,0,0,2,48,0,
6,0,0,0,104,101,108,108,111,0,8,49,0,0,
0,2,98,97,122,0,4,0,0,0,113,117,120,0,0];
let mut inside = BsonDocument::new();
inside.put_all(~[(~"0", UString(~"hello")), (~"1", Bool(false))]);
let mut doc2 = BsonDocument::new();
doc2.put_all(~[(~"foo", Array(~inside.clone())), (~"baz", UString(~"qux"))]);
assert_eq!(decode(stream2).unwrap(), doc2);
}
#[test]
fn test_binary_decode() {
let stream: ~[u8] = ~[6,0,0,0,0,1,2,3,4,5,6];
let mut parser = BsonParser::new(stream);
assert_eq!(parser._binary(), Binary(0, ~[1,2,3,4,5,6]));
}
#[test]
fn | () {
let mut doc = BsonDocument::new();
doc.put(~"foo", DBRef(~"bar", ~ObjectId(~[0u8,1,2,3,4,5,6,7,8,9,10,11])));
let stream: ~[u8] = ~[30,0,0,0,12,102,111,111,0,4,0,0,0,98,97,114,0,0,1,2,3,4,5,6,7,8,9,10,11,0];
assert_eq!(decode(stream).unwrap(), doc)
}
//TODO: get bson strings of torture-test objects
#[bench]
fn bench_basic_obj_decode(b: &mut BenchHarness) {
do b.iter {
let stream: ~[u8] = ~[45,0,0,0,4,102,111,
111,0,22,0,0,0,2,48,0,6,0,0,0,104,101,108,
108,111,0,8,49,0,0,0,2,98,97,122,0,4,0,0,0,
113,117,120,0,0];
decode(stream);
}
}
}
| test_dbref_encode | identifier_name |
interop.rs | use crate::network::{self, get, post};
use anyhow::{anyhow, Result};
use std::collections::{BTreeMap, BTreeSet};
use std::fs::File;
use std::path::Path;
use url::Url;
use wptfyi::interop::{Category, FocusArea};
use wptfyi::metadata::MetadataEntry;
use wptfyi::result::Status;
use wptfyi::search::{AndClause, Clause, LabelClause, NotClause, OrClause, Query, ResultClause};
use wptfyi::{interop, metadata, result, run, search, Wptfyi};
fn fx_failures_query(labels: &[&str]) -> Query {
let pass_statuses = &[Status::Ok, Status::Pass];
let mut root_clause = AndClause {
and: Vec::with_capacity(3),
};
for status in pass_statuses.iter() {
root_clause.push(Clause::Not(NotClause {
not: Box::new(Clause::Result(ResultClause {
browser_name: "firefox".to_owned(),
status: status.clone(),
})),
}));
}
if!labels.is_empty() {
let mut labels_clause = OrClause {
or: Vec::with_capacity(labels.len()),
};
for label in labels {
labels_clause.push(Clause::Label(LabelClause {
label: (*label).into(),
}));
}
root_clause.push(Clause::Or(labels_clause));
}
Query {
query: Clause::And(root_clause),
}
}
fn get_run_data(wptfyi: &Wptfyi, client: &reqwest::blocking::Client) -> Result<Vec<result::Run>> {
let mut runs = wptfyi.runs();
for product in ["chrome", "firefox", "safari"].iter() {
runs.add_product(product, "experimental")
}
runs.add_label("master");
runs.set_max_count(100);
Ok(run::parse(&get(client, &String::from(runs.url()), None)?)?)
}
fn get_metadata(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
) -> Result<BTreeMap<String, Vec<MetadataEntry>>> {
let mut metadata = wptfyi.metadata();
for product in ["firefox"].iter() {
metadata.add_product(product)
}
Ok(metadata::parse(&get(
client,
&String::from(metadata.url()),
None,
)?)?)
}
pub fn get_fx_failures(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
run_ids: &[i64],
labels: &[&str],
) -> Result<result::SearchData> {
let mut search = wptfyi.search();
for product in ["chrome", "firefox", "safari"].iter() {
search.add_product(product, "experimental")
}
search.set_query(run_ids, fx_failures_query(labels));
search.add_label("master");
Ok(search::parse(&post(
client,
&String::from(search.url()),
None,
search.body(),
)?)?)
}
pub fn get_interop_data(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
) -> Result<BTreeMap<String, interop::YearData>> {
let runs = wptfyi.interop_data();
Ok(interop::parse(&get(
client,
&String::from(runs.url()),
None,
)?)?)
}
pub fn get_interop_categories(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
) -> Result<BTreeMap<String, interop::Categories>> {
Ok(interop::parse_categories(&get(
client,
&String::from(wptfyi.interop_categories().url()),
None,
)?)?)
}
pub fn get_interop_scores(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
browser_channel: interop::BrowserChannel,
) -> Result<Vec<interop::ScoreRow>> {
Ok(interop::parse_scores(&get(
client,
&String::from(wptfyi.interop_scores(browser_channel).url()),
None,
)?)?)
}
fn latest_runs(runs: &[result::Run]) -> Result<Vec<&result::Run>> {
let mut runs_by_commit = run::runs_by_commit(runs);
let latest_rev = runs_by_commit
.iter()
.filter(|(_, value)| value.len() == 3)
.max_by(|(_, value_1), (_, value_2)| {
let date_1 = value_1.iter().map(|x| x.created_at).max();
let date_2 = value_2.iter().map(|x| x.created_at).max();
date_1.cmp(&date_2)
})
.map(|(key, _)| key.clone());
latest_rev
.and_then(|x| runs_by_commit.remove(&x))
.ok_or_else(|| anyhow!("Failed to find any complete runs"))
}
pub fn write_focus_area(
fyi: &Wptfyi,
client: &reqwest::blocking::Client,
name: &str,
focus_area: &FocusArea,
run_ids: &[i64],
categories_by_name: &BTreeMap<String, &Category>,
metadata: &BTreeMap<String, Vec<MetadataEntry>>,
) -> Result<()> {
if!focus_area.counts_toward_score {
return Ok(());
}
let labels = &categories_by_name
.get(name)
.ok_or_else(|| anyhow!("Didn't find category {}", name))?
.labels;
let path = format!("../docs/interop-2023/{}.csv", name);
let data_path = Path::new(&path);
let out_f = File::create(data_path)?;
let mut writer = csv::WriterBuilder::new()
.quote_style(csv::QuoteStyle::NonNumeric)
.from_writer(out_f);
let results = get_fx_failures(
&fyi,
&client,
&run_ids,
&labels
.iter()
.filter_map(|x| {
if x.starts_with("interop-") {
Some(x.as_ref())
} else {
None
}
})
.collect::<Vec<&str>>(),
)?;
let order = &["firefox", "chrome", "safari"];
let maybe_browser_list = results
.runs
.iter()
.map(|x| order.iter().position(|target| *target == x.browser_name))
.collect::<Option<Vec<usize>>>();
if maybe_browser_list.is_none() {
return Err(anyhow!("Didn't get results for all three browsers"));
}
let browser_list = maybe_browser_list.unwrap();
writer.write_record([
"Test",
"Firefox Failures",
"Chrome Failures",
"Safari Failures",
"Bugs",
])?;
for result in results.results.iter() {
let mut scores = vec![String::new(), String::new(), String::new()];
for (output_idx, browser_idx) in browser_list.iter().enumerate() {
if let Some(status) = result.legacy_status.get(*browser_idx) {
if output_idx == 0 {
// For Firefox output the total as this is the number of failures
scores[output_idx].push_str(&format!("{}", status.total));
} else {
// For Firefox output the total as this is the number of failures
scores[output_idx].push_str(&format!("{}", status.total - status.passes));
}
}
}
let mut bugs = BTreeSet::new();
if let Some(test_meta) = metadata.get(&result.test) {
for metadata_entry in test_meta.iter() {
if metadata_entry.product!= "firefox"
||!metadata_entry
.url
.starts_with("https://bugzilla.mozilla.org")
{
continue;
}
// For now add all bugs irrespective of status or subtest
if let Ok(bug_url) = Url::parse(&metadata_entry.url) {
if let Some((_, bug_id)) = bug_url.query_pairs().find(|(key, _)| key == "id") {
bugs.insert(bug_id.into_owned());
}
}
}
}
let mut bugs_col = String::with_capacity(8 * bugs.len());
for bug in bugs.iter() {
if!bugs_col.is_empty() {
bugs_col.push(' ');
}
bugs_col.push_str(bug);
}
let record = &[&result.test, &scores[0], &scores[1], &scores[2], &bugs_col];
writer.write_record(record)?;
}
Ok(())
}
pub fn interop_columns(focus_areas: &BTreeMap<String, interop::FocusArea>) -> Vec<&str> {
let mut columns = Vec::with_capacity(focus_areas.len());
for (name, data) in focus_areas.iter() {
if data.counts_toward_score {
columns.push(name.as_ref());
}
}
columns
}
fn browser_score(browser: &str, columns: &[&str], row: &interop::ScoreRow) -> Result<f64> |
pub fn write_browser_interop_scores(
browsers: &[&str],
scores: &[interop::ScoreRow],
interop_2023_data: &interop::YearData,
) -> Result<()> {
let browser_columns = interop_columns(&interop_2023_data.focus_areas);
let data_path = Path::new("../docs/interop-2023/scores.csv");
let out_f = File::create(data_path)?;
let mut writer = csv::WriterBuilder::new()
.quote_style(csv::QuoteStyle::NonNumeric)
.from_writer(out_f);
let mut headers = Vec::with_capacity(browsers.len() + 1);
headers.push("date");
headers.extend_from_slice(browsers);
writer.write_record(headers)?;
let mut output: Vec<String> = Vec::with_capacity(browsers.len() + 1);
for row in scores {
output.resize(0, "".into());
output.push(
row.get("date")
.ok_or_else(|| anyhow!("Failed to read date"))?
.into(),
);
for browser in browsers {
let score = browser_score(browser, &browser_columns, row)?;
output.push(format!("{:.2}", score))
}
writer.write_record(&output)?;
}
Ok(())
}
pub fn run() -> Result<()> {
let client = network::client();
let fyi = Wptfyi::new(None);
let runs = get_run_data(&fyi, &client)?;
let run_ids = latest_runs(&runs)?
.iter()
.map(|x| x.id)
.collect::<Vec<i64>>();
let interop_data = get_interop_data(&fyi, &client)?;
let interop_2023_data = interop_data
.get("2023")
.ok_or_else(|| anyhow!("Failed to get Interop-2023 metadata"))?;
let interop_categories = get_interop_categories(&fyi, &client)?;
let interop_2023_categories = interop_categories
.get("2023")
.ok_or_else(|| anyhow!("Failed to get Interop-2023 categories"))?;
let categories_by_name = interop_2023_categories.by_name();
let metadata = get_metadata(&fyi, &client)?;
for (name, focus_area) in interop_2023_data.focus_areas.iter() {
write_focus_area(
&fyi,
&client,
name,
focus_area,
&run_ids,
&categories_by_name,
&metadata,
)?;
}
let scores = get_interop_scores(&fyi, &client, interop::BrowserChannel::Experimental)?;
write_browser_interop_scores(&["firefox", "chrome", "safari"], &scores, interop_2023_data)?;
Ok(())
}
| {
let mut total_score: u64 = 0;
for column in columns {
let column = format!("{}-{}", browser, column);
let score = row
.get(&column)
.ok_or_else(|| anyhow!("Failed to get column {}", column))?;
let value: u64 = score
.parse::<u64>()
.map_err(|_| anyhow!("Failed to parse score"))?;
total_score += value;
}
Ok(total_score as f64 / (10 * columns.len()) as f64)
} | identifier_body |
interop.rs | use crate::network::{self, get, post};
use anyhow::{anyhow, Result};
use std::collections::{BTreeMap, BTreeSet};
use std::fs::File;
use std::path::Path;
use url::Url;
use wptfyi::interop::{Category, FocusArea};
use wptfyi::metadata::MetadataEntry;
use wptfyi::result::Status;
use wptfyi::search::{AndClause, Clause, LabelClause, NotClause, OrClause, Query, ResultClause};
use wptfyi::{interop, metadata, result, run, search, Wptfyi};
fn fx_failures_query(labels: &[&str]) -> Query {
let pass_statuses = &[Status::Ok, Status::Pass];
let mut root_clause = AndClause {
and: Vec::with_capacity(3),
};
for status in pass_statuses.iter() {
root_clause.push(Clause::Not(NotClause {
not: Box::new(Clause::Result(ResultClause {
browser_name: "firefox".to_owned(),
status: status.clone(),
})),
}));
}
if!labels.is_empty() {
let mut labels_clause = OrClause {
or: Vec::with_capacity(labels.len()),
};
for label in labels {
labels_clause.push(Clause::Label(LabelClause {
label: (*label).into(),
}));
}
root_clause.push(Clause::Or(labels_clause));
}
Query {
query: Clause::And(root_clause),
}
}
fn get_run_data(wptfyi: &Wptfyi, client: &reqwest::blocking::Client) -> Result<Vec<result::Run>> {
let mut runs = wptfyi.runs();
for product in ["chrome", "firefox", "safari"].iter() {
runs.add_product(product, "experimental")
}
runs.add_label("master");
runs.set_max_count(100);
Ok(run::parse(&get(client, &String::from(runs.url()), None)?)?)
}
fn get_metadata(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
) -> Result<BTreeMap<String, Vec<MetadataEntry>>> {
let mut metadata = wptfyi.metadata();
for product in ["firefox"].iter() {
metadata.add_product(product)
}
Ok(metadata::parse(&get(
client,
&String::from(metadata.url()),
None,
)?)?)
}
pub fn get_fx_failures(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
run_ids: &[i64],
labels: &[&str],
) -> Result<result::SearchData> {
let mut search = wptfyi.search();
for product in ["chrome", "firefox", "safari"].iter() {
search.add_product(product, "experimental")
}
search.set_query(run_ids, fx_failures_query(labels));
search.add_label("master");
Ok(search::parse(&post(
client,
&String::from(search.url()),
None,
search.body(),
)?)?)
}
pub fn get_interop_data(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
) -> Result<BTreeMap<String, interop::YearData>> {
let runs = wptfyi.interop_data();
Ok(interop::parse(&get(
client,
&String::from(runs.url()),
None,
)?)?)
}
pub fn get_interop_categories(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
) -> Result<BTreeMap<String, interop::Categories>> {
Ok(interop::parse_categories(&get(
client,
&String::from(wptfyi.interop_categories().url()),
None,
)?)?)
}
pub fn get_interop_scores(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
browser_channel: interop::BrowserChannel,
) -> Result<Vec<interop::ScoreRow>> {
Ok(interop::parse_scores(&get(
client,
&String::from(wptfyi.interop_scores(browser_channel).url()),
None,
)?)?)
}
fn latest_runs(runs: &[result::Run]) -> Result<Vec<&result::Run>> {
let mut runs_by_commit = run::runs_by_commit(runs);
let latest_rev = runs_by_commit
.iter()
.filter(|(_, value)| value.len() == 3)
.max_by(|(_, value_1), (_, value_2)| {
let date_1 = value_1.iter().map(|x| x.created_at).max();
let date_2 = value_2.iter().map(|x| x.created_at).max();
date_1.cmp(&date_2)
})
.map(|(key, _)| key.clone());
latest_rev
.and_then(|x| runs_by_commit.remove(&x))
.ok_or_else(|| anyhow!("Failed to find any complete runs"))
}
pub fn write_focus_area(
fyi: &Wptfyi,
client: &reqwest::blocking::Client,
name: &str,
focus_area: &FocusArea,
run_ids: &[i64],
categories_by_name: &BTreeMap<String, &Category>,
metadata: &BTreeMap<String, Vec<MetadataEntry>>,
) -> Result<()> {
if!focus_area.counts_toward_score {
return Ok(());
}
let labels = &categories_by_name
.get(name)
.ok_or_else(|| anyhow!("Didn't find category {}", name))?
.labels;
let path = format!("../docs/interop-2023/{}.csv", name);
let data_path = Path::new(&path);
let out_f = File::create(data_path)?;
let mut writer = csv::WriterBuilder::new()
.quote_style(csv::QuoteStyle::NonNumeric)
.from_writer(out_f);
let results = get_fx_failures(
&fyi,
&client,
&run_ids,
&labels
.iter()
.filter_map(|x| {
if x.starts_with("interop-") {
Some(x.as_ref())
} else {
None
}
})
.collect::<Vec<&str>>(),
)?;
let order = &["firefox", "chrome", "safari"];
let maybe_browser_list = results
.runs
.iter()
.map(|x| order.iter().position(|target| *target == x.browser_name))
.collect::<Option<Vec<usize>>>();
if maybe_browser_list.is_none() |
let browser_list = maybe_browser_list.unwrap();
writer.write_record([
"Test",
"Firefox Failures",
"Chrome Failures",
"Safari Failures",
"Bugs",
])?;
for result in results.results.iter() {
let mut scores = vec![String::new(), String::new(), String::new()];
for (output_idx, browser_idx) in browser_list.iter().enumerate() {
if let Some(status) = result.legacy_status.get(*browser_idx) {
if output_idx == 0 {
// For Firefox output the total as this is the number of failures
scores[output_idx].push_str(&format!("{}", status.total));
} else {
// For Firefox output the total as this is the number of failures
scores[output_idx].push_str(&format!("{}", status.total - status.passes));
}
}
}
let mut bugs = BTreeSet::new();
if let Some(test_meta) = metadata.get(&result.test) {
for metadata_entry in test_meta.iter() {
if metadata_entry.product!= "firefox"
||!metadata_entry
.url
.starts_with("https://bugzilla.mozilla.org")
{
continue;
}
// For now add all bugs irrespective of status or subtest
if let Ok(bug_url) = Url::parse(&metadata_entry.url) {
if let Some((_, bug_id)) = bug_url.query_pairs().find(|(key, _)| key == "id") {
bugs.insert(bug_id.into_owned());
}
}
}
}
let mut bugs_col = String::with_capacity(8 * bugs.len());
for bug in bugs.iter() {
if!bugs_col.is_empty() {
bugs_col.push(' ');
}
bugs_col.push_str(bug);
}
let record = &[&result.test, &scores[0], &scores[1], &scores[2], &bugs_col];
writer.write_record(record)?;
}
Ok(())
}
pub fn interop_columns(focus_areas: &BTreeMap<String, interop::FocusArea>) -> Vec<&str> {
let mut columns = Vec::with_capacity(focus_areas.len());
for (name, data) in focus_areas.iter() {
if data.counts_toward_score {
columns.push(name.as_ref());
}
}
columns
}
fn browser_score(browser: &str, columns: &[&str], row: &interop::ScoreRow) -> Result<f64> {
let mut total_score: u64 = 0;
for column in columns {
let column = format!("{}-{}", browser, column);
let score = row
.get(&column)
.ok_or_else(|| anyhow!("Failed to get column {}", column))?;
let value: u64 = score
.parse::<u64>()
.map_err(|_| anyhow!("Failed to parse score"))?;
total_score += value;
}
Ok(total_score as f64 / (10 * columns.len()) as f64)
}
pub fn write_browser_interop_scores(
browsers: &[&str],
scores: &[interop::ScoreRow],
interop_2023_data: &interop::YearData,
) -> Result<()> {
let browser_columns = interop_columns(&interop_2023_data.focus_areas);
let data_path = Path::new("../docs/interop-2023/scores.csv");
let out_f = File::create(data_path)?;
let mut writer = csv::WriterBuilder::new()
.quote_style(csv::QuoteStyle::NonNumeric)
.from_writer(out_f);
let mut headers = Vec::with_capacity(browsers.len() + 1);
headers.push("date");
headers.extend_from_slice(browsers);
writer.write_record(headers)?;
let mut output: Vec<String> = Vec::with_capacity(browsers.len() + 1);
for row in scores {
output.resize(0, "".into());
output.push(
row.get("date")
.ok_or_else(|| anyhow!("Failed to read date"))?
.into(),
);
for browser in browsers {
let score = browser_score(browser, &browser_columns, row)?;
output.push(format!("{:.2}", score))
}
writer.write_record(&output)?;
}
Ok(())
}
pub fn run() -> Result<()> {
let client = network::client();
let fyi = Wptfyi::new(None);
let runs = get_run_data(&fyi, &client)?;
let run_ids = latest_runs(&runs)?
.iter()
.map(|x| x.id)
.collect::<Vec<i64>>();
let interop_data = get_interop_data(&fyi, &client)?;
let interop_2023_data = interop_data
.get("2023")
.ok_or_else(|| anyhow!("Failed to get Interop-2023 metadata"))?;
let interop_categories = get_interop_categories(&fyi, &client)?;
let interop_2023_categories = interop_categories
.get("2023")
.ok_or_else(|| anyhow!("Failed to get Interop-2023 categories"))?;
let categories_by_name = interop_2023_categories.by_name();
let metadata = get_metadata(&fyi, &client)?;
for (name, focus_area) in interop_2023_data.focus_areas.iter() {
write_focus_area(
&fyi,
&client,
name,
focus_area,
&run_ids,
&categories_by_name,
&metadata,
)?;
}
let scores = get_interop_scores(&fyi, &client, interop::BrowserChannel::Experimental)?;
write_browser_interop_scores(&["firefox", "chrome", "safari"], &scores, interop_2023_data)?;
Ok(())
}
| {
return Err(anyhow!("Didn't get results for all three browsers"));
} | conditional_block |
interop.rs | use crate::network::{self, get, post};
use anyhow::{anyhow, Result};
use std::collections::{BTreeMap, BTreeSet};
use std::fs::File;
use std::path::Path;
use url::Url;
use wptfyi::interop::{Category, FocusArea};
use wptfyi::metadata::MetadataEntry;
use wptfyi::result::Status;
use wptfyi::search::{AndClause, Clause, LabelClause, NotClause, OrClause, Query, ResultClause};
use wptfyi::{interop, metadata, result, run, search, Wptfyi};
fn fx_failures_query(labels: &[&str]) -> Query {
let pass_statuses = &[Status::Ok, Status::Pass];
let mut root_clause = AndClause {
and: Vec::with_capacity(3),
};
for status in pass_statuses.iter() {
root_clause.push(Clause::Not(NotClause {
not: Box::new(Clause::Result(ResultClause {
browser_name: "firefox".to_owned(),
status: status.clone(),
})),
}));
}
if!labels.is_empty() {
let mut labels_clause = OrClause {
or: Vec::with_capacity(labels.len()),
};
for label in labels {
labels_clause.push(Clause::Label(LabelClause {
label: (*label).into(),
}));
}
root_clause.push(Clause::Or(labels_clause));
}
Query {
query: Clause::And(root_clause),
}
}
fn | (wptfyi: &Wptfyi, client: &reqwest::blocking::Client) -> Result<Vec<result::Run>> {
let mut runs = wptfyi.runs();
for product in ["chrome", "firefox", "safari"].iter() {
runs.add_product(product, "experimental")
}
runs.add_label("master");
runs.set_max_count(100);
Ok(run::parse(&get(client, &String::from(runs.url()), None)?)?)
}
fn get_metadata(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
) -> Result<BTreeMap<String, Vec<MetadataEntry>>> {
let mut metadata = wptfyi.metadata();
for product in ["firefox"].iter() {
metadata.add_product(product)
}
Ok(metadata::parse(&get(
client,
&String::from(metadata.url()),
None,
)?)?)
}
pub fn get_fx_failures(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
run_ids: &[i64],
labels: &[&str],
) -> Result<result::SearchData> {
let mut search = wptfyi.search();
for product in ["chrome", "firefox", "safari"].iter() {
search.add_product(product, "experimental")
}
search.set_query(run_ids, fx_failures_query(labels));
search.add_label("master");
Ok(search::parse(&post(
client,
&String::from(search.url()),
None,
search.body(),
)?)?)
}
pub fn get_interop_data(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
) -> Result<BTreeMap<String, interop::YearData>> {
let runs = wptfyi.interop_data();
Ok(interop::parse(&get(
client,
&String::from(runs.url()),
None,
)?)?)
}
pub fn get_interop_categories(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
) -> Result<BTreeMap<String, interop::Categories>> {
Ok(interop::parse_categories(&get(
client,
&String::from(wptfyi.interop_categories().url()),
None,
)?)?)
}
pub fn get_interop_scores(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
browser_channel: interop::BrowserChannel,
) -> Result<Vec<interop::ScoreRow>> {
Ok(interop::parse_scores(&get(
client,
&String::from(wptfyi.interop_scores(browser_channel).url()),
None,
)?)?)
}
fn latest_runs(runs: &[result::Run]) -> Result<Vec<&result::Run>> {
let mut runs_by_commit = run::runs_by_commit(runs);
let latest_rev = runs_by_commit
.iter()
.filter(|(_, value)| value.len() == 3)
.max_by(|(_, value_1), (_, value_2)| {
let date_1 = value_1.iter().map(|x| x.created_at).max();
let date_2 = value_2.iter().map(|x| x.created_at).max();
date_1.cmp(&date_2)
})
.map(|(key, _)| key.clone());
latest_rev
.and_then(|x| runs_by_commit.remove(&x))
.ok_or_else(|| anyhow!("Failed to find any complete runs"))
}
pub fn write_focus_area(
fyi: &Wptfyi,
client: &reqwest::blocking::Client,
name: &str,
focus_area: &FocusArea,
run_ids: &[i64],
categories_by_name: &BTreeMap<String, &Category>,
metadata: &BTreeMap<String, Vec<MetadataEntry>>,
) -> Result<()> {
if!focus_area.counts_toward_score {
return Ok(());
}
let labels = &categories_by_name
.get(name)
.ok_or_else(|| anyhow!("Didn't find category {}", name))?
.labels;
let path = format!("../docs/interop-2023/{}.csv", name);
let data_path = Path::new(&path);
let out_f = File::create(data_path)?;
let mut writer = csv::WriterBuilder::new()
.quote_style(csv::QuoteStyle::NonNumeric)
.from_writer(out_f);
let results = get_fx_failures(
&fyi,
&client,
&run_ids,
&labels
.iter()
.filter_map(|x| {
if x.starts_with("interop-") {
Some(x.as_ref())
} else {
None
}
})
.collect::<Vec<&str>>(),
)?;
let order = &["firefox", "chrome", "safari"];
let maybe_browser_list = results
.runs
.iter()
.map(|x| order.iter().position(|target| *target == x.browser_name))
.collect::<Option<Vec<usize>>>();
if maybe_browser_list.is_none() {
return Err(anyhow!("Didn't get results for all three browsers"));
}
let browser_list = maybe_browser_list.unwrap();
writer.write_record([
"Test",
"Firefox Failures",
"Chrome Failures",
"Safari Failures",
"Bugs",
])?;
for result in results.results.iter() {
let mut scores = vec![String::new(), String::new(), String::new()];
for (output_idx, browser_idx) in browser_list.iter().enumerate() {
if let Some(status) = result.legacy_status.get(*browser_idx) {
if output_idx == 0 {
// For Firefox output the total as this is the number of failures
scores[output_idx].push_str(&format!("{}", status.total));
} else {
// For Firefox output the total as this is the number of failures
scores[output_idx].push_str(&format!("{}", status.total - status.passes));
}
}
}
let mut bugs = BTreeSet::new();
if let Some(test_meta) = metadata.get(&result.test) {
for metadata_entry in test_meta.iter() {
if metadata_entry.product!= "firefox"
||!metadata_entry
.url
.starts_with("https://bugzilla.mozilla.org")
{
continue;
}
// For now add all bugs irrespective of status or subtest
if let Ok(bug_url) = Url::parse(&metadata_entry.url) {
if let Some((_, bug_id)) = bug_url.query_pairs().find(|(key, _)| key == "id") {
bugs.insert(bug_id.into_owned());
}
}
}
}
let mut bugs_col = String::with_capacity(8 * bugs.len());
for bug in bugs.iter() {
if!bugs_col.is_empty() {
bugs_col.push(' ');
}
bugs_col.push_str(bug);
}
let record = &[&result.test, &scores[0], &scores[1], &scores[2], &bugs_col];
writer.write_record(record)?;
}
Ok(())
}
pub fn interop_columns(focus_areas: &BTreeMap<String, interop::FocusArea>) -> Vec<&str> {
let mut columns = Vec::with_capacity(focus_areas.len());
for (name, data) in focus_areas.iter() {
if data.counts_toward_score {
columns.push(name.as_ref());
}
}
columns
}
fn browser_score(browser: &str, columns: &[&str], row: &interop::ScoreRow) -> Result<f64> {
let mut total_score: u64 = 0;
for column in columns {
let column = format!("{}-{}", browser, column);
let score = row
.get(&column)
.ok_or_else(|| anyhow!("Failed to get column {}", column))?;
let value: u64 = score
.parse::<u64>()
.map_err(|_| anyhow!("Failed to parse score"))?;
total_score += value;
}
Ok(total_score as f64 / (10 * columns.len()) as f64)
}
pub fn write_browser_interop_scores(
browsers: &[&str],
scores: &[interop::ScoreRow],
interop_2023_data: &interop::YearData,
) -> Result<()> {
let browser_columns = interop_columns(&interop_2023_data.focus_areas);
let data_path = Path::new("../docs/interop-2023/scores.csv");
let out_f = File::create(data_path)?;
let mut writer = csv::WriterBuilder::new()
.quote_style(csv::QuoteStyle::NonNumeric)
.from_writer(out_f);
let mut headers = Vec::with_capacity(browsers.len() + 1);
headers.push("date");
headers.extend_from_slice(browsers);
writer.write_record(headers)?;
let mut output: Vec<String> = Vec::with_capacity(browsers.len() + 1);
for row in scores {
output.resize(0, "".into());
output.push(
row.get("date")
.ok_or_else(|| anyhow!("Failed to read date"))?
.into(),
);
for browser in browsers {
let score = browser_score(browser, &browser_columns, row)?;
output.push(format!("{:.2}", score))
}
writer.write_record(&output)?;
}
Ok(())
}
pub fn run() -> Result<()> {
let client = network::client();
let fyi = Wptfyi::new(None);
let runs = get_run_data(&fyi, &client)?;
let run_ids = latest_runs(&runs)?
.iter()
.map(|x| x.id)
.collect::<Vec<i64>>();
let interop_data = get_interop_data(&fyi, &client)?;
let interop_2023_data = interop_data
.get("2023")
.ok_or_else(|| anyhow!("Failed to get Interop-2023 metadata"))?;
let interop_categories = get_interop_categories(&fyi, &client)?;
let interop_2023_categories = interop_categories
.get("2023")
.ok_or_else(|| anyhow!("Failed to get Interop-2023 categories"))?;
let categories_by_name = interop_2023_categories.by_name();
let metadata = get_metadata(&fyi, &client)?;
for (name, focus_area) in interop_2023_data.focus_areas.iter() {
write_focus_area(
&fyi,
&client,
name,
focus_area,
&run_ids,
&categories_by_name,
&metadata,
)?;
}
let scores = get_interop_scores(&fyi, &client, interop::BrowserChannel::Experimental)?;
write_browser_interop_scores(&["firefox", "chrome", "safari"], &scores, interop_2023_data)?;
Ok(())
}
| get_run_data | identifier_name |
interop.rs | use crate::network::{self, get, post};
use anyhow::{anyhow, Result};
use std::collections::{BTreeMap, BTreeSet};
use std::fs::File;
use std::path::Path;
use url::Url;
use wptfyi::interop::{Category, FocusArea};
use wptfyi::metadata::MetadataEntry;
use wptfyi::result::Status;
use wptfyi::search::{AndClause, Clause, LabelClause, NotClause, OrClause, Query, ResultClause};
use wptfyi::{interop, metadata, result, run, search, Wptfyi};
fn fx_failures_query(labels: &[&str]) -> Query {
let pass_statuses = &[Status::Ok, Status::Pass];
let mut root_clause = AndClause {
and: Vec::with_capacity(3),
};
for status in pass_statuses.iter() {
root_clause.push(Clause::Not(NotClause {
not: Box::new(Clause::Result(ResultClause {
browser_name: "firefox".to_owned(),
status: status.clone(),
})),
}));
}
if!labels.is_empty() {
let mut labels_clause = OrClause {
or: Vec::with_capacity(labels.len()),
};
for label in labels {
labels_clause.push(Clause::Label(LabelClause {
label: (*label).into(),
}));
}
root_clause.push(Clause::Or(labels_clause));
}
Query {
query: Clause::And(root_clause),
}
}
fn get_run_data(wptfyi: &Wptfyi, client: &reqwest::blocking::Client) -> Result<Vec<result::Run>> {
let mut runs = wptfyi.runs();
for product in ["chrome", "firefox", "safari"].iter() {
runs.add_product(product, "experimental")
}
runs.add_label("master");
runs.set_max_count(100);
Ok(run::parse(&get(client, &String::from(runs.url()), None)?)?)
}
fn get_metadata(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
) -> Result<BTreeMap<String, Vec<MetadataEntry>>> {
let mut metadata = wptfyi.metadata();
for product in ["firefox"].iter() {
metadata.add_product(product)
}
Ok(metadata::parse(&get(
client,
&String::from(metadata.url()),
None,
)?)?)
}
pub fn get_fx_failures(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
run_ids: &[i64],
labels: &[&str],
) -> Result<result::SearchData> {
let mut search = wptfyi.search();
for product in ["chrome", "firefox", "safari"].iter() {
search.add_product(product, "experimental")
}
search.set_query(run_ids, fx_failures_query(labels));
search.add_label("master");
Ok(search::parse(&post(
client,
&String::from(search.url()),
None,
search.body(),
)?)?)
}
pub fn get_interop_data(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
) -> Result<BTreeMap<String, interop::YearData>> {
let runs = wptfyi.interop_data();
Ok(interop::parse(&get(
client,
&String::from(runs.url()),
None,
)?)?)
}
pub fn get_interop_categories(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
) -> Result<BTreeMap<String, interop::Categories>> {
Ok(interop::parse_categories(&get(
client,
&String::from(wptfyi.interop_categories().url()),
None,
)?)?)
}
pub fn get_interop_scores(
wptfyi: &Wptfyi,
client: &reqwest::blocking::Client,
browser_channel: interop::BrowserChannel,
) -> Result<Vec<interop::ScoreRow>> {
Ok(interop::parse_scores(&get(
client,
&String::from(wptfyi.interop_scores(browser_channel).url()),
None,
)?)?)
}
fn latest_runs(runs: &[result::Run]) -> Result<Vec<&result::Run>> {
let mut runs_by_commit = run::runs_by_commit(runs);
let latest_rev = runs_by_commit
.iter()
.filter(|(_, value)| value.len() == 3)
.max_by(|(_, value_1), (_, value_2)| {
let date_1 = value_1.iter().map(|x| x.created_at).max();
let date_2 = value_2.iter().map(|x| x.created_at).max();
date_1.cmp(&date_2)
})
.map(|(key, _)| key.clone());
latest_rev
.and_then(|x| runs_by_commit.remove(&x))
.ok_or_else(|| anyhow!("Failed to find any complete runs"))
}
pub fn write_focus_area(
fyi: &Wptfyi,
client: &reqwest::blocking::Client,
name: &str,
focus_area: &FocusArea,
run_ids: &[i64],
categories_by_name: &BTreeMap<String, &Category>,
metadata: &BTreeMap<String, Vec<MetadataEntry>>,
) -> Result<()> {
if!focus_area.counts_toward_score {
return Ok(());
}
let labels = &categories_by_name
.get(name)
.ok_or_else(|| anyhow!("Didn't find category {}", name))?
.labels;
let path = format!("../docs/interop-2023/{}.csv", name);
let data_path = Path::new(&path);
let out_f = File::create(data_path)?;
let mut writer = csv::WriterBuilder::new()
.quote_style(csv::QuoteStyle::NonNumeric)
.from_writer(out_f);
let results = get_fx_failures(
&fyi,
&client,
&run_ids,
&labels
.iter()
.filter_map(|x| {
if x.starts_with("interop-") {
Some(x.as_ref())
} else {
None
}
})
.collect::<Vec<&str>>(),
)?;
let order = &["firefox", "chrome", "safari"];
let maybe_browser_list = results
.runs
.iter()
.map(|x| order.iter().position(|target| *target == x.browser_name))
.collect::<Option<Vec<usize>>>();
if maybe_browser_list.is_none() {
return Err(anyhow!("Didn't get results for all three browsers"));
}
let browser_list = maybe_browser_list.unwrap();
writer.write_record([
"Test",
"Firefox Failures",
"Chrome Failures",
"Safari Failures",
"Bugs",
])?;
for result in results.results.iter() {
let mut scores = vec![String::new(), String::new(), String::new()];
for (output_idx, browser_idx) in browser_list.iter().enumerate() {
if let Some(status) = result.legacy_status.get(*browser_idx) {
if output_idx == 0 {
// For Firefox output the total as this is the number of failures
scores[output_idx].push_str(&format!("{}", status.total));
} else {
// For Firefox output the total as this is the number of failures
scores[output_idx].push_str(&format!("{}", status.total - status.passes));
}
}
}
let mut bugs = BTreeSet::new();
if let Some(test_meta) = metadata.get(&result.test) {
for metadata_entry in test_meta.iter() {
if metadata_entry.product!= "firefox"
||!metadata_entry
.url
.starts_with("https://bugzilla.mozilla.org")
{
continue;
}
// For now add all bugs irrespective of status or subtest
if let Ok(bug_url) = Url::parse(&metadata_entry.url) {
if let Some((_, bug_id)) = bug_url.query_pairs().find(|(key, _)| key == "id") {
bugs.insert(bug_id.into_owned());
}
}
}
}
let mut bugs_col = String::with_capacity(8 * bugs.len());
for bug in bugs.iter() {
if!bugs_col.is_empty() {
bugs_col.push(' ');
}
bugs_col.push_str(bug);
}
let record = &[&result.test, &scores[0], &scores[1], &scores[2], &bugs_col];
writer.write_record(record)?;
}
Ok(())
}
pub fn interop_columns(focus_areas: &BTreeMap<String, interop::FocusArea>) -> Vec<&str> {
let mut columns = Vec::with_capacity(focus_areas.len());
for (name, data) in focus_areas.iter() {
if data.counts_toward_score {
columns.push(name.as_ref());
}
}
columns
}
fn browser_score(browser: &str, columns: &[&str], row: &interop::ScoreRow) -> Result<f64> {
let mut total_score: u64 = 0;
for column in columns {
let column = format!("{}-{}", browser, column);
let score = row
.get(&column)
.ok_or_else(|| anyhow!("Failed to get column {}", column))?;
let value: u64 = score
.parse::<u64>()
.map_err(|_| anyhow!("Failed to parse score"))?;
total_score += value;
}
Ok(total_score as f64 / (10 * columns.len()) as f64)
}
pub fn write_browser_interop_scores(
browsers: &[&str],
scores: &[interop::ScoreRow],
interop_2023_data: &interop::YearData,
) -> Result<()> {
let browser_columns = interop_columns(&interop_2023_data.focus_areas);
let data_path = Path::new("../docs/interop-2023/scores.csv"); | let out_f = File::create(data_path)?;
let mut writer = csv::WriterBuilder::new()
.quote_style(csv::QuoteStyle::NonNumeric)
.from_writer(out_f);
let mut headers = Vec::with_capacity(browsers.len() + 1);
headers.push("date");
headers.extend_from_slice(browsers);
writer.write_record(headers)?;
let mut output: Vec<String> = Vec::with_capacity(browsers.len() + 1);
for row in scores {
output.resize(0, "".into());
output.push(
row.get("date")
.ok_or_else(|| anyhow!("Failed to read date"))?
.into(),
);
for browser in browsers {
let score = browser_score(browser, &browser_columns, row)?;
output.push(format!("{:.2}", score))
}
writer.write_record(&output)?;
}
Ok(())
}
pub fn run() -> Result<()> {
let client = network::client();
let fyi = Wptfyi::new(None);
let runs = get_run_data(&fyi, &client)?;
let run_ids = latest_runs(&runs)?
.iter()
.map(|x| x.id)
.collect::<Vec<i64>>();
let interop_data = get_interop_data(&fyi, &client)?;
let interop_2023_data = interop_data
.get("2023")
.ok_or_else(|| anyhow!("Failed to get Interop-2023 metadata"))?;
let interop_categories = get_interop_categories(&fyi, &client)?;
let interop_2023_categories = interop_categories
.get("2023")
.ok_or_else(|| anyhow!("Failed to get Interop-2023 categories"))?;
let categories_by_name = interop_2023_categories.by_name();
let metadata = get_metadata(&fyi, &client)?;
for (name, focus_area) in interop_2023_data.focus_areas.iter() {
write_focus_area(
&fyi,
&client,
name,
focus_area,
&run_ids,
&categories_by_name,
&metadata,
)?;
}
let scores = get_interop_scores(&fyi, &client, interop::BrowserChannel::Experimental)?;
write_browser_interop_scores(&["firefox", "chrome", "safari"], &scores, interop_2023_data)?;
Ok(())
} | random_line_split |
|
lib.rs | name {
#[inline]
fn pwm_pin() -> PwmPin<$name> {
PwmPin::new($pwm)
}
}
)+
)
}
macro_rules! impl_clock {
($($name:ident: $pwm:expr),+) => (
$(
impl GpioClock for $name {
#[inline]
fn clock_pin() -> ClockPin<$name> {
ClockPin::new($pwm)
}
}
)+
)
}
macro_rules! require_root {
($($name:ident),+) => (
$(
impl RequiresRoot for $name {}
)+
)
}
mod bindings;
pub mod thread {
use bindings;
use libc;
///This attempts to shift your program (or thread in a multi-threaded
///program) to a higher priority and enables a real-time scheduling.
///
///The priority parameter should be from 0 (the default) to 99 (the
///maximum). This won’t make your program go any faster, but it will give
///it a bigger slice of time when other programs are running. The priority
///parameter works relative to others – so you can make one program
///priority 1 and another priority 2 and it will have the same effect as
///setting one to 10 and the other to 90 (as long as no other programs are
///running with elevated priorities)
///
///The return value is `true` for success and `false` for error. If an
///error is returned, the program should then consult the _errno_ global
///variable, as per the usual conventions.
///
///_Note_: Only programs running as root can change their priority. If
///called from a non-root program then nothing happens.
pub fn priority(priority: u8) -> bool {
unsafe {
bindings::piHiPri(priority as libc::c_int) >= 0
}
}
}
pub mod pin {
use bindings;
use libc;
use self::Value::{Low, High};
use std::marker::PhantomData;
const INPUT: libc::c_int = 0;
const OUTPUT: libc::c_int = 1;
const PWM_OUTPUT: libc::c_int = 2;
const GPIO_CLOCK: libc::c_int = 3;
//const SOFT_PWM_OUTPUT: libc::c_int = 4;
//const SOFT_TONE_OUTPUT: libc::c_int = 5;
//const PWM_TONE_OUTPUT: libc::c_int = 6;
///This returns the BCM_GPIO pin number of the supplied **wiringPi** pin.
///
///It takes the board revision into account.
pub fn wpi_to_gpio_number(wpi_number: u16) -> u16 {
unsafe {
bindings::wpiPinToGpio(wpi_number as libc::c_int) as u16
}
}
///This returns the BCM_GPIO pin number of the supplied physical pin on
///the P1 connector.
pub fn phys_to_gpio_number(phys_number: u16) -> u16 {
unsafe {
bindings::physPinToGpio(phys_number as libc::c_int) as u16
}
}
impl_pins!(WiringPi, Gpio, Phys, Sys);
impl_pwm!(WiringPi: 1, Gpio: 18, Phys: 12);
impl_clock!(WiringPi: 7, Gpio: 4, Phys: 7);
require_root!(WiringPi, Gpio, Phys);
pub trait Pin {}
pub trait Pwm: RequiresRoot + Sized {
fn pwm_pin() -> PwmPin<Self>;
}
pub trait GpioClock: RequiresRoot + Sized {
fn clock_pin() -> ClockPin<Self>;
}
pub trait RequiresRoot: Pin {}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Value {
Low = 0,
High
}
#[derive(Debug, Clone, Copy)]
pub enum Edge {
///No setup is performed, it is assumed the trigger has already been set up previosuly
Setup = 0,
Falling = 1,
Rising = 2,
Both = 3
}
#[derive(Debug, Clone, Copy)]
pub enum Pull {
Off = 0,
Down,
Up
}
#[derive(Debug, Clone, Copy)]
pub enum PwmMode {
MarkSpace = 0,
Balanced
}
pub struct InputPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin> InputPin<P> {
pub fn new(pin: libc::c_int) -> InputPin<P> {
unsafe {
bindings::pinMode(pin, INPUT);
}
InputPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &InputPin(number, _) = self;
number
}
///This function returns the value read at the given pin.
///
///It will be `High` or `Low` (1 or 0) depending on the logic level at the pin.
pub fn digital_read(&self) -> Value {
let value = unsafe {
bindings::digitalRead(self.number())
};
if value == 0 {
Low
} else {
| }
///This returns the value read on the supplied analog input pin. You
///will need to register additional analog modules to enable this
///function for devices such as the Gertboard, quick2Wire analog
///board, etc.
pub fn analog_read(&self) -> u16 {
unsafe {
bindings::analogRead(self.number()) as u16
}
}
/// This will register an "Interrupt" to be called when the pin changes state
/// Note the quotes around Interrupt, because the current implementation in the C
/// library seems to be a dedicated thread that polls the gpio device driver,
/// and this callback is called from that thread synchronously, so it's not something that
/// you would call a real interrupt in an embedded environement.
///
/// The callback function does not need to be reentrant.
///
/// The callback must be an actual function (not a closure!), and must be using
/// the extern "C" modifier so that it can be passed to the wiringpi library,
/// and called from C code.
///
/// Unfortunately the C implementation does not allow userdata to be passed around,
/// so the callback must be able to determine what caused the interrupt just by the
/// function that was invoked.
///
/// See https://github.com/Ogeon/rust-wiringpi/pull/28 for
/// ideas on how to work around these limitations if you find them too constraining.
///
/// ```
/// use wiringpi;
///
/// extern "C" fn change_state() {
/// println!("Look ma, I'm being called from an another thread");
/// }
///
/// fn main() {
/// let pi = wiringpi::setup();
/// let pin = pi.output_pin(0);
///
/// pin.register_isr(Edge::Falling, Some(change_state));
///
/// thread::sleep(60000);
/// }
///
/// ```
///
///
pub fn register_isr(&self, edge: Edge, f: Option<extern "C" fn()>) {
unsafe {
bindings::wiringPiISR(self.number(), edge as i32, f);
}
}
}
impl<P: Pin + RequiresRoot> InputPin<P> {
///This sets the pull-up or pull-down resistor mode on the given pin.
///
///Unlike the Arduino, the BCM2835 has both pull-up an down internal
///resistors. The parameter pud should be; `Off`, (no pull up/down),
///`Down` (pull to ground) or `Up` (pull to 3.3v)
pub fn pull_up_dn_control(&self, pud: Pull) {
unsafe {
bindings::pullUpDnControl(self.number(), pud as libc::c_int);
}
}
pub fn into_output(self) -> OutputPin<P> {
let InputPin(number, _) = self;
OutputPin::new(number)
}
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let InputPin(number, _) = self;
SoftPwmPin::new(number)
}
}
impl<P: Pin + Pwm> InputPin<P> {
pub fn into_pwm(self) -> PwmPin<P> {
let InputPin(number, _) = self;
PwmPin::new(number)
}
}
impl<P: Pin + GpioClock> InputPin<P> {
pub fn into_clock(self) -> ClockPin<P> {
let InputPin(number, _) = self;
ClockPin::new(number)
}
}
/// A pin with software controlled PWM output.
///
/// Due to limitations of the chip only one pin is able to do
/// hardware-controlled PWM output. The `SoftPwmPin`s on the
/// other hand allow for all GPIOs to output PWM signals.
///
/// The pulse width of the signal will be 100μs with a value range
/// of [0,100] \(where `0` is a constant low and `100` is a
/// constant high) resulting in a frequenzy of 100 Hz.
///
/// **Important**: In order to use software PWM pins *wiringPi*
/// has to be setup in GPIO mode via `setup_gpio()`.
pub struct SoftPwmPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin + RequiresRoot> SoftPwmPin<P> {
/// Configures the given `pin` to output a software controlled PWM
/// signal.
pub fn new(pin: libc::c_int) -> SoftPwmPin<P> {
unsafe {
bindings::softPwmCreate(pin, 0, 100);
}
SoftPwmPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &SoftPwmPin(number, _) = self;
number
}
/// Sets the duty cycle.
///
/// `value` has to be in the interval [0,100].
pub fn pwm_write(&self, value: libc::c_int) {
unsafe {
bindings::softPwmWrite(self.number(), value);
}
}
/// Stops the software handling of this pin.
///
/// _Note_: In order to control this pin via software PWM again
/// it will need to be recreated using `new()`.
pub fn pwm_stop(self) {
unsafe {
bindings::softPwmStop(self.number());
}
}
pub fn into_input(self) -> InputPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
InputPin::new(number)
}
pub fn into_output(self) -> OutputPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
OutputPin::new(number)
}
}
impl<P: Pin + Pwm> SoftPwmPin<P> {
pub fn into_pwm(self) -> PwmPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
PwmPin::new(number)
}
}
impl<P: Pin + GpioClock> SoftPwmPin<P> {
pub fn into_clock(self) -> ClockPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
ClockPin::new(number)
}
}
pub struct OutputPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin> OutputPin<P> {
pub fn new(pin: libc::c_int) -> OutputPin<P> {
unsafe {
bindings::pinMode(pin, OUTPUT);
}
OutputPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &OutputPin(number, _) = self;
number
}
///Writes the value `High` or `Low` (1 or 0) to the given pin which must have been previously set as an output.
pub fn digital_write(&self, value: Value) {
unsafe {
bindings::digitalWrite(self.number(), value as libc::c_int);
}
}
///This writes the given value to the supplied analog pin. You will
///need to register additional analog modules to enable this function
///for devices such as the Gertboard.
pub fn analog_write(&self, value: u16) {
unsafe {
bindings::analogWrite(self.number(), value as libc::c_int);
}
}
}
impl<P: Pin + RequiresRoot> OutputPin<P> {
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let OutputPin(number, _) = self;
SoftPwmPin::new(number)
}
}
impl<P: Pin + RequiresRoot> OutputPin<P> {
pub fn into_input(self) -> InputPin<P> {
let OutputPin(number, _) = self;
InputPin::new(number)
}
}
impl<P: Pin + Pwm> OutputPin<P> {
pub fn into_pwm(self) -> PwmPin<P> {
let OutputPin(number, _) = self;
PwmPin::new(number)
}
}
impl<P: Pin + GpioClock> OutputPin<P> {
pub fn into_clock(self) -> ClockPin<P> {
let OutputPin(number, _) = self;
ClockPin::new(number)
}
}
///To understand more about the PWM system, you’ll need to read the Broadcom ARM peripherals manual.
pub struct PwmPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin + Pwm> PwmPin<P> {
pub fn new(pin: libc::c_int) -> PwmPin<P> {
unsafe {
bindings::pinMode(pin, PWM_OUTPUT);
}
PwmPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &PwmPin(number, _) = self;
number
}
pub fn into_input(self) -> InputPin<P> {
let PwmPin(number, _) = self;
InputPin::new(number)
}
pub fn into_output(self) -> OutputPin<P> {
let PwmPin(number, _) = self;
OutputPin::new(number)
}
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let PwmPin(number, _) = self;
SoftPwmPin::new(number)
}
///Writes the value to the PWM register for the given pin.
///
///The value must be between 0 and 1024.
pub fn write(&self, value: u16) {
unsafe {
bindings::pwmWrite(self.number(), value as libc::c_int);
}
}
///The PWM generator can run in 2 modes – "balanced" and "mark:space".
///
///The mark:space mode is traditional, however the default mode in the
///Pi is "balanced". You can switch modes by supplying the parameter:
///`Balanced` or `MarkSpace`.
pub fn set_mode(&self, mode: PwmMode) {
unsafe {
bindings::pwmSetMode(mode as libc::c_int);
}
}
///This sets the range register in the PWM generator. The default is 1024.
pub fn set_range(&self, value: u16) {
unsafe {
bindings::pwmSetRange(value as libc::c_uint);
}
}
///This sets the divisor for the PWM clock.
pub fn set_clock(&self, value: u16) {
unsafe {
bindings::pwmSetClock(value as libc::c_int);
}
}
}
pub struct ClockPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin + GpioClock> ClockPin<P> {
pub fn new(pin: libc::c_int) -> ClockPin<P> {
unsafe {
bindings::pinMode(pin, GPIO_CLOCK);
}
ClockPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &ClockPin(number, _) = self;
number
}
pub fn into_input(self) -> InputPin<P> {
let ClockPin(number, _) = self;
InputPin::new(number)
}
pub fn into_output(self) -> OutputPin<P> {
let ClockPin(number, _) = self;
OutputPin::new(number)
}
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let ClockPin(number, _) = self;
SoftPwmPin::new(number)
}
///Set the freuency on a GPIO clock pin.
pub fn frequency(&self, freq: u16) {
unsafe {
bindings::gpioClockSet(self.number(), freq as libc::c_int);
}
}
}
}
///This initialises the wiringPi system and assumes that the calling program
///is going to be using the **wiringPi** pin numbering scheme.
///
///This is a simplified numbering scheme which provides a mapping from virtual
///pin numbers 0 through 16 to the real underlying Broadcom GPIO pin numbers.
///See the pins page for a table which maps the **wiringPi** pin number to the
///Broadcom GPIO pin number to the physical location on the edge connector.
///
///This function needs to be called with root privileges.
pub fn setup() -> WiringPi<pin::WiringPi> {
unsafe { bindings::wiringPiSetup(); }
WiringPi(PhantomData)
}
///This is identical to `setup()`, however it allows the calling programs to
///use the Broadcom GPIO pin numbers directly with no re-mapping.
///
///This function needs to be called with root privileges.
pub fn setup_gpio() -> WiringPi<pin::Gpio> {
unsafe { bindings::wiringPiSetupGpio(); }
WiringPi(PhantomData)
}
///This is identical to `setup()`, however it allows the calling programs to
///use the physical pin numbers _on the P1 connector only_.
///
///This function needs to be called with root privileges.
pub fn setup_phys() -> WiringPi<pin::Phys> {
unsafe { bindings::wiringPiSetupPhys(); }
WiringPi(PhantomData)
}
///This initialises the wiringPi system but uses the /sys/class/gpio interface
///rather than accessing the hardware directly.
///
///This can be called as a non-root user provided the GPIO pins have been
///exported before-hand using the gpio program. Pin number in this mode is the
///native Broadcom GPIO numbers.
///
///_Note_: In this mode you can only use the pins which have been exported via
///the /sys/class/gpio interface. You must export these pins before you call
///your program. You can do this in a separate shell-script, or by using the
///system() function from inside your program.
///
///Also note that some functions have no effect when using this mode as
///they’re not currently possible to action unless called with root
///privileges.
pub fn setup_sys() -> WiringPi<pin::Sys> {
unsafe { bindings::wiringPiSetupSys(); }
WiringPi(PhantomData)
}
///This returns the board revision of the Raspberry Pi.
///
///It will be either 1 or 2. Some of the BCM_GPIO pins changed number and
///function when moving from board revision 1 to 2, so if you are using
///BCM_GPIO pin numbers, then you need to be aware of the differences.
pub fn board_revision() -> i32 {
unsafe {
bindings::piBoardRev()
}
}
pub struct WiringPi<Pin>(PhantomData<Pin>);
impl<P: Pin> WiringPi<P> {
pub fn input_pin(&self, pin: u16) -> pin::InputPin<P> {
let pin = pin as libc::c_int;
pin::InputPin::new(pin)
}
pub fn output_pin(&self, pin: u16) -> pin::OutputPin<P> {
let pin = pin as libc::c_int;
pin::OutputPin::new(pin)
}
///This returns a number representing the number if milliseconds since
///your program called | High
}
| conditional_block |
lib.rs | {
#[inline]
fn pwm_pin() -> PwmPin<$name> {
PwmPin::new($pwm)
}
}
)+
)
}
macro_rules! impl_clock {
($($name:ident: $pwm:expr),+) => (
$(
impl GpioClock for $name {
#[inline]
fn clock_pin() -> ClockPin<$name> {
ClockPin::new($pwm)
}
}
)+
)
}
macro_rules! require_root {
($($name:ident),+) => (
$(
impl RequiresRoot for $name {}
)+
)
}
mod bindings;
pub mod thread {
use bindings;
use libc;
///This attempts to shift your program (or thread in a multi-threaded
///program) to a higher priority and enables a real-time scheduling.
///
///The priority parameter should be from 0 (the default) to 99 (the
///maximum). This won’t make your program go any faster, but it will give
///it a bigger slice of time when other programs are running. The priority
///parameter works relative to others – so you can make one program
///priority 1 and another priority 2 and it will have the same effect as
///setting one to 10 and the other to 90 (as long as no other programs are
///running with elevated priorities)
///
///The return value is `true` for success and `false` for error. If an
///error is returned, the program should then consult the _errno_ global
///variable, as per the usual conventions.
///
///_Note_: Only programs running as root can change their priority. If
///called from a non-root program then nothing happens.
pub fn priority(priority: u8) -> bool {
unsafe {
bindings::piHiPri(priority as libc::c_int) >= 0
}
}
}
pub mod pin {
use bindings;
use libc;
use self::Value::{Low, High};
use std::marker::PhantomData;
const INPUT: libc::c_int = 0;
const OUTPUT: libc::c_int = 1;
const PWM_OUTPUT: libc::c_int = 2;
const GPIO_CLOCK: libc::c_int = 3;
//const SOFT_PWM_OUTPUT: libc::c_int = 4;
//const SOFT_TONE_OUTPUT: libc::c_int = 5;
//const PWM_TONE_OUTPUT: libc::c_int = 6;
///This returns the BCM_GPIO pin number of the supplied **wiringPi** pin.
///
///It takes the board revision into account.
pub fn wpi_to_gpio_number(wpi_number: u16) -> u16 {
unsafe {
bindings::wpiPinToGpio(wpi_number as libc::c_int) as u16
}
}
///This returns the BCM_GPIO pin number of the supplied physical pin on
///the P1 connector.
pub fn phys_to_gpio_number(phys_number: u16) -> u16 {
unsafe {
bindings::physPinToGpio(phys_number as libc::c_int) as u16
}
}
impl_pins!(WiringPi, Gpio, Phys, Sys);
impl_pwm!(WiringPi: 1, Gpio: 18, Phys: 12);
impl_clock!(WiringPi: 7, Gpio: 4, Phys: 7);
require_root!(WiringPi, Gpio, Phys);
pub trait Pin {}
pub trait Pwm: RequiresRoot + Sized {
fn pwm_pin() -> PwmPin<Self>;
}
pub trait GpioClock: RequiresRoot + Sized {
fn clock_pin() -> ClockPin<Self>;
}
pub trait RequiresRoot: Pin {}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Value {
Low = 0,
High
}
#[derive(Debug, Clone, Copy)]
pub enum Edge {
///No setup is performed, it is assumed the trigger has already been set up previosuly
Setup = 0,
Falling = 1,
Rising = 2,
Both = 3
}
#[derive(Debug, Clone, Copy)]
pub enum Pull {
Off = 0,
Down,
Up
}
#[derive(Debug, Clone, Copy)]
pub enum PwmMode {
MarkSpace = 0,
Balanced
}
pub struct InputPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin> InputPin<P> {
pub fn new(pin: libc::c_int) -> InputPin<P> {
unsafe {
bindings::pinMode(pin, INPUT);
}
InputPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &InputPin(number, _) = self;
number
}
///This function returns the value read at the given pin.
///
///It will be `High` or `Low` (1 or 0) depending on the logic level at the pin.
pub fn digital_read(&self) -> Value {
let value = unsafe {
bindings::digitalRead(self.number())
};
if value == 0 {
Low
} else {
High
}
}
///This returns the value read on the supplied analog input pin. You
///will need to register additional analog modules to enable this
///function for devices such as the Gertboard, quick2Wire analog
///board, etc.
pub fn analog_read(&self) -> u16 {
unsafe {
bindings::analogRead(self.number()) as u16
}
}
/// This will register an "Interrupt" to be called when the pin changes state
/// Note the quotes around Interrupt, because the current implementation in the C
/// library seems to be a dedicated thread that polls the gpio device driver,
/// and this callback is called from that thread synchronously, so it's not something that
/// you would call a real interrupt in an embedded environement.
///
/// The callback function does not need to be reentrant.
///
/// The callback must be an actual function (not a closure!), and must be using
/// the extern "C" modifier so that it can be passed to the wiringpi library,
/// and called from C code.
///
/// Unfortunately the C implementation does not allow userdata to be passed around,
/// so the callback must be able to determine what caused the interrupt just by the
/// function that was invoked.
///
/// See https://github.com/Ogeon/rust-wiringpi/pull/28 for
/// ideas on how to work around these limitations if you find them too constraining.
///
/// ```
/// use wiringpi;
///
/// extern "C" fn change_state() {
/// println!("Look ma, I'm being called from an another thread");
/// }
///
/// fn main() {
/// let pi = wiringpi::setup();
/// let pin = pi.output_pin(0);
///
/// pin.register_isr(Edge::Falling, Some(change_state));
///
/// thread::sleep(60000);
/// }
///
/// ```
///
///
pub fn register_isr(&self, edge: Edge, f: Option<extern "C" fn()>) {
unsafe {
bindings::wiringPiISR(self.number(), edge as i32, f);
}
}
}
impl<P: Pin + RequiresRoot> InputPin<P> {
///This sets the pull-up or pull-down resistor mode on the given pin.
///
///Unlike the Arduino, the BCM2835 has both pull-up an down internal
///resistors. The parameter pud should be; `Off`, (no pull up/down),
///`Down` (pull to ground) or `Up` (pull to 3.3v)
pub fn pull_up_dn_control(&self, pud: Pull) {
unsafe {
bindings::pullUpDnControl(self.number(), pud as libc::c_int);
}
}
pub fn into_output(self) -> OutputPin<P> {
let InputPin(number, _) = self;
OutputPin::new(number)
}
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let InputPin(number, _) = self;
SoftPwmPin::new(number)
}
}
impl<P: Pin + Pwm> InputPin<P> {
pub fn into_pwm(self) -> PwmPin<P> {
let InputPin(number, _) = self;
PwmPin::new(number)
}
}
impl<P: Pin + GpioClock> InputPin<P> {
pub fn into_clock(self) -> ClockPin<P> {
let InputPin(number, _) = self;
ClockPin::new(number)
}
}
/// A pin with software controlled PWM output.
///
/// Due to limitations of the chip only one pin is able to do
/// hardware-controlled PWM output. The `SoftPwmPin`s on the
/// other hand allow for all GPIOs to output PWM signals.
///
/// The pulse width of the signal will be 100μs with a value range
/// of [0,100] \(where `0` is a constant low and `100` is a
/// constant high) resulting in a frequenzy of 100 Hz.
///
/// **Important**: In order to use software PWM pins *wiringPi*
/// has to be setup in GPIO mode via `setup_gpio()`.
pub struct SoftPwmPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin + RequiresRoot> SoftPwmPin<P> {
/// Configures the given `pin` to output a software controlled PWM
/// signal.
pub fn new(pin: libc::c_int) -> SoftPwmPin<P> {
unsafe {
bindings::softPwmCreate(pin, 0, 100);
}
SoftPwmPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &SoftPwmPin(number, _) = self;
number
}
/// Sets the duty cycle.
///
/// `value` has to be in the interval [0,100].
pub fn pwm_write(&self, value: libc::c_int) {
unsafe {
bindings::softPwmWrite(self.number(), value);
}
}
/// Stops the software handling of this pin.
///
/// _Note_: In order to control this pin via software PWM again
/// it will need to be recreated using `new()`.
pub fn pwm_stop(self) {
unsafe {
bindings::softPwmStop(self.number());
}
}
pub fn into_input(self) -> InputPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
InputPin::new(number)
}
pub fn into_output(self) -> OutputPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
OutputPin::new(number)
}
}
impl<P: Pin + Pwm> SoftPwmPin<P> {
pub fn into_pwm(self) -> PwmPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
PwmPin::new(number)
}
}
impl<P: Pin + GpioClock> SoftPwmPin<P> {
pub fn into_clock(self) -> ClockPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
ClockPin::new(number)
}
}
pub struct OutputPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin> OutputPin<P> {
pub fn new(pin: libc::c_int) -> OutputPin<P> {
unsafe {
bindings::pinMode(pin, OUTPUT);
}
OutputPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &OutputPin(number, _) = self;
number
}
///Writes the value `High` or `Low` (1 or 0) to the given pin which must have been previously set as an output.
pub fn digital_write(&self, value: Value) {
unsafe {
bindings::digitalWrite(self.number(), value as libc::c_int);
}
}
///This writes the given value to the supplied analog pin. You will
///need to register additional analog modules to enable this function
///for devices such as the Gertboard.
pub fn analog_write(&self, value: u16) {
unsafe {
bindings::analogWrite(self.number(), value as libc::c_int);
}
}
}
impl<P: Pin + RequiresRoot> OutputPin<P> {
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let OutputPin(number, _) = self;
SoftPwmPin::new(number)
}
}
impl<P: Pin + RequiresRoot> OutputPin<P> {
pub fn into_input(self) -> InputPin<P> {
let OutputPin(number, _) = self;
InputPin::new(number)
}
}
impl<P: Pin + Pwm> OutputPin<P> {
pub fn into_pwm(self) -> PwmPin<P> {
let OutputPin(number, _) = self;
PwmPin::new(number)
}
}
impl<P: Pin + GpioClock> OutputPin<P> {
pub fn into_clock(self) -> ClockPin<P> {
let OutputPin(number, _) = self;
ClockPin::new(number)
}
}
///To understand more about the PWM system, you’ll need to read the Broadcom ARM peripherals manual.
pub struct PwmPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin + Pwm> PwmPin<P> {
pub fn new(pin: libc::c_int) -> PwmPin<P> {
unsafe {
bindings::pinMode(pin, PWM_OUTPUT);
}
PwmPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &PwmPin(number, _) = self;
number
}
pub fn into_input(self) -> InputPin<P> {
let PwmPin(number, _) = self;
InputPin::new(number)
}
pub fn into_output(self) -> OutputPin<P> {
let PwmPin(number, _) = self;
OutputPin::new(number)
}
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let PwmPin(number, _) = self;
SoftPwmPin::new(number)
}
///Writes the value to the PWM register for the given pin.
///
///The value must be between 0 and 1024.
pub fn write(&self, value: u16) {
unsafe {
bindings::pwmWrite(self.number(), value as libc::c_int);
}
}
///The PWM generator can run in 2 modes – "balanced" and "mark:space".
///
///The mark:space mode is traditional, however the default mode in the
///Pi is "balanced". You can switch modes by supplying the parameter:
///`Balanced` or `MarkSpace`.
pub fn set_mode( | ode: PwmMode) {
unsafe {
bindings::pwmSetMode(mode as libc::c_int);
}
}
///This sets the range register in the PWM generator. The default is 1024.
pub fn set_range(&self, value: u16) {
unsafe {
bindings::pwmSetRange(value as libc::c_uint);
}
}
///This sets the divisor for the PWM clock.
pub fn set_clock(&self, value: u16) {
unsafe {
bindings::pwmSetClock(value as libc::c_int);
}
}
}
pub struct ClockPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin + GpioClock> ClockPin<P> {
pub fn new(pin: libc::c_int) -> ClockPin<P> {
unsafe {
bindings::pinMode(pin, GPIO_CLOCK);
}
ClockPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &ClockPin(number, _) = self;
number
}
pub fn into_input(self) -> InputPin<P> {
let ClockPin(number, _) = self;
InputPin::new(number)
}
pub fn into_output(self) -> OutputPin<P> {
let ClockPin(number, _) = self;
OutputPin::new(number)
}
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let ClockPin(number, _) = self;
SoftPwmPin::new(number)
}
///Set the freuency on a GPIO clock pin.
pub fn frequency(&self, freq: u16) {
unsafe {
bindings::gpioClockSet(self.number(), freq as libc::c_int);
}
}
}
}
///This initialises the wiringPi system and assumes that the calling program
///is going to be using the **wiringPi** pin numbering scheme.
///
///This is a simplified numbering scheme which provides a mapping from virtual
///pin numbers 0 through 16 to the real underlying Broadcom GPIO pin numbers.
///See the pins page for a table which maps the **wiringPi** pin number to the
///Broadcom GPIO pin number to the physical location on the edge connector.
///
///This function needs to be called with root privileges.
pub fn setup() -> WiringPi<pin::WiringPi> {
unsafe { bindings::wiringPiSetup(); }
WiringPi(PhantomData)
}
///This is identical to `setup()`, however it allows the calling programs to
///use the Broadcom GPIO pin numbers directly with no re-mapping.
///
///This function needs to be called with root privileges.
pub fn setup_gpio() -> WiringPi<pin::Gpio> {
unsafe { bindings::wiringPiSetupGpio(); }
WiringPi(PhantomData)
}
///This is identical to `setup()`, however it allows the calling programs to
///use the physical pin numbers _on the P1 connector only_.
///
///This function needs to be called with root privileges.
pub fn setup_phys() -> WiringPi<pin::Phys> {
unsafe { bindings::wiringPiSetupPhys(); }
WiringPi(PhantomData)
}
///This initialises the wiringPi system but uses the /sys/class/gpio interface
///rather than accessing the hardware directly.
///
///This can be called as a non-root user provided the GPIO pins have been
///exported before-hand using the gpio program. Pin number in this mode is the
///native Broadcom GPIO numbers.
///
///_Note_: In this mode you can only use the pins which have been exported via
///the /sys/class/gpio interface. You must export these pins before you call
///your program. You can do this in a separate shell-script, or by using the
///system() function from inside your program.
///
///Also note that some functions have no effect when using this mode as
///they’re not currently possible to action unless called with root
///privileges.
pub fn setup_sys() -> WiringPi<pin::Sys> {
unsafe { bindings::wiringPiSetupSys(); }
WiringPi(PhantomData)
}
///This returns the board revision of the Raspberry Pi.
///
///It will be either 1 or 2. Some of the BCM_GPIO pins changed number and
///function when moving from board revision 1 to 2, so if you are using
///BCM_GPIO pin numbers, then you need to be aware of the differences.
pub fn board_revision() -> i32 {
unsafe {
bindings::piBoardRev()
}
}
pub struct WiringPi<Pin>(PhantomData<Pin>);
impl<P: Pin> WiringPi<P> {
pub fn input_pin(&self, pin: u16) -> pin::InputPin<P> {
let pin = pin as libc::c_int;
pin::InputPin::new(pin)
}
pub fn output_pin(&self, pin: u16) -> pin::OutputPin<P> {
let pin = pin as libc::c_int;
pin::OutputPin::new(pin)
}
///This returns a number representing the number if milliseconds since
///your program | &self, m | identifier_name |
lib.rs | name {
#[inline]
fn pwm_pin() -> PwmPin<$name> {
PwmPin::new($pwm)
}
}
)+
)
}
macro_rules! impl_clock {
($($name:ident: $pwm:expr),+) => (
$(
impl GpioClock for $name {
#[inline]
fn clock_pin() -> ClockPin<$name> {
ClockPin::new($pwm)
}
}
)+
)
}
macro_rules! require_root {
($($name:ident),+) => (
$(
impl RequiresRoot for $name {}
)+
)
}
mod bindings;
pub mod thread {
use bindings;
use libc;
///This attempts to shift your program (or thread in a multi-threaded
///program) to a higher priority and enables a real-time scheduling.
///
///The priority parameter should be from 0 (the default) to 99 (the
///maximum). This won’t make your program go any faster, but it will give
///it a bigger slice of time when other programs are running. The priority
///parameter works relative to others – so you can make one program
///priority 1 and another priority 2 and it will have the same effect as
///setting one to 10 and the other to 90 (as long as no other programs are
///running with elevated priorities)
///
///The return value is `true` for success and `false` for error. If an
///error is returned, the program should then consult the _errno_ global
///variable, as per the usual conventions.
///
///_Note_: Only programs running as root can change their priority. If
///called from a non-root program then nothing happens.
pub fn priority(priority: u8) -> bool {
unsafe {
bindings::piHiPri(priority as libc::c_int) >= 0
}
}
}
pub mod pin {
use bindings;
use libc;
use self::Value::{Low, High};
use std::marker::PhantomData;
const INPUT: libc::c_int = 0;
const OUTPUT: libc::c_int = 1;
const PWM_OUTPUT: libc::c_int = 2;
const GPIO_CLOCK: libc::c_int = 3;
//const SOFT_PWM_OUTPUT: libc::c_int = 4;
//const SOFT_TONE_OUTPUT: libc::c_int = 5;
//const PWM_TONE_OUTPUT: libc::c_int = 6;
///This returns the BCM_GPIO pin number of the supplied **wiringPi** pin.
///
///It takes the board revision into account.
pub fn wpi_to_gpio_number(wpi_number: u16) -> u16 {
unsafe {
bindings::wpiPinToGpio(wpi_number as libc::c_int) as u16
}
}
///This returns the BCM_GPIO pin number of the supplied physical pin on
///the P1 connector.
pub fn phys_to_gpio_number(phys_number: u16) -> u16 {
unsafe {
bindings::physPinToGpio(phys_number as libc::c_int) as u16
}
}
impl_pins!(WiringPi, Gpio, Phys, Sys);
impl_pwm!(WiringPi: 1, Gpio: 18, Phys: 12);
impl_clock!(WiringPi: 7, Gpio: 4, Phys: 7);
require_root!(WiringPi, Gpio, Phys);
pub trait Pin {}
pub trait Pwm: RequiresRoot + Sized {
fn pwm_pin() -> PwmPin<Self>;
}
pub trait GpioClock: RequiresRoot + Sized {
fn clock_pin() -> ClockPin<Self>;
}
pub trait RequiresRoot: Pin {}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Value {
Low = 0,
High
}
#[derive(Debug, Clone, Copy)]
pub enum Edge {
///No setup is performed, it is assumed the trigger has already been set up previosuly
Setup = 0,
Falling = 1,
Rising = 2,
Both = 3
}
#[derive(Debug, Clone, Copy)]
pub enum Pull {
Off = 0,
Down,
Up
}
#[derive(Debug, Clone, Copy)]
pub enum PwmMode {
MarkSpace = 0,
Balanced
}
pub struct InputPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin> InputPin<P> {
pub fn new(pin: libc::c_int) -> InputPin<P> {
unsafe {
bindings::pinMode(pin, INPUT);
}
InputPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &InputPin(number, _) = self;
number
}
///This function returns the value read at the given pin.
///
///It will be `High` or `Low` (1 or 0) depending on the logic level at the pin.
pub fn digital_read(&self) -> Value {
let value = unsafe {
bindings::digitalRead(self.number())
};
if value == 0 {
Low
} else {
High
}
}
///This returns the value read on the supplied analog input pin. You
///will need to register additional analog modules to enable this
///function for devices such as the Gertboard, quick2Wire analog
///board, etc.
pub fn analog_read(&self) -> u16 {
unsafe {
bindings::analogRead(self.number()) as u16
}
}
/// This will register an "Interrupt" to be called when the pin changes state
/// Note the quotes around Interrupt, because the current implementation in the C
/// library seems to be a dedicated thread that polls the gpio device driver,
/// and this callback is called from that thread synchronously, so it's not something that
/// you would call a real interrupt in an embedded environement.
///
/// The callback function does not need to be reentrant.
///
/// The callback must be an actual function (not a closure!), and must be using
/// the extern "C" modifier so that it can be passed to the wiringpi library,
/// and called from C code.
///
/// Unfortunately the C implementation does not allow userdata to be passed around,
/// so the callback must be able to determine what caused the interrupt just by the
/// function that was invoked.
///
/// See https://github.com/Ogeon/rust-wiringpi/pull/28 for
/// ideas on how to work around these limitations if you find them too constraining.
///
/// ```
/// use wiringpi;
///
/// extern "C" fn change_state() {
/// println!("Look ma, I'm being called from an another thread");
/// }
///
/// fn main() {
/// let pi = wiringpi::setup();
/// let pin = pi.output_pin(0);
///
/// pin.register_isr(Edge::Falling, Some(change_state));
///
/// thread::sleep(60000);
/// }
///
/// ```
///
///
pub fn register_isr(&self, edge: Edge, f: Option<extern "C" fn()>) {
unsafe {
bindings::wiringPiISR(self.number(), edge as i32, f);
}
}
}
impl<P: Pin + RequiresRoot> InputPin<P> {
///This sets the pull-up or pull-down resistor mode on the given pin.
///
///Unlike the Arduino, the BCM2835 has both pull-up an down internal
///resistors. The parameter pud should be; `Off`, (no pull up/down),
///`Down` (pull to ground) or `Up` (pull to 3.3v)
pub fn pull_up_dn_control(&self, pud: Pull) {
unsafe {
bindings::pullUpDnControl(self.number(), pud as libc::c_int);
}
}
pub fn into_output(self) -> OutputPin<P> {
let InputPin(number, _) = self;
OutputPin::new(number)
}
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let InputPin(number, _) = self;
SoftPwmPin::new(number)
}
}
impl<P: Pin + Pwm> InputPin<P> {
pub fn into_pwm(self) -> PwmPin<P> {
let InputPin(number, _) = self;
PwmPin::new(number)
}
}
impl<P: Pin + GpioClock> InputPin<P> {
pub fn into_clock(self) -> ClockPin<P> {
let InputPin(number, _) = self;
ClockPin::new(number)
}
}
/// A pin with software controlled PWM output.
///
/// Due to limitations of the chip only one pin is able to do
/// hardware-controlled PWM output. The `SoftPwmPin`s on the
/// other hand allow for all GPIOs to output PWM signals.
///
/// The pulse width of the signal will be 100μs with a value range
/// of [0,100] \(where `0` is a constant low and `100` is a
/// constant high) resulting in a frequenzy of 100 Hz.
///
/// **Important**: In order to use software PWM pins *wiringPi*
/// has to be setup in GPIO mode via `setup_gpio()`.
pub struct SoftPwmPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin + RequiresRoot> SoftPwmPin<P> {
/// Configures the given `pin` to output a software controlled PWM
/// signal.
pub fn new(pin: libc::c_int) -> SoftPwmPin<P> {
| #[inline]
pub fn number(&self) -> libc::c_int {
let &SoftPwmPin(number, _) = self;
number
}
/// Sets the duty cycle.
///
/// `value` has to be in the interval [0,100].
pub fn pwm_write(&self, value: libc::c_int) {
unsafe {
bindings::softPwmWrite(self.number(), value);
}
}
/// Stops the software handling of this pin.
///
/// _Note_: In order to control this pin via software PWM again
/// it will need to be recreated using `new()`.
pub fn pwm_stop(self) {
unsafe {
bindings::softPwmStop(self.number());
}
}
pub fn into_input(self) -> InputPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
InputPin::new(number)
}
pub fn into_output(self) -> OutputPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
OutputPin::new(number)
}
}
impl<P: Pin + Pwm> SoftPwmPin<P> {
pub fn into_pwm(self) -> PwmPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
PwmPin::new(number)
}
}
impl<P: Pin + GpioClock> SoftPwmPin<P> {
pub fn into_clock(self) -> ClockPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
ClockPin::new(number)
}
}
pub struct OutputPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin> OutputPin<P> {
pub fn new(pin: libc::c_int) -> OutputPin<P> {
unsafe {
bindings::pinMode(pin, OUTPUT);
}
OutputPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &OutputPin(number, _) = self;
number
}
///Writes the value `High` or `Low` (1 or 0) to the given pin which must have been previously set as an output.
pub fn digital_write(&self, value: Value) {
unsafe {
bindings::digitalWrite(self.number(), value as libc::c_int);
}
}
///This writes the given value to the supplied analog pin. You will
///need to register additional analog modules to enable this function
///for devices such as the Gertboard.
pub fn analog_write(&self, value: u16) {
unsafe {
bindings::analogWrite(self.number(), value as libc::c_int);
}
}
}
impl<P: Pin + RequiresRoot> OutputPin<P> {
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let OutputPin(number, _) = self;
SoftPwmPin::new(number)
}
}
impl<P: Pin + RequiresRoot> OutputPin<P> {
pub fn into_input(self) -> InputPin<P> {
let OutputPin(number, _) = self;
InputPin::new(number)
}
}
impl<P: Pin + Pwm> OutputPin<P> {
pub fn into_pwm(self) -> PwmPin<P> {
let OutputPin(number, _) = self;
PwmPin::new(number)
}
}
impl<P: Pin + GpioClock> OutputPin<P> {
pub fn into_clock(self) -> ClockPin<P> {
let OutputPin(number, _) = self;
ClockPin::new(number)
}
}
///To understand more about the PWM system, you’ll need to read the Broadcom ARM peripherals manual.
pub struct PwmPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin + Pwm> PwmPin<P> {
pub fn new(pin: libc::c_int) -> PwmPin<P> {
unsafe {
bindings::pinMode(pin, PWM_OUTPUT);
}
PwmPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &PwmPin(number, _) = self;
number
}
pub fn into_input(self) -> InputPin<P> {
let PwmPin(number, _) = self;
InputPin::new(number)
}
pub fn into_output(self) -> OutputPin<P> {
let PwmPin(number, _) = self;
OutputPin::new(number)
}
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let PwmPin(number, _) = self;
SoftPwmPin::new(number)
}
///Writes the value to the PWM register for the given pin.
///
///The value must be between 0 and 1024.
pub fn write(&self, value: u16) {
unsafe {
bindings::pwmWrite(self.number(), value as libc::c_int);
}
}
///The PWM generator can run in 2 modes – "balanced" and "mark:space".
///
///The mark:space mode is traditional, however the default mode in the
///Pi is "balanced". You can switch modes by supplying the parameter:
///`Balanced` or `MarkSpace`.
pub fn set_mode(&self, mode: PwmMode) {
unsafe {
bindings::pwmSetMode(mode as libc::c_int);
}
}
///This sets the range register in the PWM generator. The default is 1024.
pub fn set_range(&self, value: u16) {
unsafe {
bindings::pwmSetRange(value as libc::c_uint);
}
}
///This sets the divisor for the PWM clock.
pub fn set_clock(&self, value: u16) {
unsafe {
bindings::pwmSetClock(value as libc::c_int);
}
}
}
pub struct ClockPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin + GpioClock> ClockPin<P> {
pub fn new(pin: libc::c_int) -> ClockPin<P> {
unsafe {
bindings::pinMode(pin, GPIO_CLOCK);
}
ClockPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &ClockPin(number, _) = self;
number
}
pub fn into_input(self) -> InputPin<P> {
let ClockPin(number, _) = self;
InputPin::new(number)
}
pub fn into_output(self) -> OutputPin<P> {
let ClockPin(number, _) = self;
OutputPin::new(number)
}
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let ClockPin(number, _) = self;
SoftPwmPin::new(number)
}
///Set the freuency on a GPIO clock pin.
pub fn frequency(&self, freq: u16) {
unsafe {
bindings::gpioClockSet(self.number(), freq as libc::c_int);
}
}
}
}
///This initialises the wiringPi system and assumes that the calling program
///is going to be using the **wiringPi** pin numbering scheme.
///
///This is a simplified numbering scheme which provides a mapping from virtual
///pin numbers 0 through 16 to the real underlying Broadcom GPIO pin numbers.
///See the pins page for a table which maps the **wiringPi** pin number to the
///Broadcom GPIO pin number to the physical location on the edge connector.
///
///This function needs to be called with root privileges.
pub fn setup() -> WiringPi<pin::WiringPi> {
unsafe { bindings::wiringPiSetup(); }
WiringPi(PhantomData)
}
///This is identical to `setup()`, however it allows the calling programs to
///use the Broadcom GPIO pin numbers directly with no re-mapping.
///
///This function needs to be called with root privileges.
pub fn setup_gpio() -> WiringPi<pin::Gpio> {
unsafe { bindings::wiringPiSetupGpio(); }
WiringPi(PhantomData)
}
///This is identical to `setup()`, however it allows the calling programs to
///use the physical pin numbers _on the P1 connector only_.
///
///This function needs to be called with root privileges.
pub fn setup_phys() -> WiringPi<pin::Phys> {
unsafe { bindings::wiringPiSetupPhys(); }
WiringPi(PhantomData)
}
///This initialises the wiringPi system but uses the /sys/class/gpio interface
///rather than accessing the hardware directly.
///
///This can be called as a non-root user provided the GPIO pins have been
///exported before-hand using the gpio program. Pin number in this mode is the
///native Broadcom GPIO numbers.
///
///_Note_: In this mode you can only use the pins which have been exported via
///the /sys/class/gpio interface. You must export these pins before you call
///your program. You can do this in a separate shell-script, or by using the
///system() function from inside your program.
///
///Also note that some functions have no effect when using this mode as
///they’re not currently possible to action unless called with root
///privileges.
pub fn setup_sys() -> WiringPi<pin::Sys> {
unsafe { bindings::wiringPiSetupSys(); }
WiringPi(PhantomData)
}
///This returns the board revision of the Raspberry Pi.
///
///It will be either 1 or 2. Some of the BCM_GPIO pins changed number and
///function when moving from board revision 1 to 2, so if you are using
///BCM_GPIO pin numbers, then you need to be aware of the differences.
pub fn board_revision() -> i32 {
unsafe {
bindings::piBoardRev()
}
}
pub struct WiringPi<Pin>(PhantomData<Pin>);
impl<P: Pin> WiringPi<P> {
pub fn input_pin(&self, pin: u16) -> pin::InputPin<P> {
let pin = pin as libc::c_int;
pin::InputPin::new(pin)
}
pub fn output_pin(&self, pin: u16) -> pin::OutputPin<P> {
let pin = pin as libc::c_int;
pin::OutputPin::new(pin)
}
///This returns a number representing the number if milliseconds since
///your program called | unsafe {
bindings::softPwmCreate(pin, 0, 100);
}
SoftPwmPin(pin, PhantomData)
}
| identifier_body |
lib.rs | $name {
#[inline]
fn pwm_pin() -> PwmPin<$name> {
PwmPin::new($pwm)
}
}
)+
)
}
// Implements `GpioClock` for each listed pin-numbering scheme, binding it to
// the fixed pin number that carries the GPIO clock under that scheme,
// e.g. `impl_clock!(WiringPi: 7, Gpio: 4, Phys: 7)`.
macro_rules! impl_clock {
    ($($name:ident: $pwm:expr),+) => (
        $(
            impl GpioClock for $name {
                #[inline]
                fn clock_pin() -> ClockPin<$name> {
                    ClockPin::new($pwm)
                }
            }
        )+
    )
}
// Marks each listed pin-numbering scheme as requiring root privileges by
// implementing the `RequiresRoot` marker trait for it.
macro_rules! require_root {
    ($($name:ident),+) => (
        $(
            impl RequiresRoot for $name {}
        )+
    )
}
mod bindings;
pub mod thread {
    use bindings;
    use libc;

    ///Attempts to shift your program (or thread, in a multi-threaded
    ///program) to a higher priority and enables real-time scheduling.
    ///
    ///`priority` should be from 0 (the default) to 99 (the maximum). This
    ///won’t make your program go any faster, but it will give it a bigger
    ///slice of time when other programs are running. The priority parameter
    ///works relative to others – so you can make one program priority 1 and
    ///another priority 2 and it will have the same effect as setting one to
    ///10 and the other to 90 (as long as no other programs are running with
    ///elevated priorities).
    ///
    ///Returns `true` for success and `false` for error. If an error is
    ///returned, the program should then consult the _errno_ global variable,
    ///as per the usual conventions.
    ///
    ///_Note_: Only programs running as root can change their priority. If
    ///called from a non-root program then nothing happens.
    pub fn priority(priority: u8) -> bool {
        let status = unsafe { bindings::piHiPri(priority as libc::c_int) };
        status >= 0
    }
}
pub mod pin {
use bindings;
use libc;
use self::Value::{Low, High};
use std::marker::PhantomData;
const INPUT: libc::c_int = 0;
const OUTPUT: libc::c_int = 1;
const PWM_OUTPUT: libc::c_int = 2;
const GPIO_CLOCK: libc::c_int = 3;
//const SOFT_PWM_OUTPUT: libc::c_int = 4;
//const SOFT_TONE_OUTPUT: libc::c_int = 5;
//const PWM_TONE_OUTPUT: libc::c_int = 6;
///This returns the BCM_GPIO pin number of the supplied **wiringPi** pin.
///
///It takes the board revision into account.
pub fn wpi_to_gpio_number(wpi_number: u16) -> u16 {
unsafe {
bindings::wpiPinToGpio(wpi_number as libc::c_int) as u16
}
}
///This returns the BCM_GPIO pin number of the supplied physical pin on
///the P1 connector.
pub fn phys_to_gpio_number(phys_number: u16) -> u16 {
unsafe {
bindings::physPinToGpio(phys_number as libc::c_int) as u16
}
}
impl_pins!(WiringPi, Gpio, Phys, Sys);
impl_pwm!(WiringPi: 1, Gpio: 18, Phys: 12);
impl_clock!(WiringPi: 7, Gpio: 4, Phys: 7);
require_root!(WiringPi, Gpio, Phys);
    ///Marker trait for pin-numbering schemes (see `impl_pins!`).
    pub trait Pin {}
    ///Numbering schemes that expose the hardware PWM pin.
    pub trait Pwm: RequiresRoot + Sized {
        ///Returns the hardware PWM pin for this numbering scheme.
        fn pwm_pin() -> PwmPin<Self>;
    }
    ///Numbering schemes that expose the GPIO clock pin.
    pub trait GpioClock: RequiresRoot + Sized {
        ///Returns the GPIO clock pin for this numbering scheme.
        fn clock_pin() -> ClockPin<Self>;
    }
    ///Marker trait for numbering schemes whose operations require root privileges.
    pub trait RequiresRoot: Pin {}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Value {
Low = 0,
High
}
#[derive(Debug, Clone, Copy)]
pub enum Edge {
///No setup is performed, it is assumed the trigger has already been set up previosuly
Setup = 0,
Falling = 1,
Rising = 2,
Both = 3
}
#[derive(Debug, Clone, Copy)]
pub enum Pull {
Off = 0,
Down,
Up
}
#[derive(Debug, Clone, Copy)]
pub enum PwmMode {
MarkSpace = 0,
Balanced
}
    ///A pin configured as a digital input.
    pub struct InputPin<Pin>(libc::c_int, PhantomData<Pin>);
    impl<P: Pin> InputPin<P> {
        ///Configures `pin` as a digital input.
        pub fn new(pin: libc::c_int) -> InputPin<P> {
            unsafe {
                bindings::pinMode(pin, INPUT);
            }
            InputPin(pin, PhantomData)
        }
        ///Returns the underlying pin number.
        #[inline]
        pub fn number(&self) -> libc::c_int {
            let &InputPin(number, _) = self;
            number
        }
        ///This function returns the value read at the given pin.
        ///
        ///It will be `High` or `Low` (1 or 0) depending on the logic level at the pin.
        pub fn digital_read(&self) -> Value {
            let value = unsafe {
                bindings::digitalRead(self.number())
            };
            if value == 0 {
                Low
            } else {
                High
            }
        }
        ///This returns the value read on the supplied analog input pin. You
        ///will need to register additional analog modules to enable this
        ///function for devices such as the Gertboard, quick2Wire analog
        ///board, etc.
        pub fn analog_read(&self) -> u16 {
            unsafe {
                bindings::analogRead(self.number()) as u16
            }
        }
        /// This will register an "interrupt" handler to be called when the
        /// pin changes state.
        ///
        /// Note the quotes around interrupt: the current implementation in
        /// the C library seems to be a dedicated thread that polls the gpio
        /// device driver, and this callback is called from that thread
        /// synchronously, so it's not something that you would call a real
        /// interrupt in an embedded environment.
        ///
        /// The callback function does not need to be reentrant.
        ///
        /// The callback must be an actual function (not a closure!), and
        /// must be using the `extern "C"` modifier so that it can be passed
        /// to the wiringpi library, and called from C code.
        ///
        /// Unfortunately the C implementation does not allow userdata to be
        /// passed around, so the callback must be able to determine what
        /// caused the interrupt just by the function that was invoked.
        ///
        /// See https://github.com/Ogeon/rust-wiringpi/pull/28 for
        /// ideas on how to work around these limitations if you find them
        /// too constraining.
        ///
        /// ```ignore
        /// use wiringpi;
        ///
        /// extern "C" fn change_state() {
        ///     println!("Look ma, I'm being called from an another thread");
        /// }
        ///
        /// fn main() {
        ///     let pi = wiringpi::setup();
        ///     let pin = pi.input_pin(0);
        ///
        ///     pin.register_isr(Edge::Falling, Some(change_state));
        ///
        ///     thread::sleep(60000);
        /// }
        /// ```
        pub fn register_isr(&self, edge: Edge, f: Option<extern "C" fn()>) {
            unsafe {
                bindings::wiringPiISR(self.number(), edge as i32, f);
            }
        }
    }
impl<P: Pin + RequiresRoot> InputPin<P> {
///This sets the pull-up or pull-down resistor mode on the given pin.
///
///Unlike the Arduino, the BCM2835 has both pull-up an down internal
///resistors. The parameter pud should be; `Off`, (no pull up/down),
///`Down` (pull to ground) or `Up` (pull to 3.3v)
pub fn pull_up_dn_control(&self, pud: Pull) {
unsafe {
bindings::pullUpDnControl(self.number(), pud as libc::c_int);
}
}
pub fn into_output(self) -> OutputPin<P> {
let InputPin(number, _) = self;
OutputPin::new(number)
}
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let InputPin(number, _) = self;
SoftPwmPin::new(number)
}
}
    impl<P: Pin + Pwm> InputPin<P> {
        ///Reconfigures this pin as a hardware PWM output.
        pub fn into_pwm(self) -> PwmPin<P> {
            let InputPin(number, _) = self;
            PwmPin::new(number)
        }
    }
    impl<P: Pin + GpioClock> InputPin<P> {
        ///Reconfigures this pin as a GPIO clock output.
        pub fn into_clock(self) -> ClockPin<P> {
            let InputPin(number, _) = self;
            ClockPin::new(number)
        }
    }
/// A pin with software controlled PWM output.
///
/// Due to limitations of the chip only one pin is able to do
/// hardware-controlled PWM output. The `SoftPwmPin`s on the
/// other hand allow for all GPIOs to output PWM signals.
///
/// The pulse width of the signal will be 100μs with a value range
/// of [0,100] \(where `0` is a constant low and `100` is a
/// constant high) resulting in a frequenzy of 100 Hz.
///
/// **Important**: In order to use software PWM pins *wiringPi*
/// has to be setup in GPIO mode via `setup_gpio()`.
pub struct SoftPwmPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin + RequiresRoot> SoftPwmPin<P> {
/// Configures the given `pin` to output a software controlled PWM
/// signal.
pub fn new(pin: libc::c_int) -> SoftPwmPin<P> {
unsafe {
bindings::softPwmCreate(pin, 0, 100);
}
SoftPwmPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &SoftPwmPin(number, _) = self;
number
}
/// Sets the duty cycle.
///
/// `value` has to be in the interval [0,100].
pub fn pwm_write(&self, value: libc::c_int) {
unsafe {
bindings::softPwmWrite(self.number(), value);
}
}
/// Stops the software handling of this pin.
///
/// _Note_: In order to control this pin via software PWM again
/// it will need to be recreated using `new()`.
pub fn pwm_stop(self) {
unsafe {
bindings::softPwmStop(self.number());
}
}
pub fn into_input(self) -> InputPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
InputPin::new(number)
}
pub fn into_output(self) -> OutputPin<P> {
let SoftPwmPin(number, _) = self;
self.pwm_stop();
OutputPin::new(number)
}
}
    impl<P: Pin + Pwm> SoftPwmPin<P> {
        ///Stops the software PWM and reconfigures the pin as a hardware PWM output.
        pub fn into_pwm(self) -> PwmPin<P> {
            let SoftPwmPin(number, _) = self;
            self.pwm_stop();
            PwmPin::new(number)
        }
    }
    impl<P: Pin + GpioClock> SoftPwmPin<P> {
        ///Stops the software PWM and reconfigures the pin as a GPIO clock output.
        pub fn into_clock(self) -> ClockPin<P> {
            let SoftPwmPin(number, _) = self;
            self.pwm_stop();
            ClockPin::new(number)
        }
    }
pub struct OutputPin<Pin>(libc::c_int, PhantomData<Pin>);
impl<P: Pin> OutputPin<P> {
pub fn new(pin: libc::c_int) -> OutputPin<P> {
unsafe {
bindings::pinMode(pin, OUTPUT);
}
OutputPin(pin, PhantomData)
}
#[inline]
pub fn number(&self) -> libc::c_int {
let &OutputPin(number, _) = self;
number
}
///Writes the value `High` or `Low` (1 or 0) to the given pin which must have been previously set as an output.
pub fn digital_write(&self, value: Value) {
unsafe {
bindings::digitalWrite(self.number(), value as libc::c_int);
}
}
///This writes the given value to the supplied analog pin. You will
///need to register additional analog modules to enable this function
///for devices such as the Gertboard.
pub fn analog_write(&self, value: u16) {
unsafe {
bindings::analogWrite(self.number(), value as libc::c_int);
}
}
}
impl<P: Pin + RequiresRoot> OutputPin<P> {
pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
let OutputPin(number, _) = self;
SoftPwmPin::new(number)
}
}
impl<P: Pin + RequiresRoot> OutputPin<P> {
pub fn into_input(self) -> InputPin<P> {
let OutputPin(number, _) = self;
InputPin::new(number)
}
}
    impl<P: Pin + Pwm> OutputPin<P> {
        ///Reconfigures this pin as a hardware PWM output.
        pub fn into_pwm(self) -> PwmPin<P> {
            let OutputPin(number, _) = self;
            PwmPin::new(number)
        }
    }
    impl<P: Pin + GpioClock> OutputPin<P> {
        ///Reconfigures this pin as a GPIO clock output.
        pub fn into_clock(self) -> ClockPin<P> {
            let OutputPin(number, _) = self;
            ClockPin::new(number)
        }
    }
    ///A pin configured for hardware PWM output.
    ///
    ///To understand more about the PWM system, you’ll need to read the Broadcom ARM peripherals manual.
    pub struct PwmPin<Pin>(libc::c_int, PhantomData<Pin>);
    impl<P: Pin + Pwm> PwmPin<P> {
        ///Configures `pin` for hardware PWM output.
        pub fn new(pin: libc::c_int) -> PwmPin<P> {
            unsafe {
                bindings::pinMode(pin, PWM_OUTPUT);
            }
            PwmPin(pin, PhantomData)
        }
        ///Returns the underlying pin number.
        #[inline]
        pub fn number(&self) -> libc::c_int {
            let &PwmPin(number, _) = self;
            number
        }
        ///Reconfigures this pin as a digital input.
        pub fn into_input(self) -> InputPin<P> {
            let PwmPin(number, _) = self;
            InputPin::new(number)
        }
        ///Reconfigures this pin as a digital output.
        pub fn into_output(self) -> OutputPin<P> {
            let PwmPin(number, _) = self;
            OutputPin::new(number)
        }
        ///Reconfigures this pin as a software-driven PWM output.
        pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
            let PwmPin(number, _) = self;
            SoftPwmPin::new(number)
        }
        ///Writes the value to the PWM register for the given pin.
        ///
        ///The value must be between 0 and 1024.
        pub fn write(&self, value: u16) {
            unsafe {
                bindings::pwmWrite(self.number(), value as libc::c_int);
            }
        }
        ///The PWM generator can run in 2 modes – "balanced" and "mark:space".
        ///
        ///The mark:space mode is traditional, however the default mode in the
        ///Pi is "balanced". You can switch modes by supplying the parameter:
        ///`Balanced` or `MarkSpace`.
        ///
        ///NOTE(review): the underlying call does not take the pin number, so
        ///this presumably affects the PWM generator as a whole rather than
        ///just this pin — confirm against the wiringPi documentation.
        pub fn set_mode(&self, mode: PwmMode) {
            unsafe {
                bindings::pwmSetMode(mode as libc::c_int);
            }
        }
        ///This sets the range register in the PWM generator. The default is 1024.
        ///
        ///NOTE(review): the underlying call does not take the pin number; see `set_mode`.
        pub fn set_range(&self, value: u16) {
            unsafe {
                bindings::pwmSetRange(value as libc::c_uint);
            }
        }
        ///This sets the divisor for the PWM clock.
        ///
        ///NOTE(review): the underlying call does not take the pin number; see `set_mode`.
        pub fn set_clock(&self, value: u16) {
            unsafe {
                bindings::pwmSetClock(value as libc::c_int);
            }
        }
    }
    ///A pin configured as a GPIO clock output.
    pub struct ClockPin<Pin>(libc::c_int, PhantomData<Pin>);
    impl<P: Pin + GpioClock> ClockPin<P> {
        ///Configures `pin` as a GPIO clock output.
        pub fn new(pin: libc::c_int) -> ClockPin<P> {
            unsafe {
                bindings::pinMode(pin, GPIO_CLOCK);
            }
            ClockPin(pin, PhantomData)
        }
        ///Returns the underlying pin number.
        #[inline]
        pub fn number(&self) -> libc::c_int {
            let &ClockPin(number, _) = self;
            number
        }
        ///Reconfigures this pin as a digital input.
        pub fn into_input(self) -> InputPin<P> {
            let ClockPin(number, _) = self;
            InputPin::new(number)
        }
        ///Reconfigures this pin as a digital output.
        pub fn into_output(self) -> OutputPin<P> {
            let ClockPin(number, _) = self;
            OutputPin::new(number)
        }
        ///Reconfigures this pin as a software-driven PWM output.
        pub fn into_soft_pwm(self) -> SoftPwmPin<P> {
            let ClockPin(number, _) = self;
            SoftPwmPin::new(number)
        }
        ///Set the frequency on a GPIO clock pin.
        pub fn frequency(&self, freq: u16) {
            unsafe {
                bindings::gpioClockSet(self.number(), freq as libc::c_int);
            }
        }
    }
}
///This initialises the wiringPi system and assumes that the calling program
///is going to be using the **wiringPi** pin numbering scheme.
///
///This is a simplified numbering scheme which provides a mapping from virtual
///pin numbers 0 through 16 to the real underlying Broadcom GPIO pin numbers.
///See the pins page for a table which maps the **wiringPi** pin number to the
///Broadcom GPIO pin number to the physical location on the edge connector.
///
///This function needs to be called with root privileges.
///
///NOTE(review): the status returned by `wiringPiSetup` is discarded here, so
///a failed initialisation is not reported to the caller — confirm the C
///library's failure behaviour.
pub fn setup() -> WiringPi<pin::WiringPi> {
    unsafe { bindings::wiringPiSetup(); }
    WiringPi(PhantomData)
}
///This is identical to `setup()`, however it allows the calling programs to
///use the Broadcom GPIO pin numbers directly with no re-mapping.
///
///This function needs to be called with root privileges.
///
///NOTE(review): the status returned by `wiringPiSetupGpio` is discarded.
pub fn setup_gpio() -> WiringPi<pin::Gpio> {
    unsafe { bindings::wiringPiSetupGpio(); }
    WiringPi(PhantomData)
}
///This is identical to `setup()`, however it allows the calling programs to
///use the physical pin numbers _on the P1 connector only_.
///
///This function needs to be called with root privileges.
///
///NOTE(review): the status returned by `wiringPiSetupPhys` is discarded.
pub fn setup_phys() -> WiringPi<pin::Phys> {
    unsafe { bindings::wiringPiSetupPhys(); }
    WiringPi(PhantomData)
}
///This initialises the wiringPi system but uses the /sys/class/gpio interface
///rather than accessing the hardware directly.
///
///This can be called as a non-root user provided the GPIO pins have been
///exported before-hand using the gpio program. Pin number in this mode is the
///native Broadcom GPIO numbers.
///
///_Note_: In this mode you can only use the pins which have been exported via
///the /sys/class/gpio interface. You must export these pins before you call
///your program. You can do this in a separate shell-script, or by using the
///system() function from inside your program.
///
///Also note that some functions have no effect when using this mode as
///they’re not currently possible to action unless called with root
///privileges.
pub fn setup_sys() -> WiringPi<pin::Sys> {
    unsafe { bindings::wiringPiSetupSys(); }
    WiringPi(PhantomData)
}
///Returns the board revision of the Raspberry Pi.
///
///The result is either 1 or 2. Some of the BCM_GPIO pins changed number and
///function when moving from board revision 1 to 2, so if you are using
///BCM_GPIO pin numbers, then you need to be aware of the differences.
pub fn board_revision() -> i32 {
    unsafe { bindings::piBoardRev() }
}
pub struct WiringPi<Pin>(PhantomData<Pin>);
impl<P: Pin> WiringPi<P> {
pub fn input_pin(&self, pin: u16) -> pin::InputPin<P> {
let pin = pin as libc::c_int;
pin::InputPin::new(pin)
}
pub fn output_pin(&self, pin: u16) -> pin::OutputPin<P> {
let pin = pin as libc::c_int;
pin::OutputPin::new(pin)
}
///This returns a number representing the number if milliseconds since
///your program called one | /// The callback function does not need to be reentrant.
///
/// The callback must be an actual function (not a closure!), and must be using
/// the extern "C" modifier so that it can be passed to the wiringpi library,
/// and called from C code. | random_line_split |
lib.rs | //! This is a platform-agnostic Rust driver for the Sensirion STS30, STS31, and STS35
//! high-accuracy, low-power, I2C digital temperature sensors, based on the
//! [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! TODO: More information here.
//!
//! The driver borrows liberally from:
//! - eldruin's tmp1x2-rs driver for Texas Instruments TMP102 and TMP112, https://github.com/eldruin/tmp1x2-rs, and
//! - dbrgn's shtcx-rs driver for Sensirion SHTCx temperature/humidity sensors, https://github.com/dbrgn/shtcx-rs.
#![deny(unsafe_code)]
#![no_std]
// TODO: add deny missing docs, and doc root url
mod crc;
use core::marker::PhantomData;
use embedded_hal::blocking::i2c; // TODO: move to using nb if the crate adds a nonblocking I2C.
pub use nb;
/// Possible errors in this crate.
///
/// `E` is the error type of the underlying I²C bus implementation.
#[derive(Debug)]
pub enum Error<E> {
    /// I²C bus error
    I2C(E),
    /// CRC checksum validation failed
    Crc,
}
/// Error type for mode changes.
///
/// This allows us to retrieve the unchanged device in case of an error, so
/// the caller can recover ownership and retry or keep using the old mode.
#[derive(Debug)]
pub enum ModeChangeError<E, DEV> {
    /// I²C bus error while changing modes
    ///
    /// `E` is the error that happened.
    /// `DEV` is the device with the mode unchanged.
    I2C(E, DEV),
}
/// Conversion rate for continuous conversion mode.
///
/// Passed to the sensor when starting periodic measurements
/// (see `Command::StartPeriodic`).
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ConversionRate {
    /// 0.5 Hz
    _0_5Hz,
    /// 1 Hz
    _1Hz,
    /// 2 Hz
    _2Hz,
    /// 4 Hz
    _4Hz,
    /// 10 Hz
    _10Hz,
}
/// Repeatability condition for both one-shot and continuous modes.
///
/// From the datasheet: the value is 3 * standard deviation of measurements
/// at constant ambient conditions.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Repeatability {
    /// High repeatability 0.04°C
    High,
    /// Medium repeatability 0.08°C
    Medium,
    /// Low repeatability 0.15°C
    Low,
}
impl Default for Repeatability {
    /// `Low` is the default repeatability setting.
    fn default() -> Repeatability {
        Repeatability::Low
    }
}
/// Possible peripheral addresses
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PeripheralAddr {
    /// Default address, with address pin held low
    PinLow,
    /// Address with the pin held high
    PinHigh,
}
impl Default for PeripheralAddr {
    /// Defaults to the pin-low (0x4A) address.
    fn default() -> PeripheralAddr {
        PeripheralAddr::PinLow
    }
}
impl PeripheralAddr {
    /// Return the 7-bit I2C address corresponding to the enum.
    fn as_byte(self) -> u8 {
        if self == PeripheralAddr::PinHigh {
            0x4B
        } else {
            0x4A
        }
    }
}
/// I²C commands sent to the sensor.
///
/// Each command is translated into its two-byte wire code by `as_bytes`.
#[derive(Debug)]
enum Command {
    /// Initiate a single-shot conversion.
    StartSingleShot {repeatability: Repeatability},
    /// Change to periodic mode with the given repeatability and conversion rates.
    StartPeriodic {
        repeatability: Repeatability,
        conversion_rate: ConversionRate,
    },
    /// Fetch data from the sensor when it is in continuous mode.
    FetchData,
    /// Break out of continuous mode and return to one-shot mode.
    Break,
    /// Issue a software reset.
    SoftReset,
    /// Turn the heater on for plausibility checking.
    HeaterOn,
    /// Turn the heater off.
    HeaterOff,
    /// Read the status register.
    ReadStatus,
    /// Clear the status register.
    ClearStatus,
}
impl Command {
    /// Return the two command bytes the sensor expects on the wire for
    /// this command.
    fn as_bytes(self) -> [u8; 2] {
        match self {
            Command::StartSingleShot { repeatability } => match repeatability {
                Repeatability::High => [0x24, 0x00],
                Repeatability::Medium => [0x24, 0x0B],
                Repeatability::Low => [0x24, 0x16],
            },
            // Periodic-mode codes, flattened into a single exhaustive table
            // over (rate, repeatability) pairs.
            Command::StartPeriodic { repeatability, conversion_rate } => {
                match (conversion_rate, repeatability) {
                    (ConversionRate::_0_5Hz, Repeatability::High) => [0x20, 0x32],
                    (ConversionRate::_0_5Hz, Repeatability::Medium) => [0x20, 0x24],
                    (ConversionRate::_0_5Hz, Repeatability::Low) => [0x20, 0x2F],
                    (ConversionRate::_1Hz, Repeatability::High) => [0x21, 0x30],
                    (ConversionRate::_1Hz, Repeatability::Medium) => [0x21, 0x26],
                    (ConversionRate::_1Hz, Repeatability::Low) => [0x21, 0x2D],
                    (ConversionRate::_2Hz, Repeatability::High) => [0x22, 0x36],
                    (ConversionRate::_2Hz, Repeatability::Medium) => [0x22, 0x20],
                    (ConversionRate::_2Hz, Repeatability::Low) => [0x22, 0x2B],
                    (ConversionRate::_4Hz, Repeatability::High) => [0x23, 0x34],
                    (ConversionRate::_4Hz, Repeatability::Medium) => [0x23, 0x22],
                    (ConversionRate::_4Hz, Repeatability::Low) => [0x23, 0x29],
                    (ConversionRate::_10Hz, Repeatability::High) => [0x27, 0x37],
                    (ConversionRate::_10Hz, Repeatability::Medium) => [0x27, 0x21],
                    (ConversionRate::_10Hz, Repeatability::Low) => [0x27, 0x2A],
                }
            }
            Command::FetchData => [0xE0, 0x00],
            Command::Break => [0x30, 0x93],
            Command::SoftReset => [0x30, 0xA2],
            Command::HeaterOn => [0x30, 0x6D],
            Command::HeaterOff => [0x30, 0x66],
            Command::ReadStatus => [0xF3, 0x2D],
            Command::ClearStatus => [0x30, 0x41],
        }
    }
}
// TODO: this is from shtcx, and I need to figure out how/whether to implement
pub trait MeasurementDuration {
/// Return the maximum measurement duration according to repeatability
/// in microseconds
fn measurement_duration_us(repeat: Repeatability) -> u16;
}
#[doc(hidden)]
// Type states for one-shot and continuous modes.
pub mod marker {
pub mod mode {
#[derive(Debug)]
pub struct OneShot(());
#[derive(Debug)]
pub struct Continuous(());
}
}
/// Device Driver
///
/// `I2C` is the bus implementation; `MODE` is a type-state marker
/// (`marker::mode::OneShot` or `marker::mode::Continuous`).
#[derive(Debug, Default)]
pub struct Sts3x<I2C, MODE> {
    /// The I2C device implementation
    i2c: I2C,
    /// The 7-bit I2C device address
    // NOTE(review): the derived `Default` zeroes this field, which is not a
    // valid sensor address (0x4A/0x4B expected) — confirm `Default` is wanted.
    address: u8,
    /// The present repeatability setting
    repeatability: Repeatability,
    /// A temperature measurement was started.
    temp_measurement_started: bool,
    _mode: PhantomData<MODE>,
}
// Implement struct creation for OneShot only so that it is only possible to create a one-shot version.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Write<Error = E>,
{
/// Create new instance of the Sts3x device.
/// Defaults to Low repeatability for power savings.
/// Change repeatability with set_repeatability().
///
/// By default, the device starts in one-shot mode.
pub fn new(i2c: I2C, address: PeripheralAddr) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability: Repeatability::default(),
temp_measurement_started: false,
_mode: PhantomData,
}
}
/// Create new instance of the Sts3x device, choosing a Repeatability.
///
/// By default, the device starts in one-shot mode.
pub fn new_with_repeatability(i2c: I2C, address: PeripheralAddr, repeatability: Repeatability) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability,
temp_measurement_started: false,
_mode: PhantomData,
}
}
}
// Methods shared by both single-shot and continuous modes
impl<I2C, MODE, E> Sts3x<I2C, MODE>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Destroy driver instance and return the I2C bus instance.
pub fn destroy(self) -> I2C {
| // write and read private methods
/// Write an I2C command to the sensor
fn send_command(&mut self, command: Command) -> Result<(), Error<E>> {
self.i2c
.write(self.address, &command.as_bytes())
.map_err(Error::I2C)
}
/// Read and check the CRC.
/// Returns a Result with u16 corresponding to the MSB,LSB of the first
/// two bytes of the buffer.
fn read_with_crc(&mut self) -> Result<u16, Error<E>> {
let mut buf = [0u8; 3];
self.i2c.read(self.address, &mut buf).map_err(Error::I2C)?;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(x)
} else {
Err(Error::Crc)
}
}
fn convert_temp_to_float(temp: u16) -> f32 {
-45.0 + 175.0 * (temp as f32) / 65535.0
}
// method about measurement duration?
}
// Methods for one-shot mode only
// TODO: these are nonblocking, but don't utilize the nb concepts. Also make blocking types.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Start a one-shot temperature measurement using the repeatability
/// that has already been set.
pub fn trigger_temp_meas(&mut self) -> Result<(), Error<E>> {
self.send_command(Command::StartSingleShot{repeatability: self.repeatability})
}
/// Perform a one-shot temperature measurement.
///
/// This allows triggering a single temperature measurement when in
/// one-shot mode. The device returns to the low-power state at the
/// completion of the temperature conversion, reducing power
/// consumption when continuous temperature monitoring is not required.
///
/// If no temperature conversion was started yet, calling this method
/// will start one and return `nb::Error::WouldBlock`. Subsequent calls
/// will continue to return `nb::Error::WouldBlock` until the
/// temperature measurement is finished. Then it will return the
/// measured temperature in °C.
pub fn read_temperature(&mut self) -> nb::Result<f32, Error<E>> {
if!self.temp_measurement_started {
self.trigger_temp_meas()
.map_err(nb::Error::Other)?;
self.temp_measurement_started = true;
return Err(nb::Error::WouldBlock);
}
let mut buf = [0u8; 3];
let completion = self.i2c.read(self.address, &mut buf);
// What I want to do:
// match completion {
// Ok(val) => {
// // Conversion complete.
// let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
// self.temp_measurement_started = false;
// Ok(Self::convert_temp_to_float(x))
// },
// Err(stm32f3xx_hal::i2c::Error::Nack) => { // I want to replace with a generic path in embedded_hal
// // namespace because we shouldn't depend on a specific device HAL.
// Err(nb::Error::WouldBlock)
// },
// Err(e) => {
// Err(nb::Error::Other(Error::I2C(e))) // Not sure this is correct, but compiler doesn't complain.
// }
// }
// What I have to do with embedded_hal 0.2.4/0.2.5:
match completion {
Ok(_) => {
// Conversion complete.
self.temp_measurement_started = false;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(Self::convert_temp_to_float(x))
} else {
Err(nb::Error::Other(Error::Crc))
}
},
_ => {
Err(nb::Error::WouldBlock)
}
}
}
pub fn set_repeatability(&mut self, r: Repeatability) -> Repeatability{
self.repeatability = r;
r
}
// pub fn into_continuous(self, rate: ConversionRate) -> Result<Sts3x<I2C, marker::mode::Continuous>, ModeChangeError<E, Self>>
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// pub fn reset_state(&mut self)
// pub fn soft_reset(&mut self)
// pub fn get_status(&self)
// pub fn clear_status(&mut self)
// pub fn heater_on(&mut self)
// pub fn heater_off(&mut self)
}
// Methods for continuous mode only
impl<I2C, E> Sts3x<I2C, marker::mode::Continuous>
where
I2C: i2c::Write<Error = E>,
{
/// Get latest temperature reading.
/// TODO: fill out
pub fn read_temperature(&self) -> u16 {
25
}
// /// Convert to one-shot mode.
// /// TODO: add the command to change and a failure error.
// pub fn into_one_shot(self) -> Result<Sts3x<I2C, marker::mode::OneShot>, ModeChangeError<E, Self>> {
// Result(Sts3x {
// i2c: self.i2c,
// address: self.address,
// repeatability: self.repeatability,
// temp_measurement_started: false,
// _mode: PhantomData,
// })
// }
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// /// This will convert into a one-shot mode device.
// pub fn reset_state(mut self)
}
// impl MeasurementDuration for Sts3x<I2C, MODE> {
// // TODO: fill out fn measurement_duration
// fn measurement_duration_us(repeat: Repeatability) -> u16 {
// 20
// }
// }
| self.i2c
}
| identifier_body |
lib.rs | //! This is a platform-agnostic Rust driver for the Sensirion STS30, STS31, and STS35
//! high-accuracy, low-power, I2C digital temperature sensors, based on the
//! [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! TODO: More information here.
//!
//! The driver borrows liberally from:
//! - eldruin's tmp1x2-rs driver for Texas Instruments TMP102 and TMP112, https://github.com/eldruin/tmp1x2-rs, and
//! - dbrgn's shtcx-rs driver for Sensirion SHTCx temperature/humidity sensors, https://github.com/dbrgn/shtcx-rs.
#![deny(unsafe_code)]
#![no_std]
// TODO: add deny missing docs, and doc root url
mod crc;
use core::marker::PhantomData;
use embedded_hal::blocking::i2c; // TODO: move to using nb if the crate adds a nonblocking I2C.
pub use nb;
/// Possible errors in this crate
#[derive(Debug)]
pub enum Error<E> {
    /// I²C bus error
    ///
    /// `E` is the error type of the underlying I²C bus implementation.
    I2C(E),
    /// CRC checksum validation failed on data read back from the sensor
    Crc,
}
/// Error type for mode changes.
/// This allows us to retrieve the unchanged device in case of an error.
// NOTE(review): currently unused — the mode-change methods
// (`into_continuous` / `into_one_shot`) are still commented-out stubs.
#[derive(Debug)]
pub enum ModeChangeError<E, DEV> {
    /// I²C bus error while changing modes
    ///
    /// `E` is the error that happened.
    /// `DEV` is the device with the mode unchanged.
    I2C(E, DEV),
}
/// Conversion rate for continuous conversion mode.
///
/// Selects how many measurements per second the sensor performs once a
/// periodic command (`Command::StartPeriodic`) has been issued.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ConversionRate {
    /// 0.5 Hz
    _0_5Hz,
    /// 1 Hz
    _1Hz,
    /// 2 Hz
    _2Hz,
    /// 4 Hz
    _4Hz,
    /// 10 Hz
    _10Hz,
}
/// Repeatability condition for both one-shot and continuous modes.
/// From the datasheet: the value is 3 * standard deviation of measurements at constant ambient.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Repeatability {
    /// High repeatability 0.04°C
    High,
    /// Medium repeatability 0.08°C
    Medium,
    /// Low repeatability 0.15°C
    Low,
}
impl Default for Repeatability {
    /// Defaults to `Low`, chosen elsewhere in this driver for power savings.
    fn default() -> Self {
        Repeatability::Low
    }
}
/// Possible peripheral addresses
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PeripheralAddr {
    /// Default address, with address pin held low
    PinLow,
    /// Address with the pin held high
    PinHigh,
}
impl Default for PeripheralAddr {
    /// The pin-low address is the sensor's default.
    fn default() -> Self {
        Self::PinLow
    }
}
impl PeripheralAddr {
    /// Map the address-pin state to the sensor's 7-bit I²C address.
    fn as_byte(self) -> u8 {
        if self == Self::PinLow {
            0x4A
        } else {
            0x4B
        }
    }
}
/// I²C commands sent to the sensor
#[derive(Debug)]
enum Command {
    /// Initiate a single-shot conversion.
    StartSingleShot {repeatability: Repeatability},
    /// Change to periodic mode with the given repeatability and conversion rates.
    StartPeriodic {
        repeatability: Repeatability,
        conversion_rate: ConversionRate,
    },
    /// Fetch data from the sensor when it is in continuous mode.
    FetchData,
    /// Break out of continuous mode and return to one-shot mode.
    Break,
    /// Issue a software reset.
    SoftReset,
    /// Turn the heater on for plausibility checking.
    HeaterOn,
    /// Turn the heater off.
    HeaterOff,
    /// Read the status register.
    ReadStatus,
    /// Clear the status register.
    ClearStatus,
}
impl Command {
    /// Encode the command as the two bytes (MSB, LSB) the sensor expects
    /// on the wire.
    fn as_bytes(self) -> [u8; 2] {
        match self {
            // Single-shot: the first byte is fixed, the second selects
            // the repeatability.
            Command::StartSingleShot { repeatability } => match repeatability {
                Repeatability::High => [0x24, 0x00],
                Repeatability::Medium => [0x24, 0x0B],
                Repeatability::Low => [0x24, 0x16],
            },
            // Periodic: a flat tuple match over (rate, repeatability) keeps
            // every datasheet code on a single, greppable line.
            Command::StartPeriodic { repeatability, conversion_rate } => {
                match (conversion_rate, repeatability) {
                    (ConversionRate::_0_5Hz, Repeatability::High) => [0x20, 0x32],
                    (ConversionRate::_0_5Hz, Repeatability::Medium) => [0x20, 0x24],
                    (ConversionRate::_0_5Hz, Repeatability::Low) => [0x20, 0x2F],
                    (ConversionRate::_1Hz, Repeatability::High) => [0x21, 0x30],
                    (ConversionRate::_1Hz, Repeatability::Medium) => [0x21, 0x26],
                    (ConversionRate::_1Hz, Repeatability::Low) => [0x21, 0x2D],
                    (ConversionRate::_2Hz, Repeatability::High) => [0x22, 0x36],
                    (ConversionRate::_2Hz, Repeatability::Medium) => [0x22, 0x20],
                    (ConversionRate::_2Hz, Repeatability::Low) => [0x22, 0x2B],
                    (ConversionRate::_4Hz, Repeatability::High) => [0x23, 0x34],
                    (ConversionRate::_4Hz, Repeatability::Medium) => [0x23, 0x22],
                    (ConversionRate::_4Hz, Repeatability::Low) => [0x23, 0x29],
                    (ConversionRate::_10Hz, Repeatability::High) => [0x27, 0x37],
                    (ConversionRate::_10Hz, Repeatability::Medium) => [0x27, 0x21],
                    (ConversionRate::_10Hz, Repeatability::Low) => [0x27, 0x2A],
                }
            }
            Command::FetchData => [0xE0, 0x00],
            Command::Break => [0x30, 0x93],
            Command::SoftReset => [0x30, 0xA2],
            Command::HeaterOn => [0x30, 0x6D],
            Command::HeaterOff => [0x30, 0x66],
            Command::ReadStatus => [0xF3, 0x2D],
            Command::ClearStatus => [0x30, 0x41],
        }
    }
}
// TODO: this is from shtcx, and I need to figure out how/whether to implement
/// Query the worst-case conversion time for a given repeatability.
// NOTE(review): no implementation exists yet in this file — only a
// commented-out sketch at the bottom.
pub trait MeasurementDuration {
    /// Return the maximum measurement duration according to repeatability
    /// in microseconds
    fn measurement_duration_us(repeat: Repeatability) -> u16;
}
#[doc(hidden)]
// Type states for one-shot and continuous modes.
pub mod marker {
    /// Zero-sized marker types used as the `MODE` type parameter of the
    /// driver. The private `(())` field keeps them unconstructible outside
    /// this crate.
    pub mod mode {
        /// Marker: driver is in single-shot measurement mode.
        #[derive(Debug)]
        pub struct OneShot(());
        /// Marker: driver is in periodic (continuous) measurement mode.
        #[derive(Debug)]
        pub struct Continuous(());
    }
}
/// Device Driver
///
/// `I2C` is the bus implementation; `MODE` is a type-state marker
/// (`marker::mode::OneShot` or `marker::mode::Continuous`).
#[derive(Debug, Default)]
pub struct Sts3x<I2C, MODE> {
    /// The I2C device implementation
    i2c: I2C,
    /// The 7-bit I2C device address
    // NOTE(review): the derived `Default` zeroes this field, which is not a
    // valid sensor address (0x4A/0x4B expected) — confirm `Default` is wanted.
    address: u8,
    /// The present repeatability setting
    repeatability: Repeatability,
    /// A temperature measurement was started.
    temp_measurement_started: bool,
    _mode: PhantomData<MODE>,
}
// Implement struct creation for OneShot only so that it is only possible to create a one-shot version.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
    I2C: i2c::Write<Error = E>,
{
    /// Create new instance of the Sts3x device.
    /// Defaults to Low repeatability for power savings.
    /// Change repeatability with set_repeatability().
    ///
    /// By default, the device starts in one-shot mode.
    pub fn new(i2c: I2C, address: PeripheralAddr) -> Self {
        // Delegate so the field wiring lives in exactly one place.
        Self::new_with_repeatability(i2c, address, Repeatability::default())
    }
    /// Create new instance of the Sts3x device, choosing a Repeatability.
    ///
    /// By default, the device starts in one-shot mode.
    pub fn new_with_repeatability(i2c: I2C, address: PeripheralAddr, repeatability: Repeatability) -> Self {
        Self {
            i2c,
            address: address.as_byte(),
            repeatability,
            temp_measurement_started: false,
            _mode: PhantomData,
        }
    }
}
// Methods shared by both single-shot and continuous modes
impl<I2C, MODE, E> Sts3x<I2C, MODE>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Destroy driver instance and return the I2C bus instance.
pub fn destroy(self) -> I2C {
self.i2c
}
// write and read private methods
/// Write an I2C command to the sensor
fn send_command(&mut self, command: Command) -> Result<(), Error<E>> {
self.i2c
.write(self.address, &command.as_bytes())
.map_err(Error::I2C)
}
/// Read and check the CRC.
/// Returns a Result with u16 corresponding to the MSB,LSB of the first
/// two bytes of the buffer.
fn read_with_crc(&mut self) -> Result<u16, Error<E>> {
let mut buf = [0u8; 3];
self.i2c.read(self.address, &mut buf).map_err(Error::I2C)?;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(x)
} else {
Err(Error::Crc)
}
}
fn convert_temp_to_float(temp: u16) -> f32 {
-45.0 + 175.0 * (temp as f32) / 65535.0
}
// method about measurement duration?
}
// Methods for one-shot mode only
// TODO: these are nonblocking, but don't utilize the nb concepts. Also make blocking types.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
    I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
    /// Start a one-shot temperature measurement using the repeatability
    /// that has already been set.
    ///
    /// Only issues the start command; it does not wait for, or read back,
    /// the conversion result.
    pub fn trigger_temp_meas(&mut self) -> Result<(), Error<E>> {
        self.send_command(Command::StartSingleShot{repeatability: self.repeatability})
    }
    /// Perform a one-shot temperature measurement.
    ///
    /// This allows triggering a single temperature measurement when in
    /// one-shot mode. The device returns to the low-power state at the
    /// completion of the temperature conversion, reducing power
    /// consumption when continuous temperature monitoring is not required.
    ///
    /// If no temperature conversion was started yet, calling this method
    /// will start one and return `nb::Error::WouldBlock`. Subsequent calls
    /// will continue to return `nb::Error::WouldBlock` until the
    /// temperature measurement is finished. Then it will return the
    /// measured temperature in °C.
    pub fn read_temperature(&mut self) -> nb::Result<f32, Error<E>> {
        // First call of the cycle: kick off a conversion, remember that we
        // did, and tell the caller to retry later.
        if!self.temp_measurement_started {
            self.trigger_temp_meas()
                .map_err(nb::Error::Other)?;
            self.temp_measurement_started = true;
            return Err(nb::Error::WouldBlock);
        }
        // Subsequent calls: attempt to read the 2 data bytes + CRC.
        let mut buf = [0u8; 3];
        let completion = self.i2c.read(self.address, &mut buf);
        // What I want to do:
        // match completion {
        //     Ok(val) => {
        //         // Conversion complete.
        //         let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
        //         self.temp_measurement_started = false;
        //         Ok(Self::convert_temp_to_float(x))
        //     },
        //     Err(stm32f3xx_hal::i2c::Error::Nack) => { // I want to replace with a generic path in embedded_hal
        //                                               // namespace because we shouldn't depend on a specific device HAL.
        //         Err(nb::Error::WouldBlock)
        //     },
        //     Err(e) => {
        //         Err(nb::Error::Other(Error::I2C(e))) // Not sure this is correct, but compiler doesn't complain.
        //     }
        // }
        // What I have to do with embedded_hal 0.2.4/0.2.5:
        match completion {
            Ok(_) => {
                // Conversion complete.
                self.temp_measurement_started = false;
                if crc::is_crc8_valid(&buf) {
                    let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
                    Ok(Self::convert_temp_to_float(x))
                } else {
                    Err(nb::Error::Other(Error::Crc))
                }
            },
            _ => {
                // NOTE(review): every read error — not only the NACK that
                // signals "conversion still in progress" — is mapped to
                // WouldBlock here, so a persistent bus fault never surfaces
                // to the caller. embedded-hal 0.2's fully generic error type
                // cannot distinguish NACK from other failures; revisit when
                // moving to an error-kind-aware HAL.
                Err(nb::Error::WouldBlock)
            }
        }
    }
    /// Set the repeatability used for subsequent measurements and return
    /// the value that was set.
    pub fn set_repeatability(&mut self, r: Repeatability) -> Repeatability{
        self.repeatability = r;
        r
    }
    // pub fn into_continuous(self, rate: ConversionRate) -> Result<Sts3x<I2C, marker::mode::Continuous>, ModeChangeError<E, Self>>
    // /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
    // pub fn reset_state(&mut self)
    // pub fn soft_reset(&mut self)
    // pub fn get_status(&self)
    // pub fn clear_status(&mut self)
    // pub fn heater_on(&mut self)
    // pub fn heater_off(&mut self)
}
// Methods for continuous mode only
impl<I2C, E> Sts3x<I2C, marker::mode::Continuous>
where
    I2C: i2c::Write<Error = E>,
{
    /// Get latest temperature reading.
    /// TODO: fill out — this is a placeholder that always returns the raw
    /// constant 25 and performs no I²C traffic; the real implementation
    /// should issue `Command::FetchData` and read back the result.
    pub fn read_temperature(&self) -> u16 {
        25
    }
    // /// Convert to one-shot mode.
    // /// TODO: add the command to change and a failure error.
    // pub fn into_one_shot(self) -> Result<Sts3x<I2C, marker::mode::OneShot>, ModeChangeError<E, Self>> {
    //     Result(Sts3x {
    //         i2c: self.i2c,
    //         address: self.address,
    //         repeatability: self.repeatability,
    //         temp_measurement_started: false,
    //         _mode: PhantomData,
    //     })
    // }
    // /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
    // /// This will convert into a one-shot mode device.
    // pub fn reset_state(mut self)
}
// impl MeasurementDuration for Sts3x<I2C, MODE> {
// // TODO: fill out fn measurement_duration
// fn measurement_duration_us(repeat: Repeatability) -> u16 { | // } | // 20
// } | random_line_split |
lib.rs | //! This is a platform-agnostic Rust driver for the Sensirion STS30, STS31, and STS35
//! high-accuracy, low-power, I2C digital temperature sensors, based on the
//! [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! TODO: More information here.
//!
//! The driver borrows liberally from:
//! - eldruin's tmp1x2-rs driver for Texas Instruments TMP102 and TMP112, https://github.com/eldruin/tmp1x2-rs, and
//! - dbrgn's shtcx-rs driver for Sensirion SHTCx temperature/humidity sensors, https://github.com/dbrgn/shtcx-rs.
#![deny(unsafe_code)]
#![no_std]
// TODO: add deny missing docs, and doc root url
mod crc;
use core::marker::PhantomData;
use embedded_hal::blocking::i2c; // TODO: move to using nb if the crate adds a nonblocking I2C.
pub use nb;
/// Possible errors in this crate
#[derive(Debug)]
pub enum Error<E> {
/// I²C bus error
I2C(E),
/// CRC checksum validation failed
Crc,
}
/// Error type for mode changes.
/// This allows us to retrieve the unchanged device in case of an error.
#[derive(Debug)]
pub enum ModeChangeError<E, DEV> {
/// I²C bus error while changing modes
///
/// `E` is the error that happened.
/// `DEV` is the device with the mode unchanged.
I2C(E, DEV),
}
/// Conversion rate for continuous conversion mode.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ConversionRate {
/// 0.5 Hz
_0_5Hz,
/// 1 Hz
_1Hz,
/// 2 Hz
_2Hz,
/// 4 Hz
_4Hz,
/// 10 Hz
_10Hz,
}
/// Repeatability condition for both one-shot and continuous modes.
/// From the datasheet: the value is 3 * standard deviation of measurements at constant ambient.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Repeatability {
/// High repeatability 0.04°C
High,
/// Medium repeatability 0.08°C
Medium,
/// Low repeatability 0.15°C
Low,
}
impl Default for Repeatability {
fn default() -> Self {
Repeatability::Low
}
}
/// Possible peripheral addresses
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PeripheralAddr {
/// Default address, with address pin held low
PinLow,
/// Address with the pin held high
PinHigh,
}
impl Default for PeripheralAddr {
fn default() -> Self {
PeripheralAddr::PinLow
}
}
impl PeripheralAddr {
/// Return the 7-bit I2C address corresponding to the enum.
fn as_byte(self) -> u8 {
match self {
PeripheralAddr::PinLow => 0x4A,
PeripheralAddr::PinHigh => 0x4B,
}
}
}
/// I²C commands sent to the sensor
#[derive(Debug)]
enum Command {
    /// Initiate a single-shot conversion.
    StartSingleShot {repeatability: Repeatability},
    /// Change to periodic mode with the given repeatability and conversion rates.
    StartPeriodic {
        repeatability: Repeatability,
        conversion_rate: ConversionRate,
    },
    /// Fetch data from the sensor when it is in continuous mode.
    FetchData,
    /// Break out of continuous mode and return to one-shot mode.
    Break,
    /// Issue a software reset.
    SoftReset,
    /// Turn the heater on for plausibility checking.
    HeaterOn,
    /// Turn the heater off.
    HeaterOff,
    /// Read the status register.
    ReadStatus,
    /// Clear the status register.
    ClearStatus,
}
impl Command {
/// Return a slice of two bytes corresponding to the command.
/// These are the bytes the sensor expects.
fn as_bytes(self) -> [u8; 2] {
match self {
// For single shot, listing each variant directly.
Command::StartSingleShot{repeatability: Repeatability::High} => [0x24, 0x00],
Command::StartSingleShot{repeatability: Repeatability::Medium} => [0x24, 0x0B],
Command::StartSingleShot{repeatability: Repeatability::Low} => [0x24, 0x16],
// For periodic, using nested matches, more lines of code, but hopefully more readable.
Command::StartPeriodic{repeatability: r, conversion_rate: c} => {
match c {
ConversionRate::_0_5Hz => {
match r {
Repeatability::High => [0x20, 0x32],
Repeatability::Medium => [0x20, 0x24],
Repeatability::Low => [0x20, 0x2F],
}
},
ConversionRate::_1Hz => {
match r {
Repeatability::High => [0x21, 0x30],
Repeatability::Medium => [0x21, 0x26],
Repeatability::Low => [0x21, 0x2D],
}
},
ConversionRate::_2Hz => {
match r {
Repeatability::High => [0x22, 0x36],
Repeatability::Medium => [0x22, 0x20],
Repeatability::Low => [0x22, 0x2B],
}
},
ConversionRate::_4Hz => {
| ConversionRate::_10Hz => {
match r {
Repeatability::High => [0x27, 0x37],
Repeatability::Medium => [0x27, 0x21],
Repeatability::Low => [0x27, 0x2A],
}
},
}
},
Command::FetchData => [0xE0, 0x00],
Command::Break => [0x30, 0x93],
Command::SoftReset => [0x30, 0xA2],
Command::HeaterOn => [0x30, 0x6D],
Command::HeaterOff => [0x30, 0x66],
Command::ReadStatus => [0xF3, 0x2D],
Command::ClearStatus => [0x30, 0x41],
}
}
}
// TODO: this is from shtcx, and I need to figure out how/whether to implement
pub trait MeasurementDuration {
/// Return the maximum measurement duration according to repeatability
/// in microseconds
fn measurement_duration_us(repeat: Repeatability) -> u16;
}
#[doc(hidden)]
// Type states for one-shot and continuous modes.
pub mod marker {
pub mod mode {
#[derive(Debug)]
pub struct OneShot(());
#[derive(Debug)]
pub struct Continuous(());
}
}
/// Device Driver
///
/// `I2C` is the bus implementation; `MODE` is a type-state marker
/// (`marker::mode::OneShot` or `marker::mode::Continuous`).
#[derive(Debug, Default)]
pub struct Sts3x<I2C, MODE> {
    /// The I2C device implementation
    i2c: I2C,
    /// The 7-bit I2C device address
    // NOTE(review): the derived `Default` zeroes this field, which is not a
    // valid sensor address (0x4A/0x4B expected) — confirm `Default` is wanted.
    address: u8,
    /// The present repeatability setting
    repeatability: Repeatability,
    /// A temperature measurement was started.
    temp_measurement_started: bool,
    _mode: PhantomData<MODE>,
}
// Implement struct creation for OneShot only so that it is only possible to create a one-shot version.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Write<Error = E>,
{
/// Create new instance of the Sts3x device.
/// Defaults to Low repeatability for power savings.
/// Change repeatability with set_repeatability().
///
/// By default, the device starts in one-shot mode.
pub fn new(i2c: I2C, address: PeripheralAddr) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability: Repeatability::default(),
temp_measurement_started: false,
_mode: PhantomData,
}
}
/// Create new instance of the Sts3x device, choosing a Repeatability.
///
/// By default, the device starts in one-shot mode.
pub fn new_with_repeatability(i2c: I2C, address: PeripheralAddr, repeatability: Repeatability) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability,
temp_measurement_started: false,
_mode: PhantomData,
}
}
}
// Methods shared by both single-shot and continuous modes
impl<I2C, MODE, E> Sts3x<I2C, MODE>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Destroy driver instance and return the I2C bus instance.
pub fn destroy(self) -> I2C {
self.i2c
}
// write and read private methods
/// Write an I2C command to the sensor
fn send_command(&mut self, command: Command) -> Result<(), Error<E>> {
self.i2c
.write(self.address, &command.as_bytes())
.map_err(Error::I2C)
}
/// Read and check the CRC.
/// Returns a Result with u16 corresponding to the MSB,LSB of the first
/// two bytes of the buffer.
fn read_with_crc(&mut self) -> Result<u16, Error<E>> {
let mut buf = [0u8; 3];
self.i2c.read(self.address, &mut buf).map_err(Error::I2C)?;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(x)
} else {
Err(Error::Crc)
}
}
fn convert_temp_to_float(temp: u16) -> f32 {
-45.0 + 175.0 * (temp as f32) / 65535.0
}
// method about measurement duration?
}
// Methods for one-shot mode only
// TODO: these are nonblocking, but don't utilize the nb concepts. Also make blocking types.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Start a one-shot temperature measurement using the repeatability
/// that has already been set.
pub fn trigger_temp_meas(&mut self) -> Result<(), Error<E>> {
self.send_command(Command::StartSingleShot{repeatability: self.repeatability})
}
/// Perform a one-shot temperature measurement.
///
/// This allows triggering a single temperature measurement when in
/// one-shot mode. The device returns to the low-power state at the
/// completion of the temperature conversion, reducing power
/// consumption when continuous temperature monitoring is not required.
///
/// If no temperature conversion was started yet, calling this method
/// will start one and return `nb::Error::WouldBlock`. Subsequent calls
/// will continue to return `nb::Error::WouldBlock` until the
/// temperature measurement is finished. Then it will return the
/// measured temperature in °C.
pub fn read_temperature(&mut self) -> nb::Result<f32, Error<E>> {
if!self.temp_measurement_started {
self.trigger_temp_meas()
.map_err(nb::Error::Other)?;
self.temp_measurement_started = true;
return Err(nb::Error::WouldBlock);
}
let mut buf = [0u8; 3];
let completion = self.i2c.read(self.address, &mut buf);
// What I want to do:
// match completion {
// Ok(val) => {
// // Conversion complete.
// let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
// self.temp_measurement_started = false;
// Ok(Self::convert_temp_to_float(x))
// },
// Err(stm32f3xx_hal::i2c::Error::Nack) => { // I want to replace with a generic path in embedded_hal
// // namespace because we shouldn't depend on a specific device HAL.
// Err(nb::Error::WouldBlock)
// },
// Err(e) => {
// Err(nb::Error::Other(Error::I2C(e))) // Not sure this is correct, but compiler doesn't complain.
// }
// }
// What I have to do with embedded_hal 0.2.4/0.2.5:
match completion {
Ok(_) => {
// Conversion complete.
self.temp_measurement_started = false;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(Self::convert_temp_to_float(x))
} else {
Err(nb::Error::Other(Error::Crc))
}
},
_ => {
Err(nb::Error::WouldBlock)
}
}
}
pub fn set_repeatability(&mut self, r: Repeatability) -> Repeatability{
self.repeatability = r;
r
}
// pub fn into_continuous(self, rate: ConversionRate) -> Result<Sts3x<I2C, marker::mode::Continuous>, ModeChangeError<E, Self>>
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// pub fn reset_state(&mut self)
// pub fn soft_reset(&mut self)
// pub fn get_status(&self)
// pub fn clear_status(&mut self)
// pub fn heater_on(&mut self)
// pub fn heater_off(&mut self)
}
// Methods for continuous mode only
impl<I2C, E> Sts3x<I2C, marker::mode::Continuous>
where
I2C: i2c::Write<Error = E>,
{
/// Get latest temperature reading.
/// TODO: fill out
pub fn read_temperature(&self) -> u16 {
25
}
// /// Convert to one-shot mode.
// /// TODO: add the command to change and a failure error.
// pub fn into_one_shot(self) -> Result<Sts3x<I2C, marker::mode::OneShot>, ModeChangeError<E, Self>> {
// Result(Sts3x {
// i2c: self.i2c,
// address: self.address,
// repeatability: self.repeatability,
// temp_measurement_started: false,
// _mode: PhantomData,
// })
// }
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// /// This will convert into a one-shot mode device.
// pub fn reset_state(mut self)
}
// impl MeasurementDuration for Sts3x<I2C, MODE> {
// // TODO: fill out fn measurement_duration
// fn measurement_duration_us(repeat: Repeatability) -> u16 {
// 20
// }
// }
| match r {
Repeatability::High => [0x23, 0x34],
Repeatability::Medium => [0x23, 0x22],
Repeatability::Low => [0x23, 0x29],
}
},
| conditional_block |
lib.rs | //! This is a platform-agnostic Rust driver for the Sensirion STS30, STS31, and STS35
//! high-accuracy, low-power, I2C digital temperature sensors, based on the
//! [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! TODO: More information here.
//!
//! The driver borrows liberally from:
//! - eldruin's tmp1x2-rs driver for Texas Instruments TMP102 and TMP112, https://github.com/eldruin/tmp1x2-rs, and
//! - dbrgn's shtcx-rs driver for Sensirion SHTCx temperature/humidity sensors, https://github.com/dbrgn/shtcx-rs.
#![deny(unsafe_code)]
#![no_std]
// TODO: add deny missing docs, and doc root url
mod crc;
use core::marker::PhantomData;
use embedded_hal::blocking::i2c; // TODO: move to using nb if the crate adds a nonblocking I2C.
pub use nb;
/// Possible errors in this crate
#[derive(Debug)]
pub enum Error<E> {
/// I²C bus error
I2C(E),
/// CRC checksum validation failed
Crc,
}
/// Error type for mode changes.
/// This allows us to retrieve the unchanged device in case of an error.
#[derive(Debug)]
pub enum ModeChangeError<E, DEV> {
/// I²C bus error while changing modes
///
/// `E` is the error that happened.
/// `DEV` is the device with the mode unchanged.
I2C(E, DEV),
}
/// Conversion rate for continuous conversion mode.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ConversionRate {
/// 0.5 Hz
_0_5Hz,
/// 1 Hz
_1Hz,
/// 2 Hz
_2Hz,
/// 4 Hz
_4Hz,
/// 10 Hz
_10Hz,
}
/// Repeatability condition for both one-shot and continuous modes.
/// From the datasheet: the value is 3 * standard deviation of measurements at constant ambient.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Repeatability {
/// High repeatability 0.04°C
High,
/// Medium repeatability 0.08°C
Medium,
/// Low repeatability 0.15°C
Low,
}
impl Default for Repeatability {
fn default() -> Self {
Repeatability::Low
}
}
/// Possible peripheral addresses
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PeripheralAddr {
/// Default address, with address pin held low
PinLow,
/// Address with the pin held high
PinHigh,
}
impl Default for PeripheralAddr {
fn default() -> Self {
PeripheralAddr::PinLow
}
}
impl PeripheralAddr {
/// Return the 7-bit I2C address corresponding to the enum.
fn as_byte(self) -> u8 {
match self {
PeripheralAddr::PinLow => 0x4A,
PeripheralAddr::PinHigh => 0x4B,
}
}
}
/// I²C commands sent to the sensor
#[derive(Debug)]
enum Command {
/// Initiate a single-shot conversion.
StartSingleShot {repeatability: Repeatability},
/// Change to periodic mode with the given repeatability and conversion rates.
StartPeriodic {
repeatability: Repeatability,
conversion_rate: ConversionRate,
},
/// Fetch data from the sensor when it is in continuous mode.
FetchData,
/// Break out of continuous mode and return to one-shot mdoe.
Break,
/// Issue a software reset.
SoftReset,
/// Turn the heater on for plausibility checking.
HeaterOn,
/// Turn the heater off.
HeaterOff,
/// Read the status register.
ReadStatus,
/// Clear the status register.
ClearStatus,
}
impl Command {
/// Return a slice of two bytes corresponding to the command.
/// These are the bytes the sensor expects.
fn as_bytes(self) -> [u8; 2] {
match self {
// For single shot, listing each variant directly.
Command::StartSingleShot{repeatability: Repeatability::High} => [0x24, 0x00],
Command::StartSingleShot{repeatability: Repeatability::Medium} => [0x24, 0x0B],
Command::StartSingleShot{repeatability: Repeatability::Low} => [0x24, 0x16],
// For periodic, using nested matches, more lines of code, but hopefully more readable.
Command::StartPeriodic{repeatability: r, conversion_rate: c} => {
match c {
ConversionRate::_0_5Hz => {
match r {
Repeatability::High => [0x20, 0x32],
Repeatability::Medium => [0x20, 0x24],
Repeatability::Low => [0x20, 0x2F],
}
},
ConversionRate::_1Hz => {
match r {
Repeatability::High => [0x21, 0x30],
Repeatability::Medium => [0x21, 0x26],
Repeatability::Low => [0x21, 0x2D],
}
},
ConversionRate::_2Hz => {
match r {
Repeatability::High => [0x22, 0x36],
Repeatability::Medium => [0x22, 0x20],
Repeatability::Low => [0x22, 0x2B],
}
},
ConversionRate::_4Hz => {
match r {
Repeatability::High => [0x23, 0x34],
Repeatability::Medium => [0x23, 0x22],
Repeatability::Low => [0x23, 0x29],
}
},
ConversionRate::_10Hz => {
match r {
Repeatability::High => [0x27, 0x37],
Repeatability::Medium => [0x27, 0x21],
Repeatability::Low => [0x27, 0x2A],
}
},
}
},
Command::FetchData => [0xE0, 0x00],
Command::Break => [0x30, 0x93],
Command::SoftReset => [0x30, 0xA2],
Command::HeaterOn => [0x30, 0x6D],
Command::HeaterOff => [0x30, 0x66],
Command::ReadStatus => [0xF3, 0x2D],
Command::ClearStatus => [0x30, 0x41],
}
}
}
// TODO: this is from shtcx, and I need to figure out how/whether to implement
pub trait MeasurementDuration {
/// Return the maximum measurement duration according to repeatability
/// in microseconds
fn measurement_duration_us(repeat: Repeatability) -> u16;
}
#[doc(hidden)]
// Type states for one-shot and continuous modes.
pub mod marker {
pub mod mode {
#[derive(Debug)]
pub struct OneShot(());
#[derive(Debug)]
pub struct Continuous(());
}
}
/// Device Driver
#[derive(Debug, Default)]
pub struct Sts3x<I2C, MODE> {
/// The I2C device implementation
i2c: I2C,
/// The 7-bit I2C device address
address: u8,
/// The present repeatabiliy setting
repeatability: Repeatability,
/// A temperature measurement was started.
temp_measurement_started: bool,
_mode: PhantomData<MODE>,
}
// Implement struct creation for OneShot only so that it is only possible to create a one-shot version.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Write<Error = E>,
{
/// Create new instance of the Sts3x device.
/// Defaults to Low repeatability for power savings.
/// Change repeatability with set_repeatability().
///
/// By default, the device starts in one-shot mode.
pub fn new(i2c: I2C, address: PeripheralAddr) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability: Repeatability::default(),
temp_measurement_started: false,
_mode: PhantomData,
}
}
/// Create new instance of the Sts3x device, choosing a Repeatability.
///
/// By default, the device starts in one-shot mode.
pub fn new_with_repeatability(i2c: I2C, address: PeripheralAddr, repeatability: Repeatability) -> Self {
Sts3x {
i2c,
address: address.as_byte(),
repeatability,
temp_measurement_started: false,
_mode: PhantomData,
}
}
}
// Methods shared by both single-shot and continuous modes
impl<I2C, MODE, E> Sts3x<I2C, MODE>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Destroy driver instance and return the I2C bus instance.
pub fn destroy(self) -> I2C {
self.i2c
}
// write and read private methods
/// Write an I2C command to the sensor
fn send_command(&mut self, command: Command) -> Result<(), Error<E>> {
self.i2c
.write(self.address, &command.as_bytes())
.map_err(Error::I2C)
}
/// Read and check the CRC.
/// Returns a Result with u16 corresponding to the MSB,LSB of the first
/// two bytes of the buffer.
fn read_with_crc(&mut self) -> Result<u16, Error<E>> {
let mut buf = [0u8; 3];
self.i2c.read(self.address, &mut buf).map_err(Error::I2C)?;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(x)
} else {
Err(Error::Crc)
}
}
fn convert_temp_to_float(temp: u16) -> f32 {
-45.0 + 175.0 * (temp as f32) / 65535.0
}
// method about measurement duration?
}
// Methods for one-shot mode only
// TODO: these are nonblocking, but don't utilize the nb concepts. Also make blocking types.
impl<I2C, E> Sts3x<I2C, marker::mode::OneShot>
where
I2C: i2c::Read<Error = E> + i2c::Write<Error = E>,
{
/// Start a one-shot temperature measurement using the repeatability
/// that has already been set.
pub fn trigger_temp_meas(&mut self) -> Result<(), Error<E>> {
self.send_command(Command::StartSingleShot{repeatability: self.repeatability})
}
/// Perform a one-shot temperature measurement.
///
/// This allows triggering a single temperature measurement when in
/// one-shot mode. The device returns to the low-power state at the
/// completion of the temperature conversion, reducing power
/// consumption when continuous temperature monitoring is not required.
///
/// If no temperature conversion was started yet, calling this method
/// will start one and return `nb::Error::WouldBlock`. Subsequent calls
/// will continue to return `nb::Error::WouldBlock` until the
/// temperature measurement is finished. Then it will return the
/// measured temperature in °C.
pub fn read_temperature(&mut self) -> nb::Result<f32, Error<E>> {
if!self.temp_measurement_started {
self.trigger_temp_meas()
.map_err(nb::Error::Other)?;
self.temp_measurement_started = true;
return Err(nb::Error::WouldBlock);
}
let mut buf = [0u8; 3];
let completion = self.i2c.read(self.address, &mut buf);
// What I want to do:
// match completion {
// Ok(val) => {
// // Conversion complete.
// let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
// self.temp_measurement_started = false;
// Ok(Self::convert_temp_to_float(x))
// },
// Err(stm32f3xx_hal::i2c::Error::Nack) => { // I want to replace with a generic path in embedded_hal
// // namespace because we shouldn't depend on a specific device HAL.
// Err(nb::Error::WouldBlock)
// },
// Err(e) => {
// Err(nb::Error::Other(Error::I2C(e))) // Not sure this is correct, but compiler doesn't complain.
// }
// }
// What I have to do with embedded_hal 0.2.4/0.2.5:
match completion {
Ok(_) => {
// Conversion complete.
self.temp_measurement_started = false;
if crc::is_crc8_valid(&buf) {
let x: u16 = (buf[0] as u16) << 8 | (buf[1] as u16);
Ok(Self::convert_temp_to_float(x))
} else {
Err(nb::Error::Other(Error::Crc))
}
},
_ => {
Err(nb::Error::WouldBlock)
}
}
}
pub fn set_rep | elf, r: Repeatability) -> Repeatability{
self.repeatability = r;
r
}
// pub fn into_continuous(self, rate: ConversionRate) -> Result<Sts3x<I2C, marker::mode::Continuous>, ModeChangeError<E, Self>>
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// pub fn reset_state(&mut self)
// pub fn soft_reset(&mut self)
// pub fn get_status(&self)
// pub fn clear_status(&mut self)
// pub fn heater_on(&mut self)
// pub fn heater_off(&mut self)
}
// Methods for continuous mode only
impl<I2C, E> Sts3x<I2C, marker::mode::Continuous>
where
I2C: i2c::Write<Error = E>,
{
/// Get latest temperature reading.
/// TODO: fill out
pub fn read_temperature(&self) -> u16 {
25
}
// /// Convert to one-shot mode.
// /// TODO: add the command to change and a failure error.
// pub fn into_one_shot(self) -> Result<Sts3x<I2C, marker::mode::OneShot>, ModeChangeError<E, Self>> {
// Result(Sts3x {
// i2c: self.i2c,
// address: self.address,
// repeatability: self.repeatability,
// temp_measurement_started: false,
// _mode: PhantomData,
// })
// }
// /// Reset the state of the driver, to be used if there was a "general call" on the I2C bus.
// /// This will convert into a one-shot mode device.
// pub fn reset_state(mut self)
}
// impl MeasurementDuration for Sts3x<I2C, MODE> {
// // TODO: fill out fn measurement_duration
// fn measurement_duration_us(repeat: Repeatability) -> u16 {
// 20
// }
// }
| eatability(&mut s | identifier_name |
softmax.rs | use alumina_core::{
base_ops::{OpInstance, OpSpecification},
errors::{ExecutionError, GradientError, OpBuildError, ShapePropError},
exec::ExecutionContext,
grad::GradientContext,
graph::{Graph, Node, NodeID},
shape_prop::ShapePropContext,
util::wrap_dim,
};
use indexmap::{indexset, IndexMap, IndexSet};
use ndarray::{Axis, Dimension, Zip};
use std::any::Any;
/// Calculates the combined Softmax norm of the input nodes.
///
/// Axis determines the grouping direction.
pub fn softmax<I>(logits: I, axis: isize) -> Result<Node, OpBuildError>
where
I: Into<Node>,
{
let logits = logits.into();
let axis = wrap_dim(axis, logits.shape().len());
let output = logits.graph().new_node(logits.shape());
Softmax::new(logits, output.clone(), axis).build()?;
Ok(output)
}
#[must_use = "Op builder not used, call.build()"]
#[derive(Clone, Debug)]
pub struct Softmax {
logits: Node,
output: Node,
axis: usize,
}
impl Softmax {
pub fn new<I, O>(logits: I, output: O, axis: usize) -> Self
where
I: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let output = output.into();
assert!(
logits.shape().len() == output.shape().len(),
"output and logits must have the same shape"
);
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
Softmax { logits, output, axis }
}
}
impl OpSpecification for Softmax {
type InstanceType = SoftmaxInstance;
fn type_name(&self) -> &'static str {
"Softmax"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.output.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output: mapping.get(&self.output).unwrap_or(&self.output).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxInstance {
logits: self.logits.id(),
output: self.output.id(),
axis: self.axis,
})
}
}
/// Softmax OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxInstance {
logits: NodeID,
output: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxInstance {
fn type_name(&self) -> &'static str {
"Softmax"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(Softmax {
logits: graph.node_from_id(self.logits),
output: graph.node_from_id(self.output),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.output]
}
fn gradient(&self, ctx: &mut GradientContext) -> Result<(), GradientError> {
SoftmaxBack::new(
ctx.node(&self.logits),
ctx.grad_of(&self.logits),
ctx.grad_of(&self.output),
self.axis,
)
.build()?;
Ok(())
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
ctx.merge_output_shape(&self.output, &ctx.input_shape(&self.logits).slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_output(&self.output).lanes_mut(Axis(self.axis)))
.par_for_each(|logits, outputs| {
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0.0, |sum, &v| sum + (v - max).exp());
Zip::from(logits).and(outputs).for_each(|logit, output| {
*output += (logit - max).exp() / exp_sum;
});
});
Ok(())
}
}
/// Optimised Backward pass for Softmax Op.
///
/// Input/Output naming convention matches Softmax Input/Outputs, i.e. output_grad is an input to this Op.
///
/// All inputs and grads must be unique.
#[must_use = "Op builder not used, call.build()"]
#[derive(Clone, Debug)]
pub struct SoftmaxBack {
logits: Node,
logits_grad: Node,
output_grad: Node,
axis: usize,
}
impl SoftmaxBack {
pub fn new<I1, I2, O>(logits: I1, logits_grad: O, output_grad: I2, axis: usize) -> Self
where
I1: Into<Node>,
I2: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let logits_grad = logits_grad.into();
let output_grad = output_grad.into();
assert!(logits.shape().len() == logits_grad.shape().len());
assert!(logits.shape().len() == output_grad.shape().len());
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
SoftmaxBack {
logits,
logits_grad,
output_grad,
axis,
}
}
}
impl OpSpecification for SoftmaxBack {
type InstanceType = SoftmaxBackInstance;
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone(), self.output_grad.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.logits_grad.clone()]
}
fn | (&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output_grad: mapping.get(&self.output_grad).unwrap_or(&self.output_grad).clone(),
logits_grad: mapping.get(&self.logits_grad).unwrap_or(&self.logits_grad).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxBackInstance {
logits: self.logits.id(),
logits_grad: self.logits_grad.id(),
output_grad: self.output_grad.id(),
axis: self.axis,
})
}
}
/// SoftmaxBack OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxBackInstance {
logits: NodeID,
logits_grad: NodeID,
output_grad: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxBackInstance {
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(SoftmaxBack {
logits: graph.node_from_id(self.logits),
logits_grad: graph.node_from_id(self.logits_grad),
output_grad: graph.node_from_id(self.output_grad),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits, self.output_grad]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.logits_grad]
}
fn gradient(&self, _ctx: &mut GradientContext) -> Result<(), GradientError> {
Err(GradientError::Unimplemented)
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
let logits_shape = ctx.input_shape(&self.logits).clone();
let output_grad_shape = ctx.input_shape(&self.output_grad).clone();
if logits_shape.ndim() == 0 {
return Err(format!(
"Softmax requires logit and label shapes to have at least one axis: {:?}",
logits_shape.slice(),
)
.into());
}
if output_grad_shape!= logits_shape {
return Err(format!("SoftmaxBack requires the output grad to have the shape of the logits: logits:{:?} output_grad:{:?}, axis: {}", logits_shape.slice(), output_grad_shape.slice(), self.axis).into());
}
ctx.merge_output_shape(&self.logits_grad, &logits_shape.slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_output(&self.logits_grad).lanes_mut(Axis(self.axis)))
.and(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_input(&self.output_grad).lanes(Axis(self.axis)))
.par_for_each(|mut logits_grad, logits, output_grad| {
let len = logits.len();
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0., |sum, &v| sum + (v - max).exp());
// let exp_sum_ln = exp_sum.ln();
for (i, grad) in output_grad.iter().enumerate() {
if grad.abs() > 0.0 {
// hopefully output gradients are sparse, eg from cross entropy loss
let a = logits[i] - max;
// let x = (a - exp_sum_ln).exp();
let x = a.exp() / exp_sum;
let g_x = grad * x;
let mut other_sum = 0.0;
for j in 0..i {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b - exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
// logits_grad[i] += g_x - g_x * x;
// inpd_n[i] += - inp_n.iter().enumerate().fold(0., |sum, (ind, v)| sum + if ind!= i
// {(v-max).exp()} else {0.0})*(mult/exp_sum);
for j in i + 1..len {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b- exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
logits_grad[i] += g_x * other_sum;
}
}
});
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::softmax;
use crate::elementwise::mul::mul;
use alumina_core::graph::Node;
use alumina_test::{grad_numeric_test::GradNumericTest, relatively_close::RelClose};
use indexmap::indexset;
use ndarray::arr2;
#[test]
fn forward_test() {
let logits = Node::new(&[4, 4])
.set_value(arr2(&[
[0.2, 0.4, 0.6, 0.8],
[1.2, 1.4, 1.6, 1.8],
[2.2, 2.4, 2.6, 2.8],
[3.2, 3.4, 3.6, 3.8],
]))
.set_name("logits");
let hor_groups = softmax(&logits, -1).unwrap();
let vert_groups = softmax(&logits, 0).unwrap();
assert!(hor_groups
.calc()
.unwrap()
.all_relatively_close(&arr2(&[[0.180_657_18, 0.220_655_17, 0.269_508_84, 0.329_178_84]]), 1e-4));
assert!(vert_groups.calc().unwrap().all_relatively_close(
&arr2(&[[0.032_058_604], [0.087_144_32f32], [0.236_882_82], [0.643_914_3]]),
1e-4
));
}
#[test]
fn grad_numeric_rand_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let rand = Node::new(&[13, 33]).set_name("rand"); // multiply output by random amounts to prevent gradient cancellation
let output = mul(&softmax(&logits, -1).unwrap(), &rand).unwrap();
GradNumericTest::new(&output, &indexset![&logits, &rand])
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
#[test]
fn grad_numeric_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let output = softmax(&logits, -1).unwrap();
GradNumericTest::new(&output, &indexset![&logits])
.expect_zero(&logits, 20.0 * ::std::f32::EPSILON) // under a uniform output gradient the gradient of the logits should cancel out to zero
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
}
| clone_with_nodes_changed | identifier_name |
softmax.rs | use alumina_core::{
base_ops::{OpInstance, OpSpecification},
errors::{ExecutionError, GradientError, OpBuildError, ShapePropError},
exec::ExecutionContext,
grad::GradientContext,
graph::{Graph, Node, NodeID},
shape_prop::ShapePropContext,
util::wrap_dim,
};
use indexmap::{indexset, IndexMap, IndexSet};
use ndarray::{Axis, Dimension, Zip};
use std::any::Any;
/// Calculates the combined Softmax norm of the input nodes.
///
/// Axis determines the grouping direction.
pub fn softmax<I>(logits: I, axis: isize) -> Result<Node, OpBuildError>
where
I: Into<Node>,
{
let logits = logits.into();
let axis = wrap_dim(axis, logits.shape().len());
let output = logits.graph().new_node(logits.shape());
Softmax::new(logits, output.clone(), axis).build()?;
Ok(output)
}
#[must_use = "Op builder not used, call.build()"]
#[derive(Clone, Debug)]
pub struct Softmax {
logits: Node,
output: Node,
axis: usize,
}
impl Softmax {
pub fn new<I, O>(logits: I, output: O, axis: usize) -> Self
where
I: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let output = output.into();
assert!(
logits.shape().len() == output.shape().len(),
"output and logits must have the same shape"
);
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
Softmax { logits, output, axis }
}
}
impl OpSpecification for Softmax {
type InstanceType = SoftmaxInstance;
fn type_name(&self) -> &'static str {
"Softmax"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.output.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output: mapping.get(&self.output).unwrap_or(&self.output).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxInstance {
logits: self.logits.id(),
output: self.output.id(),
axis: self.axis,
})
}
}
/// Softmax OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxInstance {
logits: NodeID,
output: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxInstance {
fn type_name(&self) -> &'static str {
"Softmax"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(Softmax {
logits: graph.node_from_id(self.logits),
output: graph.node_from_id(self.output),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.output]
}
fn gradient(&self, ctx: &mut GradientContext) -> Result<(), GradientError> {
SoftmaxBack::new(
ctx.node(&self.logits),
ctx.grad_of(&self.logits),
ctx.grad_of(&self.output),
self.axis,
)
.build()?;
Ok(())
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
ctx.merge_output_shape(&self.output, &ctx.input_shape(&self.logits).slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_output(&self.output).lanes_mut(Axis(self.axis)))
.par_for_each(|logits, outputs| {
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0.0, |sum, &v| sum + (v - max).exp());
Zip::from(logits).and(outputs).for_each(|logit, output| {
*output += (logit - max).exp() / exp_sum;
});
});
Ok(())
}
}
/// Optimised Backward pass for Softmax Op.
///
/// Input/Output naming convention matches Softmax Input/Outputs, i.e. output_grad is an input to this Op.
///
/// All inputs and grads must be unique.
#[must_use = "Op builder not used, call.build()"]
#[derive(Clone, Debug)]
pub struct SoftmaxBack {
logits: Node,
logits_grad: Node,
output_grad: Node,
axis: usize,
}
impl SoftmaxBack {
pub fn new<I1, I2, O>(logits: I1, logits_grad: O, output_grad: I2, axis: usize) -> Self
where
I1: Into<Node>,
I2: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let logits_grad = logits_grad.into();
let output_grad = output_grad.into();
assert!(logits.shape().len() == logits_grad.shape().len());
assert!(logits.shape().len() == output_grad.shape().len());
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
SoftmaxBack {
logits,
logits_grad,
output_grad,
axis,
}
}
}
impl OpSpecification for SoftmaxBack {
type InstanceType = SoftmaxBackInstance;
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone(), self.output_grad.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.logits_grad.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output_grad: mapping.get(&self.output_grad).unwrap_or(&self.output_grad).clone(),
logits_grad: mapping.get(&self.logits_grad).unwrap_or(&self.logits_grad).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxBackInstance {
logits: self.logits.id(),
logits_grad: self.logits_grad.id(),
output_grad: self.output_grad.id(), | })
}
}
/// SoftmaxBack OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxBackInstance {
logits: NodeID,
logits_grad: NodeID,
output_grad: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxBackInstance {
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(SoftmaxBack {
logits: graph.node_from_id(self.logits),
logits_grad: graph.node_from_id(self.logits_grad),
output_grad: graph.node_from_id(self.output_grad),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits, self.output_grad]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.logits_grad]
}
fn gradient(&self, _ctx: &mut GradientContext) -> Result<(), GradientError> {
Err(GradientError::Unimplemented)
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
let logits_shape = ctx.input_shape(&self.logits).clone();
let output_grad_shape = ctx.input_shape(&self.output_grad).clone();
if logits_shape.ndim() == 0 {
return Err(format!(
"Softmax requires logit and label shapes to have at least one axis: {:?}",
logits_shape.slice(),
)
.into());
}
if output_grad_shape!= logits_shape {
return Err(format!("SoftmaxBack requires the output grad to have the shape of the logits: logits:{:?} output_grad:{:?}, axis: {}", logits_shape.slice(), output_grad_shape.slice(), self.axis).into());
}
ctx.merge_output_shape(&self.logits_grad, &logits_shape.slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_output(&self.logits_grad).lanes_mut(Axis(self.axis)))
.and(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_input(&self.output_grad).lanes(Axis(self.axis)))
.par_for_each(|mut logits_grad, logits, output_grad| {
let len = logits.len();
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0., |sum, &v| sum + (v - max).exp());
// let exp_sum_ln = exp_sum.ln();
for (i, grad) in output_grad.iter().enumerate() {
if grad.abs() > 0.0 {
// hopefully output gradients are sparse, eg from cross entropy loss
let a = logits[i] - max;
// let x = (a - exp_sum_ln).exp();
let x = a.exp() / exp_sum;
let g_x = grad * x;
let mut other_sum = 0.0;
for j in 0..i {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b - exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
// logits_grad[i] += g_x - g_x * x;
// inpd_n[i] += - inp_n.iter().enumerate().fold(0., |sum, (ind, v)| sum + if ind!= i
// {(v-max).exp()} else {0.0})*(mult/exp_sum);
for j in i + 1..len {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b- exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
logits_grad[i] += g_x * other_sum;
}
}
});
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::softmax;
use crate::elementwise::mul::mul;
use alumina_core::graph::Node;
use alumina_test::{grad_numeric_test::GradNumericTest, relatively_close::RelClose};
use indexmap::indexset;
use ndarray::arr2;
#[test]
fn forward_test() {
let logits = Node::new(&[4, 4])
.set_value(arr2(&[
[0.2, 0.4, 0.6, 0.8],
[1.2, 1.4, 1.6, 1.8],
[2.2, 2.4, 2.6, 2.8],
[3.2, 3.4, 3.6, 3.8],
]))
.set_name("logits");
let hor_groups = softmax(&logits, -1).unwrap();
let vert_groups = softmax(&logits, 0).unwrap();
assert!(hor_groups
.calc()
.unwrap()
.all_relatively_close(&arr2(&[[0.180_657_18, 0.220_655_17, 0.269_508_84, 0.329_178_84]]), 1e-4));
assert!(vert_groups.calc().unwrap().all_relatively_close(
&arr2(&[[0.032_058_604], [0.087_144_32f32], [0.236_882_82], [0.643_914_3]]),
1e-4
));
}
#[test]
fn grad_numeric_rand_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let rand = Node::new(&[13, 33]).set_name("rand"); // multiply output by random amounts to prevent gradient cancellation
let output = mul(&softmax(&logits, -1).unwrap(), &rand).unwrap();
GradNumericTest::new(&output, &indexset![&logits, &rand])
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
#[test]
fn grad_numeric_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let output = softmax(&logits, -1).unwrap();
GradNumericTest::new(&output, &indexset![&logits])
.expect_zero(&logits, 20.0 * ::std::f32::EPSILON) // under a uniform output gradient the gradient of the logits should cancel out to zero
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
} | axis: self.axis, | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.