file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
f2s.rs | // Translated from C to Rust. The original C code can be found at
// https://github.com/ulfjack/ryu and carries the following license:
//
// Copyright 2018 Ulf Adams
//
// The contents of this file may be used under the terms of the Apache License,
// Version 2.0.
//
// (See accompanying file LICENSE-Apache or copy at
// http://www.apache.org/licenses/LICENSE-2.0)
//
// Alternatively, the contents of this file may be used under the terms of
// the Boost Software License, Version 1.0.
// (See accompanying file LICENSE-Boost or copy at
// https://www.boost.org/LICENSE_1_0.txt)
//
// Unless required by applicable law or agreed to in writing, this software
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.
use common::*;
pub const FLOAT_MANTISSA_BITS: u32 = 23;
pub const FLOAT_EXPONENT_BITS: u32 = 8;
const FLOAT_BIAS: i32 = 127;
const FLOAT_POW5_INV_BITCOUNT: i32 = 59;
const FLOAT_POW5_BITCOUNT: i32 = 61;
// This table is generated by PrintFloatLookupTable.
static FLOAT_POW5_INV_SPLIT: [u64; 32] = [
576460752303423489,
461168601842738791,
368934881474191033,
295147905179352826,
472236648286964522,
377789318629571618,
302231454903657294,
483570327845851670,
386856262276681336,
309485009821345069,
495176015714152110,
396140812571321688,
316912650057057351,
507060240091291761,
405648192073033409,
324518553658426727,
519229685853482763,
415383748682786211,
332306998946228969,
531691198313966350,
425352958651173080,
340282366920938464,
544451787073501542,
435561429658801234,
348449143727040987,
557518629963265579,
446014903970612463,
356811923176489971,
570899077082383953,
456719261665907162,
365375409332725730,
1 << 63,
];
static FLOAT_POW5_SPLIT: [u64; 47] = [
1152921504606846976,
1441151880758558720,
1801439850948198400,
2251799813685248000,
1407374883553280000,
1759218604441600000,
2199023255552000000,
1374389534720000000,
1717986918400000000,
2147483648000000000,
1342177280000000000,
1677721600000000000,
2097152000000000000,
1310720000000000000,
1638400000000000000,
2048000000000000000,
1280000000000000000,
1600000000000000000,
2000000000000000000,
1250000000000000000,
1562500000000000000,
1953125000000000000,
1220703125000000000,
1525878906250000000,
1907348632812500000,
1192092895507812500,
1490116119384765625,
1862645149230957031,
1164153218269348144,
1455191522836685180,
1818989403545856475,
2273736754432320594,
1421085471520200371,
1776356839400250464,
2220446049250313080,
1387778780781445675,
1734723475976807094,
2168404344971008868,
1355252715606880542,
1694065894508600678,
2117582368135750847,
1323488980084844279,
1654361225106055349,
2067951531382569187,
1292469707114105741,
1615587133892632177,
2019483917365790221,
];
#[cfg_attr(feature = "no-panic", inline)]
fn pow5_factor(mut value: u32) -> u32 {
let mut count = 0u32;
loop {
debug_assert!(value != 0);
let q = value / 5;
let r = value % 5;
if r != 0 {
break;
}
value = q;
count += 1;
}
count
}
// Returns true if value is divisible by 5^p.
#[cfg_attr(feature = "no-panic", inline)]
fn multiple_of_power_of_5(value: u32, p: u32) -> bool {
pow5_factor(value) >= p
}
// Returns true if value is divisible by 2^p.
#[cfg_attr(feature = "no-panic", inline)]
fn multiple_of_power_of_2(value: u32, p: u32) -> bool {
// return __builtin_ctz(value) >= p;
(value & ((1u32 << p) - 1)) == 0
}
// It seems to be slightly faster to avoid uint128_t here, although the
// generated code for uint128_t looks slightly nicer.
#[cfg_attr(feature = "no-panic", inline)]
fn mul_shift(m: u32, factor: u64, shift: i32) -> u32 {
debug_assert!(shift > 32);
// The casts here help MSVC to avoid calls to the __allmul library
// function.
let factor_lo = factor as u32;
let factor_hi = (factor >> 32) as u32;
let bits0 = m as u64 * factor_lo as u64;
let bits1 = m as u64 * factor_hi as u64;
let sum = (bits0 >> 32) + bits1;
let shifted_sum = sum >> (shift - 32);
debug_assert!(shifted_sum <= u32::max_value() as u64);
shifted_sum as u32
}
#[cfg_attr(feature = "no-panic", inline)]
fn mul_pow5_inv_div_pow2(m: u32, q: u32, j: i32) -> u32 {
debug_assert!(q < FLOAT_POW5_INV_SPLIT.len() as u32);
unsafe { mul_shift(m, *FLOAT_POW5_INV_SPLIT.get_unchecked(q as usize), j) }
}
#[cfg_attr(feature = "no-panic", inline)]
fn mul_pow5_div_pow2(m: u32, i: u32, j: i32) -> u32 {
debug_assert!(i < FLOAT_POW5_SPLIT.len() as u32);
unsafe { mul_shift(m, *FLOAT_POW5_SPLIT.get_unchecked(i as usize), j) }
}
// A floating decimal representing m * 10^e.
pub struct FloatingDecimal32 {
pub mantissa: u32,
// Decimal exponent's range is -45 to 38
// inclusive, and can fit in i16 if needed.
pub exponent: i32,
}
#[cfg_attr(feature = "no-panic", inline)]
pub fn f2d(ieee_mantissa: u32, ieee_exponent: u32) -> FloatingDecimal32 {
let (e2, m2) = if ieee_exponent == 0 {
(
// We subtract 2 so that the bounds computation has 2 additional bits.
1 - FLOAT_BIAS - FLOAT_MANTISSA_BITS as i32 - 2,
ieee_mantissa,
)
} else {
(
ieee_exponent as i32 - FLOAT_BIAS - FLOAT_MANTISSA_BITS as i32 - 2,
(1u32 << FLOAT_MANTISSA_BITS) | ieee_mantissa,
)
};
let even = (m2 & 1) == 0;
let accept_bounds = even;
// Step 2: Determine the interval of valid decimal representations.
let mv = 4 * m2;
let mp = 4 * m2 + 2;
// Implicit bool -> int conversion. True is 1, false is 0.
let mm_shift = (ieee_mantissa != 0 || ieee_exponent <= 1) as u32;
let mm = 4 * m2 - 1 - mm_shift;
// Step 3: Convert to a decimal power base using 64-bit arithmetic.
let mut vr: u32;
let mut vp: u32;
let mut vm: u32;
let e10: i32;
let mut vm_is_trailing_zeros = false;
let mut vr_is_trailing_zeros = false;
let mut last_removed_digit = 0u8;
if e2 >= 0 {
let q = log10_pow2(e2);
e10 = q as i32;
let k = FLOAT_POW5_INV_BITCOUNT + pow5bits(q as i32) - 1;
let i = -e2 + q as i32 + k;
vr = mul_pow5_inv_div_pow2(mv, q, i);
vp = mul_pow5_inv_div_pow2(mp, q, i);
vm = mul_pow5_inv_div_pow2(mm, q, i);
if q != 0 && (vp - 1) / 10 <= vm / 10 {
// We need to know one removed digit even if we are not going to loop below. We could use
// q = X - 1 above, except that would require 33 bits for the result, and we've found that
// 32-bit arithmetic is faster even on 64-bit machines.
let l = FLOAT_POW5_INV_BITCOUNT + pow5bits(q as i32 - 1) - 1;
last_removed_digit =
(mul_pow5_inv_div_pow2(mv, q - 1, -e2 + q as i32 - 1 + l) % 10) as u8;
}
if q <= 9 {
// The largest power of 5 that fits in 24 bits is 5^10, but q <= 9 seems to be safe as well.
// Only one of mp, mv, and mm can be a multiple of 5, if any.
if mv % 5 == 0 {
vr_is_trailing_zeros = multiple_of_power_of_5(mv, q);
} else if accept_bounds {
vm_is_trailing_zeros = multiple_of_power_of_5(mm, q);
} else {
vp -= multiple_of_power_of_5(mp, q) as u32;
}
}
} else {
let q = log10_pow5(-e2);
e10 = q as i32 + e2;
let i = -e2 - q as i32;
let k = pow5bits(i) - FLOAT_POW5_BITCOUNT;
let mut j = q as i32 - k;
vr = mul_pow5_div_pow2(mv, i as u32, j);
vp = mul_pow5_div_pow2(mp, i as u32, j);
vm = mul_pow5_div_pow2(mm, i as u32, j);
if q != 0 && (vp - 1) / 10 <= vm / 10 {
j = q as i32 - 1 - (pow5bits(i + 1) - FLOAT_POW5_BITCOUNT);
last_removed_digit = (mul_pow5_div_pow2(mv, (i + 1) as u32, j) % 10) as u8;
}
if q <= 1 {
// {vr,vp,vm} is trailing zeros if {mv,mp,mm} has at least q trailing 0 bits.
// mv = 4 * m2, so it always has at least two trailing 0 bits.
vr_is_trailing_zeros = true;
if accept_bounds {
// mm = mv - 1 - mm_shift, so it has 1 trailing 0 bit iff mm_shift == 1.
vm_is_trailing_zeros = mm_shift == 1;
} else {
// mp = mv + 2, so it always has at least one trailing 0 bit.
vp -= 1;
}
} else if q < 31 {
// TODO(ulfjack): Use a tighter bound here.
vr_is_trailing_zeros = multiple_of_power_of_2(mv, q - 1);
}
}
// Step 4: Find the shortest decimal representation in the interval of valid representations.
let mut removed = 0i32;
let output = if vm_is_trailing_zeros || vr_is_trailing_zeros {
// General case, which happens rarely (~4.0%).
while vp / 10 > vm / 10 {
vm_is_trailing_zeros &= vm - (vm / 10) * 10 == 0;
vr_is_trailing_zeros &= last_removed_digit == 0;
last_removed_digit = (vr % 10) as u8;
vr /= 10;
vp /= 10;
vm /= 10;
removed += 1;
}
if vm_is_trailing_zeros {
while vm % 10 == 0 {
vr_is_trailing_zeros &= last_removed_digit == 0;
last_removed_digit = (vr % 10) as u8;
vr /= 10;
vp /= 10;
vm /= 10;
removed += 1;
}
}
if vr_is_trailing_zeros && last_removed_digit == 5 && vr % 2 == 0 {
// Round even if the exact number is .....50..0.
last_removed_digit = 4;
}
// We need to take vr + 1 if vr is outside bounds or we need to round up.
vr + ((vr == vm && (!accept_bounds || !vm_is_trailing_zeros)) || last_removed_digit >= 5)
as u32
} else | ;
let exp = e10 + removed;
FloatingDecimal32 {
exponent: exp,
mantissa: output,
}
}
| {
// Specialized for the common case (~96.0%). Percentages below are relative to this.
// Loop iterations below (approximately):
// 0: 13.6%, 1: 70.7%, 2: 14.1%, 3: 1.39%, 4: 0.14%, 5+: 0.01%
while vp / 10 > vm / 10 {
last_removed_digit = (vr % 10) as u8;
vr /= 10;
vp /= 10;
vm /= 10;
removed += 1;
}
// We need to take vr + 1 if vr is outside bounds or we need to round up.
vr + (vr == vm || last_removed_digit >= 5) as u32
} | conditional_block |
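
A minimal, assumed driver for the `f2d` routine in the row above: the IEEE-754 fields are masked out of an `f32`'s raw bits (field widths matching `FLOAT_MANTISSA_BITS` and `FLOAT_EXPONENT_BITS`) and passed in as `ieee_mantissa`/`ieee_exponent`. The `decompose` helper and the sample value are illustrative assumptions, not code from the row.

```rust
// Illustrative sketch: extract f2d's inputs from an f32 bit pattern.
fn decompose(f: f32) -> (bool, u32, u32) {
    const MANTISSA_BITS: u32 = 23; // matches FLOAT_MANTISSA_BITS above
    const EXPONENT_BITS: u32 = 8;  // matches FLOAT_EXPONENT_BITS above
    let bits = f.to_bits();
    let sign = ((bits >> (MANTISSA_BITS + EXPONENT_BITS)) & 1) == 1;
    let ieee_exponent = (bits >> MANTISSA_BITS) & ((1u32 << EXPONENT_BITS) - 1);
    let ieee_mantissa = bits & ((1u32 << MANTISSA_BITS) - 1);
    (sign, ieee_mantissa, ieee_exponent)
}

fn main() {
    let (sign, mantissa, exponent) = decompose(0.3_f32);
    // `f2d(mantissa, exponent)` takes exactly these two fields; the sign bit
    // is not part of its input and is handled by the caller.
    println!("sign={} mantissa={} exponent={}", sign, mantissa, exponent);
}
```
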
f2s.rs | // Translated from C to Rust. The original C code can be found at
// https://github.com/ulfjack/ryu and carries the following license:
//
// Copyright 2018 Ulf Adams
//
// The contents of this file may be used under the terms of the Apache License,
// Version 2.0.
//
// (See accompanying file LICENSE-Apache or copy at
// http://www.apache.org/licenses/LICENSE-2.0)
//
// Alternatively, the contents of this file may be used under the terms of
// the Boost Software License, Version 1.0.
// (See accompanying file LICENSE-Boost or copy at
// https://www.boost.org/LICENSE_1_0.txt)
//
// Unless required by applicable law or agreed to in writing, this software
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.
use common::*;
pub const FLOAT_MANTISSA_BITS: u32 = 23;
pub const FLOAT_EXPONENT_BITS: u32 = 8;
const FLOAT_BIAS: i32 = 127;
const FLOAT_POW5_INV_BITCOUNT: i32 = 59;
const FLOAT_POW5_BITCOUNT: i32 = 61;
// This table is generated by PrintFloatLookupTable.
static FLOAT_POW5_INV_SPLIT: [u64; 32] = [
576460752303423489,
461168601842738791,
368934881474191033,
295147905179352826,
472236648286964522,
377789318629571618,
302231454903657294,
483570327845851670,
386856262276681336,
309485009821345069,
495176015714152110,
396140812571321688,
316912650057057351,
507060240091291761,
405648192073033409,
324518553658426727,
519229685853482763,
415383748682786211,
332306998946228969,
531691198313966350,
425352958651173080,
340282366920938464,
544451787073501542,
435561429658801234,
348449143727040987,
557518629963265579,
446014903970612463,
356811923176489971,
570899077082383953,
456719261665907162,
365375409332725730,
1 << 63,
];
static FLOAT_POW5_SPLIT: [u64; 47] = [
1152921504606846976,
1441151880758558720,
1801439850948198400,
2251799813685248000,
1407374883553280000,
1759218604441600000,
2199023255552000000,
1374389534720000000,
1717986918400000000,
2147483648000000000,
1342177280000000000,
1677721600000000000,
2097152000000000000,
1310720000000000000,
1638400000000000000,
2048000000000000000,
1280000000000000000,
1600000000000000000,
2000000000000000000,
1250000000000000000,
1562500000000000000,
1953125000000000000,
1220703125000000000,
1525878906250000000,
1907348632812500000,
1192092895507812500,
1490116119384765625,
1862645149230957031,
1164153218269348144,
1455191522836685180,
1818989403545856475,
2273736754432320594,
1421085471520200371,
1776356839400250464,
2220446049250313080,
1387778780781445675,
1734723475976807094,
2168404344971008868,
1355252715606880542,
1694065894508600678,
2117582368135750847,
1323488980084844279,
1654361225106055349,
2067951531382569187,
1292469707114105741,
1615587133892632177,
2019483917365790221,
];
#[cfg_attr(feature = "no-panic", inline)]
fn pow5_factor(mut value: u32) -> u32 {
let mut count = 0u32;
loop {
debug_assert!(value != 0);
let q = value / 5;
let r = value % 5;
if r != 0 {
break;
}
value = q;
count += 1;
}
count
}
// Returns true if value is divisible by 5^p.
#[cfg_attr(feature = "no-panic", inline)]
fn multiple_of_power_of_5(value: u32, p: u32) -> bool {
pow5_factor(value) >= p
}
// Returns true if value is divisible by 2^p.
#[cfg_attr(feature = "no-panic", inline)]
fn multiple_of_power_of_2(value: u32, p: u32) -> bool {
// return __builtin_ctz(value) >= p;
(value & ((1u32 << p) - 1)) == 0
}
// It seems to be slightly faster to avoid uint128_t here, although the
// generated code for uint128_t looks slightly nicer.
#[cfg_attr(feature = "no-panic", inline)]
fn mul_shift(m: u32, factor: u64, shift: i32) -> u32 {
debug_assert!(shift > 32);
// The casts here help MSVC to avoid calls to the __allmul library
// function.
let factor_lo = factor as u32;
let factor_hi = (factor >> 32) as u32;
let bits0 = m as u64 * factor_lo as u64;
let bits1 = m as u64 * factor_hi as u64;
let sum = (bits0 >> 32) + bits1;
let shifted_sum = sum >> (shift - 32);
debug_assert!(shifted_sum <= u32::max_value() as u64);
shifted_sum as u32
}
#[cfg_attr(feature = "no-panic", inline)]
fn | (m: u32, q: u32, j: i32) -> u32 {
debug_assert!(q < FLOAT_POW5_INV_SPLIT.len() as u32);
unsafe { mul_shift(m, *FLOAT_POW5_INV_SPLIT.get_unchecked(q as usize), j) }
}
#[cfg_attr(feature = "no-panic", inline)]
fn mul_pow5_div_pow2(m: u32, i: u32, j: i32) -> u32 {
debug_assert!(i < FLOAT_POW5_SPLIT.len() as u32);
unsafe { mul_shift(m, *FLOAT_POW5_SPLIT.get_unchecked(i as usize), j) }
}
// A floating decimal representing m * 10^e.
pub struct FloatingDecimal32 {
pub mantissa: u32,
// Decimal exponent's range is -45 to 38
// inclusive, and can fit in i16 if needed.
pub exponent: i32,
}
#[cfg_attr(feature = "no-panic", inline)]
pub fn f2d(ieee_mantissa: u32, ieee_exponent: u32) -> FloatingDecimal32 {
let (e2, m2) = if ieee_exponent == 0 {
(
// We subtract 2 so that the bounds computation has 2 additional bits.
1 - FLOAT_BIAS - FLOAT_MANTISSA_BITS as i32 - 2,
ieee_mantissa,
)
} else {
(
ieee_exponent as i32 - FLOAT_BIAS - FLOAT_MANTISSA_BITS as i32 - 2,
(1u32 << FLOAT_MANTISSA_BITS) | ieee_mantissa,
)
};
let even = (m2 & 1) == 0;
let accept_bounds = even;
// Step 2: Determine the interval of valid decimal representations.
let mv = 4 * m2;
let mp = 4 * m2 + 2;
// Implicit bool -> int conversion. True is 1, false is 0.
let mm_shift = (ieee_mantissa != 0 || ieee_exponent <= 1) as u32;
let mm = 4 * m2 - 1 - mm_shift;
// Step 3: Convert to a decimal power base using 64-bit arithmetic.
let mut vr: u32;
let mut vp: u32;
let mut vm: u32;
let e10: i32;
let mut vm_is_trailing_zeros = false;
let mut vr_is_trailing_zeros = false;
let mut last_removed_digit = 0u8;
if e2 >= 0 {
let q = log10_pow2(e2);
e10 = q as i32;
let k = FLOAT_POW5_INV_BITCOUNT + pow5bits(q as i32) - 1;
let i = -e2 + q as i32 + k;
vr = mul_pow5_inv_div_pow2(mv, q, i);
vp = mul_pow5_inv_div_pow2(mp, q, i);
vm = mul_pow5_inv_div_pow2(mm, q, i);
if q != 0 && (vp - 1) / 10 <= vm / 10 {
// We need to know one removed digit even if we are not going to loop below. We could use
// q = X - 1 above, except that would require 33 bits for the result, and we've found that
// 32-bit arithmetic is faster even on 64-bit machines.
let l = FLOAT_POW5_INV_BITCOUNT + pow5bits(q as i32 - 1) - 1;
last_removed_digit =
(mul_pow5_inv_div_pow2(mv, q - 1, -e2 + q as i32 - 1 + l) % 10) as u8;
}
if q <= 9 {
// The largest power of 5 that fits in 24 bits is 5^10, but q <= 9 seems to be safe as well.
// Only one of mp, mv, and mm can be a multiple of 5, if any.
if mv % 5 == 0 {
vr_is_trailing_zeros = multiple_of_power_of_5(mv, q);
} else if accept_bounds {
vm_is_trailing_zeros = multiple_of_power_of_5(mm, q);
} else {
vp -= multiple_of_power_of_5(mp, q) as u32;
}
}
} else {
let q = log10_pow5(-e2);
e10 = q as i32 + e2;
let i = -e2 - q as i32;
let k = pow5bits(i) - FLOAT_POW5_BITCOUNT;
let mut j = q as i32 - k;
vr = mul_pow5_div_pow2(mv, i as u32, j);
vp = mul_pow5_div_pow2(mp, i as u32, j);
vm = mul_pow5_div_pow2(mm, i as u32, j);
if q != 0 && (vp - 1) / 10 <= vm / 10 {
j = q as i32 - 1 - (pow5bits(i + 1) - FLOAT_POW5_BITCOUNT);
last_removed_digit = (mul_pow5_div_pow2(mv, (i + 1) as u32, j) % 10) as u8;
}
if q <= 1 {
// {vr,vp,vm} is trailing zeros if {mv,mp,mm} has at least q trailing 0 bits.
// mv = 4 * m2, so it always has at least two trailing 0 bits.
vr_is_trailing_zeros = true;
if accept_bounds {
// mm = mv - 1 - mm_shift, so it has 1 trailing 0 bit iff mm_shift == 1.
vm_is_trailing_zeros = mm_shift == 1;
} else {
// mp = mv + 2, so it always has at least one trailing 0 bit.
vp -= 1;
}
} else if q < 31 {
// TODO(ulfjack): Use a tighter bound here.
vr_is_trailing_zeros = multiple_of_power_of_2(mv, q - 1);
}
}
// Step 4: Find the shortest decimal representation in the interval of valid representations.
let mut removed = 0i32;
let output = if vm_is_trailing_zeros || vr_is_trailing_zeros {
// General case, which happens rarely (~4.0%).
while vp / 10 > vm / 10 {
vm_is_trailing_zeros &= vm - (vm / 10) * 10 == 0;
vr_is_trailing_zeros &= last_removed_digit == 0;
last_removed_digit = (vr % 10) as u8;
vr /= 10;
vp /= 10;
vm /= 10;
removed += 1;
}
if vm_is_trailing_zeros {
while vm % 10 == 0 {
vr_is_trailing_zeros &= last_removed_digit == 0;
last_removed_digit = (vr % 10) as u8;
vr /= 10;
vp /= 10;
vm /= 10;
removed += 1;
}
}
if vr_is_trailing_zeros && last_removed_digit == 5 && vr % 2 == 0 {
// Round even if the exact number is .....50..0.
last_removed_digit = 4;
}
// We need to take vr + 1 if vr is outside bounds or we need to round up.
vr + ((vr == vm && (!accept_bounds || !vm_is_trailing_zeros)) || last_removed_digit >= 5)
as u32
} else {
// Specialized for the common case (~96.0%). Percentages below are relative to this.
// Loop iterations below (approximately):
// 0: 13.6%, 1: 70.7%, 2: 14.1%, 3: 1.39%, 4: 0.14%, 5+: 0.01%
while vp / 10 > vm / 10 {
last_removed_digit = (vr % 10) as u8;
vr /= 10;
vp /= 10;
vm /= 10;
removed += 1;
}
// We need to take vr + 1 if vr is outside bounds or we need to round up.
vr + (vr == vm || last_removed_digit >= 5) as u32
};
let exp = e10 + removed;
FloatingDecimal32 {
exponent: exp,
mantissa: output,
}
}
| mul_pow5_inv_div_pow2 | identifier_name |
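
A small assumed check, separate from the row content, illustrating why the 32-bit decomposition inside `mul_shift` works: splitting `factor` into high and low halves and shifting the partial sums is exactly a 128-bit multiply followed by a shift, provided `shift > 32` as the `debug_assert` requires. The sample inputs reuse the first `FLOAT_POW5_INV_SPLIT` entry.

```rust
// Same decomposition as mul_shift in the row above.
fn mul_shift_split(m: u32, factor: u64, shift: i32) -> u32 {
    let bits0 = m as u64 * (factor as u32) as u64;          // m * low 32 bits of factor
    let bits1 = m as u64 * ((factor >> 32) as u32) as u64;  // m * high 32 bits of factor
    (((bits0 >> 32) + bits1) >> (shift - 32)) as u32
}

// Reference computation with a full 128-bit product.
fn mul_shift_u128(m: u32, factor: u64, shift: i32) -> u32 {
    ((m as u128 * factor as u128) >> shift) as u32
}

fn main() {
    // First FLOAT_POW5_INV_SPLIT entry and a shift larger than 32.
    let (m, factor, shift) = (8_388_610_u32, 576_460_752_303_423_489_u64, 59_i32);
    assert_eq!(mul_shift_split(m, factor, shift), mul_shift_u128(m, factor, shift));
    println!("split and u128 paths agree");
}
```
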
f2s.rs | // Translated from C to Rust. The original C code can be found at
// https://github.com/ulfjack/ryu and carries the following license:
//
// Copyright 2018 Ulf Adams
//
// The contents of this file may be used under the terms of the Apache License,
// Version 2.0.
//
// (See accompanying file LICENSE-Apache or copy at
// http://www.apache.org/licenses/LICENSE-2.0)
//
// Alternatively, the contents of this file may be used under the terms of
// the Boost Software License, Version 1.0.
// (See accompanying file LICENSE-Boost or copy at
// https://www.boost.org/LICENSE_1_0.txt)
//
// Unless required by applicable law or agreed to in writing, this software
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.
use common::*;
pub const FLOAT_MANTISSA_BITS: u32 = 23;
pub const FLOAT_EXPONENT_BITS: u32 = 8;
const FLOAT_BIAS: i32 = 127;
const FLOAT_POW5_INV_BITCOUNT: i32 = 59;
const FLOAT_POW5_BITCOUNT: i32 = 61;
// This table is generated by PrintFloatLookupTable.
static FLOAT_POW5_INV_SPLIT: [u64; 32] = [
576460752303423489,
461168601842738791,
368934881474191033,
295147905179352826,
472236648286964522,
377789318629571618,
302231454903657294,
483570327845851670,
386856262276681336,
309485009821345069,
495176015714152110,
396140812571321688,
316912650057057351,
507060240091291761,
405648192073033409,
324518553658426727,
519229685853482763,
415383748682786211,
332306998946228969,
531691198313966350,
425352958651173080,
340282366920938464,
544451787073501542,
435561429658801234,
348449143727040987,
557518629963265579,
446014903970612463,
356811923176489971,
570899077082383953,
456719261665907162,
365375409332725730,
1 << 63,
];
static FLOAT_POW5_SPLIT: [u64; 47] = [
1152921504606846976,
1441151880758558720,
1801439850948198400,
2251799813685248000,
1407374883553280000,
1759218604441600000,
2199023255552000000,
1374389534720000000,
1717986918400000000,
2147483648000000000,
1342177280000000000,
1677721600000000000,
2097152000000000000,
1310720000000000000,
1638400000000000000,
2048000000000000000,
1280000000000000000,
1600000000000000000,
2000000000000000000,
1250000000000000000,
1562500000000000000,
1953125000000000000,
1220703125000000000,
1525878906250000000,
1907348632812500000,
1192092895507812500,
1490116119384765625,
1862645149230957031,
1164153218269348144,
1455191522836685180,
1818989403545856475,
2273736754432320594,
1421085471520200371,
1776356839400250464,
2220446049250313080,
1387778780781445675,
1734723475976807094,
2168404344971008868,
1355252715606880542,
1694065894508600678,
2117582368135750847,
1323488980084844279,
1654361225106055349,
2067951531382569187,
1292469707114105741,
1615587133892632177,
2019483917365790221,
];
#[cfg_attr(feature = "no-panic", inline)]
fn pow5_factor(mut value: u32) -> u32 {
let mut count = 0u32;
loop {
debug_assert!(value != 0);
let q = value / 5;
let r = value % 5;
if r != 0 {
break;
}
value = q;
count += 1; |
// Returns true if value is divisible by 5^p.
#[cfg_attr(feature = "no-panic", inline)]
fn multiple_of_power_of_5(value: u32, p: u32) -> bool {
pow5_factor(value) >= p
}
// Returns true if value is divisible by 2^p.
#[cfg_attr(feature = "no-panic", inline)]
fn multiple_of_power_of_2(value: u32, p: u32) -> bool {
// return __builtin_ctz(value) >= p;
(value & ((1u32 << p) - 1)) == 0
}
// It seems to be slightly faster to avoid uint128_t here, although the
// generated code for uint128_t looks slightly nicer.
#[cfg_attr(feature = "no-panic", inline)]
fn mul_shift(m: u32, factor: u64, shift: i32) -> u32 {
debug_assert!(shift > 32);
// The casts here help MSVC to avoid calls to the __allmul library
// function.
let factor_lo = factor as u32;
let factor_hi = (factor >> 32) as u32;
let bits0 = m as u64 * factor_lo as u64;
let bits1 = m as u64 * factor_hi as u64;
let sum = (bits0 >> 32) + bits1;
let shifted_sum = sum >> (shift - 32);
debug_assert!(shifted_sum <= u32::max_value() as u64);
shifted_sum as u32
}
#[cfg_attr(feature = "no-panic", inline)]
fn mul_pow5_inv_div_pow2(m: u32, q: u32, j: i32) -> u32 {
debug_assert!(q < FLOAT_POW5_INV_SPLIT.len() as u32);
unsafe { mul_shift(m, *FLOAT_POW5_INV_SPLIT.get_unchecked(q as usize), j) }
}
#[cfg_attr(feature = "no-panic", inline)]
fn mul_pow5_div_pow2(m: u32, i: u32, j: i32) -> u32 {
debug_assert!(i < FLOAT_POW5_SPLIT.len() as u32);
unsafe { mul_shift(m, *FLOAT_POW5_SPLIT.get_unchecked(i as usize), j) }
}
// A floating decimal representing m * 10^e.
pub struct FloatingDecimal32 {
pub mantissa: u32,
// Decimal exponent's range is -45 to 38
// inclusive, and can fit in i16 if needed.
pub exponent: i32,
}
#[cfg_attr(feature = "no-panic", inline)]
pub fn f2d(ieee_mantissa: u32, ieee_exponent: u32) -> FloatingDecimal32 {
let (e2, m2) = if ieee_exponent == 0 {
(
// We subtract 2 so that the bounds computation has 2 additional bits.
1 - FLOAT_BIAS - FLOAT_MANTISSA_BITS as i32 - 2,
ieee_mantissa,
)
} else {
(
ieee_exponent as i32 - FLOAT_BIAS - FLOAT_MANTISSA_BITS as i32 - 2,
(1u32 << FLOAT_MANTISSA_BITS) | ieee_mantissa,
)
};
let even = (m2 & 1) == 0;
let accept_bounds = even;
// Step 2: Determine the interval of valid decimal representations.
let mv = 4 * m2;
let mp = 4 * m2 + 2;
// Implicit bool -> int conversion. True is 1, false is 0.
let mm_shift = (ieee_mantissa != 0 || ieee_exponent <= 1) as u32;
let mm = 4 * m2 - 1 - mm_shift;
// Step 3: Convert to a decimal power base using 64-bit arithmetic.
let mut vr: u32;
let mut vp: u32;
let mut vm: u32;
let e10: i32;
let mut vm_is_trailing_zeros = false;
let mut vr_is_trailing_zeros = false;
let mut last_removed_digit = 0u8;
if e2 >= 0 {
let q = log10_pow2(e2);
e10 = q as i32;
let k = FLOAT_POW5_INV_BITCOUNT + pow5bits(q as i32) - 1;
let i = -e2 + q as i32 + k;
vr = mul_pow5_inv_div_pow2(mv, q, i);
vp = mul_pow5_inv_div_pow2(mp, q, i);
vm = mul_pow5_inv_div_pow2(mm, q, i);
if q != 0 && (vp - 1) / 10 <= vm / 10 {
// We need to know one removed digit even if we are not going to loop below. We could use
// q = X - 1 above, except that would require 33 bits for the result, and we've found that
// 32-bit arithmetic is faster even on 64-bit machines.
let l = FLOAT_POW5_INV_BITCOUNT + pow5bits(q as i32 - 1) - 1;
last_removed_digit =
(mul_pow5_inv_div_pow2(mv, q - 1, -e2 + q as i32 - 1 + l) % 10) as u8;
}
if q <= 9 {
// The largest power of 5 that fits in 24 bits is 5^10, but q <= 9 seems to be safe as well.
// Only one of mp, mv, and mm can be a multiple of 5, if any.
if mv % 5 == 0 {
vr_is_trailing_zeros = multiple_of_power_of_5(mv, q);
} else if accept_bounds {
vm_is_trailing_zeros = multiple_of_power_of_5(mm, q);
} else {
vp -= multiple_of_power_of_5(mp, q) as u32;
}
}
} else {
let q = log10_pow5(-e2);
e10 = q as i32 + e2;
let i = -e2 - q as i32;
let k = pow5bits(i) - FLOAT_POW5_BITCOUNT;
let mut j = q as i32 - k;
vr = mul_pow5_div_pow2(mv, i as u32, j);
vp = mul_pow5_div_pow2(mp, i as u32, j);
vm = mul_pow5_div_pow2(mm, i as u32, j);
if q != 0 && (vp - 1) / 10 <= vm / 10 {
j = q as i32 - 1 - (pow5bits(i + 1) - FLOAT_POW5_BITCOUNT);
last_removed_digit = (mul_pow5_div_pow2(mv, (i + 1) as u32, j) % 10) as u8;
}
if q <= 1 {
// {vr,vp,vm} is trailing zeros if {mv,mp,mm} has at least q trailing 0 bits.
// mv = 4 * m2, so it always has at least two trailing 0 bits.
vr_is_trailing_zeros = true;
if accept_bounds {
// mm = mv - 1 - mm_shift, so it has 1 trailing 0 bit iff mm_shift == 1.
vm_is_trailing_zeros = mm_shift == 1;
} else {
// mp = mv + 2, so it always has at least one trailing 0 bit.
vp -= 1;
}
} else if q < 31 {
// TODO(ulfjack): Use a tighter bound here.
vr_is_trailing_zeros = multiple_of_power_of_2(mv, q - 1);
}
}
// Step 4: Find the shortest decimal representation in the interval of valid representations.
let mut removed = 0i32;
let output = if vm_is_trailing_zeros || vr_is_trailing_zeros {
// General case, which happens rarely (~4.0%).
while vp / 10 > vm / 10 {
vm_is_trailing_zeros &= vm - (vm / 10) * 10 == 0;
vr_is_trailing_zeros &= last_removed_digit == 0;
last_removed_digit = (vr % 10) as u8;
vr /= 10;
vp /= 10;
vm /= 10;
removed += 1;
}
if vm_is_trailing_zeros {
while vm % 10 == 0 {
vr_is_trailing_zeros &= last_removed_digit == 0;
last_removed_digit = (vr % 10) as u8;
vr /= 10;
vp /= 10;
vm /= 10;
removed += 1;
}
}
if vr_is_trailing_zeros && last_removed_digit == 5 && vr % 2 == 0 {
// Round even if the exact number is .....50..0.
last_removed_digit = 4;
}
// We need to take vr + 1 if vr is outside bounds or we need to round up.
vr + ((vr == vm && (!accept_bounds || !vm_is_trailing_zeros)) || last_removed_digit >= 5)
as u32
} else {
// Specialized for the common case (~96.0%). Percentages below are relative to this.
// Loop iterations below (approximately):
// 0: 13.6%, 1: 70.7%, 2: 14.1%, 3: 1.39%, 4: 0.14%, 5+: 0.01%
while vp / 10 > vm / 10 {
last_removed_digit = (vr % 10) as u8;
vr /= 10;
vp /= 10;
vm /= 10;
removed += 1;
}
// We need to take vr + 1 if vr is outside bounds or we need to round up.
vr + (vr == vm || last_removed_digit >= 5) as u32
};
let exp = e10 + removed;
FloatingDecimal32 {
exponent: exp,
mantissa: output,
}
} | }
count
} | random_line_split |
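
An assumed sketch, separate from the row content, showing that the bit-mask test in `multiple_of_power_of_2` agrees with counting trailing zeros, which is what the `__builtin_ctz` comment in the row alludes to.

```rust
// Same mask test as multiple_of_power_of_2 in the rows above.
fn multiple_of_power_of_2(value: u32, p: u32) -> bool {
    (value & ((1u32 << p) - 1)) == 0
}

fn main() {
    for value in [40_u32, 48, 100, 1024, 7] {
        for p in 0..8 {
            // Divisible by 2^p exactly when the low p bits are zero,
            // i.e. when the trailing-zero count is at least p.
            assert_eq!(multiple_of_power_of_2(value, p), value.trailing_zeros() >= p);
        }
    }
    println!("mask test matches trailing_zeros()");
}
```
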
mailbox.rs | use std::future::Future;
use anyhow::Result;
use wasmtime::{Caller, FuncType, Linker, Trap, ValType};
use crate::{
api::{error::IntoTrap, get_memory},
message::Message,
state::ProcessState,
};
use super::{link_async2_if_match, link_if_match};
// Register the mailbox APIs to the linker
pub(crate) fn register(
linker: &mut Linker<ProcessState>,
namespace_filter: &[String],
) -> Result<()> {
link_if_match(
linker,
"lunatic::message",
"create",
FuncType::new([], []),
create,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"set_buffer",
FuncType::new([ValType::I32, ValType::I32], []),
set_buffer,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"add_process",
FuncType::new([ValType::I64], [ValType::I64]),
add_process,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"add_tcp_stream",
FuncType::new([ValType::I64], [ValType::I64]),
add_tcp_stream,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"send",
FuncType::new([ValType::I64], [ValType::I32]),
send,
namespace_filter,
)?;
link_async2_if_match(
linker,
"lunatic::message",
"prepare_receive",
FuncType::new([ValType::I32, ValType::I32], [ValType::I32]),
prepare_receive,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"receive",
FuncType::new([ValType::I32, ValType::I32], []),
receive,
namespace_filter,
)?;
Ok(())
}
//% lunatic::message
//%
//% A lunatic message consists of 2 parts:
//% * A buffer of raw data
//% * An array of resource IDs
//%
//% If resources are sent between processes, their ID changes. The resource ID can for example
//% be already taken in the receiving process. So we need a way to communicate the new ID on the
//% receiving end.
//%
//% When the `create()` function is called an empty message is allocated and both parts can be
//% modified before it's sent to another process. If a new resource is added to the message, the
//% index inside of the array is returned. This information can be now serialized inside the raw
//% data buffer in some way. E.g. You are serializing a structure like this:
//%
//% struct A {
//% a: String,
//% b: Process,
//% c: i32,
//% d: TcpStream
//% }
//%
//% Into something like this:
//%
//% ["Some string" | [resource 0] | i32 value | [resource 1] ]
//%
//% [resource 0] & [resource 1] are just encoded as 0 and 1 u64 values, representing their order
//% in the resource array.
//%
//% It's common to use some serialization library that will encode a mixture of raw data and
//% resource indexes into the data buffer.
//%
//% On the receiving side, first the `prepare_receive()` function must be called to receive info
//% on how big the buffer and resource arrays are, so that enough space can be allocated inside
//% the guest.
//%
//% The `receive()` function will do 2 things:
//% * Write the buffer of raw data to the specified location
//% * Give all resources to the new process (with new IDs) and write the IDs to the specified
//% location in the same order they were added.
//% Now the information from the buffer (with resource indexes) can be used to deserialize the
//% received message into the same structure.
//%
//% This can be a bit confusing, because resources are just IDs (u64 values) themselves. But we
//% still need to serialize them into different u64 values. Resources are inherently bound to a
//% process and you can't access another resource just by guessing an ID from another process.
//% The process of sending them around needs to be explicit.
//%
//% This API was designed around the idea that most guest languages will use some serialization
//% library and turning resources into indexes is a way of serializing. The same is true for
//% deserializing them on the receiving side, when an index needs to be turned into an actual
//% resource ID.
//% lunatic::message::create()
//%
//% Creates a new message. This message is intended to be modified by other functions in this
//% namespace. Once `lunatic::message::send` is called it will be sent to another process.
fn create(mut caller: Caller<ProcessState>) {
caller.data_mut().message = Some(Message::default());
}
//% lunatic::message::set_buffer(
//% data_ptr: i32,
//% data_len: i32,
//% )
//%
//% Sets the data for the next message.
//%
//% Traps:
//% * If **data_ptr + data_len** is outside the memory.
//% * If it's called before the next message is created.
fn set_buffer(mut caller: Caller<ProcessState>, data_ptr: u32, data_len: u32) -> Result<(), Trap> {
let mut buffer = vec![0; data_len as usize];
let memory = get_memory(&mut caller)?;
memory
.read(&caller, data_ptr as usize, buffer.as_mut_slice())
.or_trap("lunatic::message::set_buffer")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::set_buffer")?;
match message {
Message::Data(data) => data.set_buffer(buffer),
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(())
}
//% lunatic::message::add_process(process_id: u64) -> u64
//%
//% Adds a process resource to the next message and returns the location in the array the process
//% was added to. This will remove the process handle from the current process' resources.
//%
//% Traps:
//% * If process ID doesn't exist
//% * If it's called before the next message is created.
fn add_process(mut caller: Caller<ProcessState>, process_id: u64) -> Result<u64, Trap> {
let process = caller
.data_mut()
.resources
.processes
.remove(process_id)
.or_trap("lunatic::message::add_process")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::add_process")?;
let pid = match message {
Message::Data(data) => data.add_process(process) as u64,
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(pid)
}
//% lunatic::message::add_tcp_stream(stream_id: u64) -> u64
//%
//% Adds a TCP stream resource to the next message and returns the location in the array the TCP
//% stream was added to. This will remove the TCP stream from the current process' resources.
//%
//% Traps:
//% * If TCP stream ID doesn't exist
//% * If it's called before the next message is created.
fn add_tcp_stream(mut caller: Caller<ProcessState>, stream_id: u64) -> Result<u64, Trap> {
let stream = caller
.data_mut()
.resources
.tcp_streams
.remove(stream_id)
.or_trap("lunatic::message::add_tcp_stream")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::add_tcp_stream")?;
let stream_id = match message {
Message::Data(data) => data.add_tcp_stream(stream) as u64,
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(stream_id)
}
//% lunatic::message::send(
//% process_id: i64,
//% ) -> i32
//%
//% Returns:
//% * 0 on success
//% * 1 on error - Process can't receive messages (finished).
//%
//% Sends the message to a process.
//%
//% Traps:
//% * If the process ID doesn't exist.
//% * If it's called before creating the next message.
fn send(mut caller: Caller<ProcessState>, process_id: u64) -> Result<u32, Trap> |
//% lunatic::message::prepare_receive(i32_data_size_ptr: i32, i32_res_size_ptr: i32) -> i32
//%
//% Returns:
//% * 0 if it's a regular message.
//% * 1 if it's a signal turned into a message.
//%
//% For regular messages both parameters are used.
//% * **i32_data_size_ptr** - Location to write the message buffer size to.
//% * **i32_res_size_ptr** - Location to write the number of resources to.
//%
//% This function should be called before `lunatic::message::receive` to let the guest know how
//% much memory space needs to be reserved for the next message. The data size is in **bytes**,
//% the resources size is the number of resources and each resource is a u64 value. Because of
//% this the guest needs to reserve `8 * resource size` bytes for the resource buffer.
//%
//% Traps:
//% * If **size_ptr** is outside the memory.
fn prepare_receive(
mut caller: Caller<ProcessState>,
data_size_ptr: u32,
res_size_ptr: u32,
) -> Box<dyn Future<Output = Result<u32, Trap>> + Send + '_> {
Box::new(async move {
let message = caller
.data_mut()
.mailbox
.recv()
.await
.expect("a process always hold onto its sender and this can't be None");
let result = match &message {
Message::Data(message) => {
let message_buffer_size = message.buffer_size() as u32;
let message_resources_size = message.resources_size() as u32;
let memory = get_memory(&mut caller)?;
memory
.write(
&mut caller,
data_size_ptr as usize,
&message_buffer_size.to_le_bytes(),
)
.or_trap("lunatic::message::prepare_receive")?;
memory
.write(
&mut caller,
res_size_ptr as usize,
&message_resources_size.to_le_bytes(),
)
.or_trap("lunatic::message::prepare_receive")?;
0
}
Message::Signal => 1,
};
// Put the message into the scratch area
caller.data_mut().message = Some(message);
Ok(result)
})
}
//% lunatic::message::receive(data_ptr: i32, resource_ptr: i32)
//%
//% * **data_ptr** - Pointer to write the data to.
//% * **resource_ptr** - Pointer to an array of i64 values, where each value represents the
//% resource id inside the new process. Resources are in the same order they
//% were added.
//%
//% Writes the message that was prepared with `lunatic::message::prepare_receive` to the guest. It
//% should only be called if `prepare_receive` returned 0, otherwise it will trap. Signal messages
//% don't carry any additional information; everything we need was returned by `prepare_receive`.
//%
//% Traps:
//% * If `lunatic::message::prepare_receive` was not called before.
//% * If **data_ptr + size of the message** is outside the memory.
//% * If **resource_ptr + size of the resources** is outside the memory.
fn receive(mut caller: Caller<ProcessState>, data_ptr: u32, resource_ptr: u32) -> Result<(), Trap> {
let last_message = caller
.data_mut()
.message
.take()
.or_trap("lunatic::message::receive")?;
match last_message {
Message::Data(last_message) => {
let memory = get_memory(&mut caller)?;
memory
.write(&mut caller, data_ptr as usize, last_message.buffer())
.or_trap("lunatic::message::receive")?;
let resources: Vec<u8> = last_message
.resources()
.into_iter()
.map(|resource| match resource {
crate::message::Resource::Process(process_handle) => {
u64::to_le_bytes(caller.data_mut().resources.processes.add(process_handle))
}
crate::message::Resource::TcpStream(tcp_stream) => {
u64::to_le_bytes(caller.data_mut().resources.tcp_streams.add(tcp_stream))
}
})
.flatten()
.collect();
memory
.write(&mut caller, resource_ptr as usize, &resources)
.or_trap("lunatic::message::receive")?;
Ok(())
}
Message::Signal => Err(Trap::new("`lunatic::message::receive` called on a signal")),
}
}
| {
let message = caller
.data_mut()
.message
.take()
.or_trap("lunatic::message::send")?;
let process = caller
.data()
.resources
.processes
.get(process_id)
.or_trap("lunatic::message::send")?;
let result = match process.send_message(message) {
Ok(()) => 0,
Err(_error) => 1,
};
Ok(result)
} | identifier_body |
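
A hypothetical guest-side view of the host functions registered in the row above. The `extern` signatures mirror the `FuncType`s passed to the linker under the `lunatic::message` namespace; the import module string, the wasm32-only pointer casts, and the helper names are assumptions for illustration rather than the API of any real lunatic guest library.

```rust
// Assumed guest-side bindings (wasm32 only); signatures follow the FuncTypes above.
#[link(wasm_import_module = "lunatic::message")]
extern "C" {
    fn create();
    fn set_buffer(data_ptr: u32, data_len: u32);
    fn add_process(process_id: u64) -> u64;
    fn send(process_id: u64) -> u32;
    fn prepare_receive(data_size_ptr: u32, res_size_ptr: u32) -> u32;
    fn receive(data_ptr: u32, resource_ptr: u32);
}

/// Sending side: create a message, attach a resource, set the payload, send it.
unsafe fn send_to(target: u64, child: u64, payload: &[u8]) -> u32 {
    create();
    let _index = add_process(child); // embed this index in the serialized payload
    set_buffer(payload.as_ptr() as u32, payload.len() as u32);
    send(target) // 0 = delivered, 1 = target can no longer receive
}

/// Receiving side: ask for sizes first, then let the host fill both buffers.
unsafe fn receive_next() -> (Vec<u8>, Vec<u64>) {
    let (mut data_len, mut res_len) = (0u32, 0u32);
    let kind = prepare_receive(
        &mut data_len as *mut u32 as u32,
        &mut res_len as *mut u32 as u32,
    );
    assert_eq!(kind, 0, "signal messages carry no payload");
    let mut data = vec![0u8; data_len as usize];
    let mut resources = vec![0u64; res_len as usize];
    receive(data.as_mut_ptr() as u32, resources.as_mut_ptr() as u32);
    (data, resources)
}
```
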
mailbox.rs | use std::future::Future;
use anyhow::Result;
use wasmtime::{Caller, FuncType, Linker, Trap, ValType};
use crate::{
api::{error::IntoTrap, get_memory},
message::Message,
state::ProcessState,
};
use super::{link_async2_if_match, link_if_match};
// Register the mailbox APIs to the linker
pub(crate) fn register(
linker: &mut Linker<ProcessState>,
namespace_filter: &[String],
) -> Result<()> {
link_if_match(
linker,
"lunatic::message",
"create",
FuncType::new([], []),
create,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"set_buffer",
FuncType::new([ValType::I32, ValType::I32], []),
set_buffer,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"add_process",
FuncType::new([ValType::I64], [ValType::I64]),
add_process,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"add_tcp_stream",
FuncType::new([ValType::I64], [ValType::I64]),
add_tcp_stream,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"send",
FuncType::new([ValType::I64], [ValType::I32]),
send,
namespace_filter,
)?;
link_async2_if_match(
linker,
"lunatic::message",
"prepare_receive",
FuncType::new([ValType::I32, ValType::I32], [ValType::I32]),
prepare_receive,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"receive",
FuncType::new([ValType::I32, ValType::I32], []),
receive,
namespace_filter,
)?;
Ok(())
}
//% lunatic::message
//%
//% A lunatic message consists of 2 parts:
//% * A buffer of raw data
//% * An array of resource IDs
//%
//% If resources are sent between processes, their ID changes. The resource ID can for example
//% be already taken in the receiving process. So we need a way to communicate the new ID on the
//% receiving end.
//%
//% When the `create()` function is called an empty message is allocated and both parts can be
//% modified before it's sent to another process. If a new resource is added to the message, the
//% index inside of the array is returned. This information can be now serialized inside the raw
//% data buffer in some way. E.g. You are serializing a structure like this:
//%
//% struct A {
//% a: String,
//% b: Process,
//% c: i32,
//% d: TcpStream
//% }
//%
//% Into something like this:
//%
//% ["Some string" | [resource 0] | i32 value | [resource 1] ]
//%
//% [resource 0] & [resource 1] are just encoded as 0 and 1 u64 values, representing their order
//% in the resource array.
//%
//% It's common to use some serialization library that will encode a mixture of raw data and
//% resource indexes into the data buffer.
//%
//% On the receiving side, first the `prepare_receive()` function must be called to receive info
//% on how big the buffer and resource arrays are, so that enough space can be allocated inside
//% the guest.
//%
//% The `receive()` function will do 2 things:
//% * Write the buffer of raw data to the specified location
//% * Give all resources to the new process (with new IDs) and write the IDs to the specified
//% location in the same order they were added.
//% Now the information from the buffer (with resource indexes) can be used to deserialize the | //% The process of sending them around needs to be explicit.
//%
//% This API was designed around the idea that most guest languages will use some serialization
//% library and turning resources into indexes is a way of serializing. The same is true for
//% deserializing them on the receiving side, when an index needs to be turned into an actual
//% resource ID.
//% lunatic::message::create()
//%
//% Creates a new message. This message is intended to be modified by other functions in this
//% namespace. Once `lunatic::message::send` is called it will be sent to another process.
fn create(mut caller: Caller<ProcessState>) {
caller.data_mut().message = Some(Message::default());
}
//% lunatic::message::set_buffer(
//% data_ptr: i32,
//% data_len: i32,
//% )
//%
//% Sets the data for the next message.
//%
//% Traps:
//% * If **data_ptr + data_len** is outside the memory.
//% * If it's called before the next message is created.
fn set_buffer(mut caller: Caller<ProcessState>, data_ptr: u32, data_len: u32) -> Result<(), Trap> {
let mut buffer = vec![0; data_len as usize];
let memory = get_memory(&mut caller)?;
memory
.read(&caller, data_ptr as usize, buffer.as_mut_slice())
.or_trap("lunatic::message::set_buffer")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::set_buffer")?;
match message {
Message::Data(data) => data.set_buffer(buffer),
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(())
}
//% lunatic::message::add_process(process_id: u64) -> u64
//%
//% Adds a process resource to the next message and returns the location in the array the process
//% was added to. This will remove the process handle from the current process' resources.
//%
//% Traps:
//% * If process ID doesn't exist
//% * If it's called before the next message is created.
fn add_process(mut caller: Caller<ProcessState>, process_id: u64) -> Result<u64, Trap> {
let process = caller
.data_mut()
.resources
.processes
.remove(process_id)
.or_trap("lunatic::message::add_process")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::add_process")?;
let pid = match message {
Message::Data(data) => data.add_process(process) as u64,
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(pid)
}
//% lunatic::message::add_tcp_stream(stream_id: u64) -> u64
//%
//% Adds a TCP stream resource to the next message and returns the location in the array the TCP
//% stream was added to. This will remove the TCP stream from the current process' resources.
//%
//% Traps:
//% * If TCP stream ID doesn't exist
//% * If it's called before the next message is created.
fn add_tcp_stream(mut caller: Caller<ProcessState>, stream_id: u64) -> Result<u64, Trap> {
let stream = caller
.data_mut()
.resources
.tcp_streams
.remove(stream_id)
.or_trap("lunatic::message::add_tcp_stream")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::add_tcp_stream")?;
let stream_id = match message {
Message::Data(data) => data.add_tcp_stream(stream) as u64,
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(stream_id)
}
//% lunatic::message::send(
//% process_id: i64,
//% ) -> i32
//%
//% Returns:
//% * 0 on success
//% * 1 on error - Process can't receive messages (finished).
//%
//% Sends the message to a process.
//%
//% Traps:
//% * If the process ID doesn't exist.
//% * If it's called before creating the next message.
fn send(mut caller: Caller<ProcessState>, process_id: u64) -> Result<u32, Trap> {
let message = caller
.data_mut()
.message
.take()
.or_trap("lunatic::message::send")?;
let process = caller
.data()
.resources
.processes
.get(process_id)
.or_trap("lunatic::message::send")?;
let result = match process.send_message(message) {
Ok(()) => 0,
Err(_error) => 1,
};
Ok(result)
}
//% lunatic::message::prepare_receive(i32_data_size_ptr: i32, i32_res_size_ptr: i32) -> i32
//%
//% Returns:
//% * 0 if it's a regular message.
//% * 1 if it's a signal turned into a message.
//%
//% For regular messages both parameters are used.
//% * **i32_data_size_ptr** - Location to write the message buffer size to.
//% * **i32_res_size_ptr** - Location to write the number of resources to.
//%
//% This function should be called before `lunatic::message::receive` to let the guest know how
//% much memory space needs to be reserved for the next message. The data size is in **bytes**,
//% the resources size is the number of resources and each resource is a u64 value. Because of
//% this the guest needs to reserve `8 * resource size` bytes for the resource buffer.
//%
//% Traps:
//% * If **size_ptr** is outside the memory.
fn prepare_receive(
mut caller: Caller<ProcessState>,
data_size_ptr: u32,
res_size_ptr: u32,
) -> Box<dyn Future<Output = Result<u32, Trap>> + Send + '_> {
Box::new(async move {
let message = caller
.data_mut()
.mailbox
.recv()
.await
.expect("a process always hold onto its sender and this can't be None");
let result = match &message {
Message::Data(message) => {
let message_buffer_size = message.buffer_size() as u32;
let message_resources_size = message.resources_size() as u32;
let memory = get_memory(&mut caller)?;
memory
.write(
&mut caller,
data_size_ptr as usize,
&message_buffer_size.to_le_bytes(),
)
.or_trap("lunatic::message::prepare_receive")?;
memory
.write(
&mut caller,
res_size_ptr as usize,
&message_resources_size.to_le_bytes(),
)
.or_trap("lunatic::message::prepare_receive")?;
0
}
Message::Signal => 1,
};
// Put the message into the scratch area
caller.data_mut().message = Some(message);
Ok(result)
})
}
//% lunatic::message::receive(data_ptr: i32, resource_ptr: i32)
//%
//% * **data_ptr** - Pointer to write the data to.
//% * **resource_ptr** - Pointer to an array of i64 values, where each value represents the
//% resource id inside the new process. Resources are in the same order they
//% were added.
//%
//% Writes the message that was prepared with `lunatic::message::prepare_receive` to the guest. It
//% should only be called if `prepare_receive` returned 0, otherwise it will trap. Signal messages
//% don't carry any additional information; everything we need was returned by `prepare_receive`.
//%
//% Traps:
//% * If `lunatic::message::prepare_receive` was not called before.
//% * If **data_ptr + size of the message** is outside the memory.
//% * If **resource_ptr + size of the resources** is outside the memory.
fn receive(mut caller: Caller<ProcessState>, data_ptr: u32, resource_ptr: u32) -> Result<(), Trap> {
let last_message = caller
.data_mut()
.message
.take()
.or_trap("lunatic::message::receive")?;
match last_message {
Message::Data(last_message) => {
let memory = get_memory(&mut caller)?;
memory
.write(&mut caller, data_ptr as usize, last_message.buffer())
.or_trap("lunatic::message::receive")?;
let resources: Vec<u8> = last_message
.resources()
.into_iter()
.map(|resource| match resource {
crate::message::Resource::Process(process_handle) => {
u64::to_le_bytes(caller.data_mut().resources.processes.add(process_handle))
}
crate::message::Resource::TcpStream(tcp_stream) => {
u64::to_le_bytes(caller.data_mut().resources.tcp_streams.add(tcp_stream))
}
})
.flatten()
.collect();
memory
.write(&mut caller, resource_ptr as usize, &resources)
.or_trap("lunatic::message::receive")?;
Ok(())
}
Message::Signal => Err(Trap::new("`lunatic::message::receive` called on a signal")),
}
} | //% received message into the same structure.
//%
//% This can be a bit confusing, because resources are just IDs (u64 values) themselves. But we
//% still need to serialize them into different u64 values. Resources are inherently bound to a
//% process and you can't access another resource just by guessing an ID from another process. | random_line_split |
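
An assumed sketch of the serialization scheme the comment block in these rows describes: resources travel as their index in the message's resource array, and only that index is written into the raw data buffer. The concrete byte layout (length-prefixed string, little-endian integers) is an illustrative choice, not a format mandated by lunatic.

```rust
// Illustrative encoding of the `struct A` example from the comment block above.
struct A {
    a: String,
    b_process_index: u64,    // value returned by lunatic::message::add_process
    c: i32,
    d_tcp_stream_index: u64, // value returned by lunatic::message::add_tcp_stream
}

fn encode(msg: &A) -> Vec<u8> {
    let mut buf = Vec::new();
    buf.extend_from_slice(&(msg.a.len() as u32).to_le_bytes());
    buf.extend_from_slice(msg.a.as_bytes());
    buf.extend_from_slice(&msg.b_process_index.to_le_bytes());
    buf.extend_from_slice(&msg.c.to_le_bytes());
    buf.extend_from_slice(&msg.d_tcp_stream_index.to_le_bytes());
    buf
}

fn main() {
    let msg = A {
        a: "Some string".to_string(),
        b_process_index: 0,    // resource 0 in the message
        c: 42,
        d_tcp_stream_index: 1, // resource 1 in the message
    };
    // On the receiving side, indexes 0 and 1 are looked up in the ID array that
    // lunatic::message::receive fills in, yielding resource IDs valid there.
    println!("encoded {} bytes", encode(&msg).len());
}
```
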
mailbox.rs | use std::future::Future;
use anyhow::Result;
use wasmtime::{Caller, FuncType, Linker, Trap, ValType};
use crate::{
api::{error::IntoTrap, get_memory},
message::Message,
state::ProcessState,
};
use super::{link_async2_if_match, link_if_match};
// Register the mailbox APIs to the linker
pub(crate) fn register(
linker: &mut Linker<ProcessState>,
namespace_filter: &[String],
) -> Result<()> {
link_if_match(
linker,
"lunatic::message",
"create",
FuncType::new([], []),
create,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"set_buffer",
FuncType::new([ValType::I32, ValType::I32], []),
set_buffer,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"add_process",
FuncType::new([ValType::I64], [ValType::I64]),
add_process,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"add_tcp_stream",
FuncType::new([ValType::I64], [ValType::I64]),
add_tcp_stream,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"send",
FuncType::new([ValType::I64], [ValType::I32]),
send,
namespace_filter,
)?;
link_async2_if_match(
linker,
"lunatic::message",
"prepare_receive",
FuncType::new([ValType::I32, ValType::I32], [ValType::I32]),
prepare_receive,
namespace_filter,
)?;
link_if_match(
linker,
"lunatic::message",
"receive",
FuncType::new([ValType::I32, ValType::I32], []),
receive,
namespace_filter,
)?;
Ok(())
}
//% lunatic::message
//%
//% A lunatic message consists of 2 parts:
//% * A buffer of raw data
//% * An array of resource IDs
//%
//% If resources are sent between processes, their ID changes. The resource ID can for example
//% be already taken in the receiving process. So we need a way to communicate the new ID on the
//% receiving end.
//%
//% When the `create()` function is called an empty message is allocated and both parts can be
//% modified before it's sent to another process. If a new resource is added to the message, the
//% index inside of the array is returned. This information can be now serialized inside the raw
//% data buffer in some way. E.g. You are serializing a structure like this:
//%
//% struct A {
//% a: String,
//% b: Process,
//% c: i32,
//% d: TcpStream
//% }
//%
//% Into something like this:
//%
//% ["Some string" | [resource 0] | i32 value | [resource 1] ]
//%
//% [resource 0] & [resource 1] are just encoded as 0 and 1 u64 values, representing their order
//% in the resource array.
//%
//% It's common to use some serialization library that will encode a mixture of raw data and
//% resource indexes into the data buffer.
//%
//% On the receiving side, first the `prepare_receive()` function must be called to receive info
//% on how big the buffer and resource arrays are, so that enough space can be allocated inside
//% the guest.
//%
//% The `receive()` function will do 2 things:
//% * Write the buffer of raw data to the specified location
//% * Give all resources to the new process (with new IDs) and write the IDs to the specified
//% location in the same order they were added.
//% Now the information from the buffer (with resource indexes) can be used to deserialize the
//% received message into the same structure.
//%
//% This can be a bit confusing, because resources are just IDs (u64 values) themselves. But we
//% still need to serialize them into different u64 values. Resources are inherently bound to a
//% process and you can't access another resource just by guessing an ID from another process.
//% The process of sending them around needs to be explicit.
//%
//% This API was designed around the idea that most guest languages will use some serialization
//% library and turning resources into indexes is a way of serializing. The same is true for
//% deserializing them on the receiving side, when an index needs to be turned into an actual
//% resource ID.
//% lunatic::message::create()
//%
//% Creates a new message. This message is intended to be modified by other functions in this
//% namespace. Once `lunatic::message::send` is called it will be sent to another process.
fn create(mut caller: Caller<ProcessState>) {
caller.data_mut().message = Some(Message::default());
}
//% lunatic::message::set_buffer(
//% data_ptr: i32,
//% data_len: i32,
//% )
//%
//% Sets the data for the next message.
//%
//% Traps:
//% * If **data_ptr + data_len** is outside the memory.
//% * If it's called before the next message is created.
fn set_buffer(mut caller: Caller<ProcessState>, data_ptr: u32, data_len: u32) -> Result<(), Trap> {
let mut buffer = vec![0; data_len as usize];
let memory = get_memory(&mut caller)?;
memory
.read(&caller, data_ptr as usize, buffer.as_mut_slice())
.or_trap("lunatic::message::set_buffer")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::set_buffer")?;
match message {
Message::Data(data) => data.set_buffer(buffer),
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(())
}
//% lunatic::message::add_process(process_id: u64) -> u64
//%
//% Adds a process resource to the next message and returns the location in the array the process
//% was added to. This will remove the process handle from the current process' resources.
//%
//% Traps:
//% * If process ID doesn't exist
//% * If it's called before the next message is created.
fn add_process(mut caller: Caller<ProcessState>, process_id: u64) -> Result<u64, Trap> {
let process = caller
.data_mut()
.resources
.processes
.remove(process_id)
.or_trap("lunatic::message::add_process")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::add_process")?;
let pid = match message {
Message::Data(data) => data.add_process(process) as u64,
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(pid)
}
//% lunatic::message::add_tcp_stream(stream_id: u64) -> u64
//%
//% Adds a TCP stream resource to the next message and returns the location in the array the TCP
//% stream was added to. This will remove the TCP stream from the current process' resources.
//%
//% Traps:
//% * If TCP stream ID doesn't exist
//% * If it's called before the next message is created.
fn add_tcp_stream(mut caller: Caller<ProcessState>, stream_id: u64) -> Result<u64, Trap> {
let stream = caller
.data_mut()
.resources
.tcp_streams
.remove(stream_id)
.or_trap("lunatic::message::add_tcp_stream")?;
let message = caller
.data_mut()
.message
.as_mut()
.or_trap("lunatic::message::add_tcp_stream")?;
let stream_id = match message {
Message::Data(data) => data.add_tcp_stream(stream) as u64,
Message::Signal => return Err(Trap::new("Unexpected `Message::Signal` in scratch buffer")),
};
Ok(stream_id)
}
//% lunatic::message::send(
//% process_id: i64,
//% ) -> i32
//%
//% Returns:
//% * 0 on success
//% * 1 on error - Process can't receive messages (finished).
//%
//% Sends the message to a process.
//%
//% Traps:
//% * If the process ID doesn't exist.
//% * If it's called before creating the next message.
fn send(mut caller: Caller<ProcessState>, process_id: u64) -> Result<u32, Trap> {
let message = caller
.data_mut()
.message
.take()
.or_trap("lunatic::message::send")?;
let process = caller
.data()
.resources
.processes
.get(process_id)
.or_trap("lunatic::message::send")?;
let result = match process.send_message(message) {
Ok(()) => 0,
Err(_error) => 1,
};
Ok(result)
}
//% lunatic::message::prepare_receive(i32_data_size_ptr: i32, i32_res_size_ptr: i32) -> i32
//%
//% Returns:
//% * 0 if it's a regular message.
//% * 1 if it's a signal turned into a message.
//%
//% For regular messages both parameters are used.
//% * **i32_data_size_ptr** - Location to write the message buffer size to.
//% * **i32_res_size_ptr** - Location to write the number of resources to.
//%
//% This function should be called before `lunatic::message::receive` to let the guest know how
//% much memory space needs to be reserved for the next message. The data size is in **bytes**,
//% the resources size is the number of resources and each resource is a u64 value (8 bytes). Because of
//% this the guest needs to reserve `8 * resource size` bytes for the resource buffer.
//%
//% Traps:
//% * If **i32_data_size_ptr** or **i32_res_size_ptr** is outside the memory.
fn | (
mut caller: Caller<ProcessState>,
data_size_ptr: u32,
res_size_ptr: u32,
) -> Box<dyn Future<Output = Result<u32, Trap>> + Send + '_> {
Box::new(async move {
let message = caller
.data_mut()
.mailbox
.recv()
.await
.expect("a process always hold onto its sender and this can't be None");
let result = match &message {
Message::Data(message) => {
let message_buffer_size = message.buffer_size() as u32;
let message_resources_size = message.resources_size() as u32;
let memory = get_memory(&mut caller)?;
memory
.write(
&mut caller,
data_size_ptr as usize,
&message_buffer_size.to_le_bytes(),
)
.or_trap("lunatic::message::prepare_receive")?;
memory
.write(
&mut caller,
res_size_ptr as usize,
&message_resources_size.to_le_bytes(),
)
.or_trap("lunatic::message::prepare_receive")?;
0
}
Message::Signal => 1,
};
// Put the message into the scratch area
caller.data_mut().message = Some(message);
Ok(result)
})
}
//% lunatic::message::receive(data_ptr: i32, resource_ptr: i32)
//%
//% * **data_ptr** - Pointer to write the data to.
//% * **resource_ptr** - Pointer to an array of i64 values, where each value represents the
//% resource id inside the receiving process. Resources are in the same order they
//% were added.
//%
//% Writes the message that was prepared with `lunatic::message::prepare_receive` to the guest. It
//% should only be called if `prepare_receive` returned 0, otherwise it will trap. Signal messages
//% don't carry any additional information and everything we need was returned by `prepare_receive`.
//%
//% Traps:
//% * If `lunatic::message::prepare_receive` was not called before.
//% * If **data_ptr + size of the message** is outside the memory.
//% * If **resource_ptr + size of the resources** is outside the memory.
fn receive(mut caller: Caller<ProcessState>, data_ptr: u32, resource_ptr: u32) -> Result<(), Trap> {
let last_message = caller
.data_mut()
.message
.take()
.or_trap("lunatic::message::receive")?;
match last_message {
Message::Data(last_message) => {
let memory = get_memory(&mut caller)?;
memory
.write(&mut caller, data_ptr as usize, last_message.buffer())
.or_trap("lunatic::message::receive")?;
let resources: Vec<u8> = last_message
.resources()
.into_iter()
.map(|resource| match resource {
crate::message::Resource::Process(process_handle) => {
u64::to_le_bytes(caller.data_mut().resources.processes.add(process_handle))
}
crate::message::Resource::TcpStream(tcp_stream) => {
u64::to_le_bytes(caller.data_mut().resources.tcp_streams.add(tcp_stream))
}
})
.flatten()
.collect();
memory
.write(&mut caller, resource_ptr as usize, &resources)
.or_trap("lunatic::message::receive")?;
Ok(())
}
Message::Signal => Err(Trap::new("`lunatic::message::receive` called on a signal")),
}
}
| prepare_receive | identifier_name |
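// A minimal guest-side sketch of the call sequence documented by the `//%`
// comments above. The wasm import module name and the exact guest-side types
// are assumptions read off the doc headers, not confirmed bindings, and the
// pointer-to-u32 casts assume a wasm32 target.
#[link(wasm_import_module = "lunatic::message")]
extern "C" {
    fn create();
    fn set_buffer(data_ptr: u32, data_len: u32);
    fn add_process(process_id: u64) -> u64;
    fn send(process_id: u64) -> u32;
    fn prepare_receive(data_size_ptr: u32, res_size_ptr: u32) -> u32;
    fn receive(data_ptr: u32, resource_ptr: u32);
}

// Hypothetical send path: create the scratch message, fill its buffer, send it.
unsafe fn send_bytes(target: u64, payload: &[u8]) -> u32 {
    create();
    set_buffer(payload.as_ptr() as u32, payload.len() as u32);
    send(target)
}

// Hypothetical receive path: query the sizes, reserve space, then read the
// buffer and the transferred resource ids.
unsafe fn receive_bytes() -> Vec<u8> {
    let (mut data_size, mut res_size) = (0u32, 0u32);
    // 0 = regular message, 1 = signal turned into a message (no payload).
    if prepare_receive(&mut data_size as *mut u32 as u32, &mut res_size as *mut u32 as u32) != 0 {
        return Vec::new();
    }
    let mut data = vec![0u8; data_size as usize];
    // One u64 (8 bytes) per transferred resource.
    let mut resources = vec![0u64; res_size as usize];
    receive(data.as_mut_ptr() as u32, resources.as_mut_ptr() as u32);
    data
}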
string.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Format string literals.
use regex::Regex;
use unicode_segmentation::UnicodeSegmentation;
use config::Config;
use shape::Shape;
use utils::wrap_str;
const MIN_STRING: usize = 10;
pub struct StringFormat<'a> {
pub opener: &'a str,
pub closer: &'a str,
pub line_start: &'a str,
pub line_end: &'a str,
pub shape: Shape,
pub trim_end: bool,
pub config: &'a Config,
}
impl<'a> StringFormat<'a> {
pub fn new(shape: Shape, config: &'a Config) -> StringFormat<'a> {
StringFormat {
opener: "\"",
closer: "\"",
line_start: " ",
line_end: "\\",
shape,
trim_end: false,
config,
}
}
/// Returns the maximum number of graphemes that is possible on a line while taking the
/// indentation into account.
///
/// If we cannot put at least a single character per line, the rewrite won't succeed.
fn max_chars_with_indent(&self) -> Option<usize> {
Some(
self.shape
.width
.checked_sub(self.opener.len() + self.line_end.len() + 1)?
+ 1,
)
}
/// Like max_chars_with_indent but the indentation is not subtracted.
/// This allows fitting more graphemes from the string on a line when
/// SnippetState::Overflow.
fn max_chars_without_indent(&self) -> Option<usize> {
Some(self.config.max_width().checked_sub(self.line_end.len())?)
}
}
pub fn rewrite_string<'a>(orig: &str, fmt: &StringFormat<'a>) -> Option<String> {
let max_chars_with_indent = fmt.max_chars_with_indent()?;
let max_chars_without_indent = fmt.max_chars_without_indent()?;
let indent = fmt.shape.indent.to_string_with_newline(fmt.config);
// Strip line breaks.
// With this regex applied, all remaining whitespaces are significant
let strip_line_breaks_re = Regex::new(r"([^\\](\\\\)*)\\[\n\r][[:space:]]*").unwrap();
let stripped_str = strip_line_breaks_re.replace_all(orig, "$1");
let graphemes = UnicodeSegmentation::graphemes(&*stripped_str, false).collect::<Vec<&str>>();
// `cur_start` is the position in `orig` of the start of the current line.
let mut cur_start = 0;
let mut result = String::with_capacity(
stripped_str
.len()
.checked_next_power_of_two()
.unwrap_or(usize::max_value()),
);
result.push_str(fmt.opener);
// Snip a line at a time from `stripped_str` until it is used up. Push the snippet
// onto result.
let mut cur_max_chars = max_chars_with_indent;
loop {
// All the input starting at cur_start fits on the current line
if graphemes.len() - cur_start <= cur_max_chars {
result.push_str(&graphemes[cur_start..].join(""));
break;
}
// The input starting at cur_start needs to be broken
match break_string(cur_max_chars, fmt.trim_end, &graphemes[cur_start..]) {
SnippetState::LineEnd(line, len) => {
result.push_str(&line);
result.push_str(fmt.line_end);
result.push_str(&indent);
result.push_str(fmt.line_start);
cur_max_chars = max_chars_with_indent;
cur_start += len;
}
SnippetState::Overflow(line, len) => {
result.push_str(&line);
cur_max_chars = max_chars_without_indent;
cur_start += len;
}
SnippetState::EndOfInput(line) => {
result.push_str(&line);
break;
}
}
}
result.push_str(fmt.closer);
wrap_str(result, fmt.config.max_width(), fmt.shape)
}
/// Result of breaking a string so it fits in a line and the state it ended in.
/// The state informs about what to do with the snippet and how to continue the breaking process.
#[derive(Debug, PartialEq)]
enum SnippetState {
/// The input could not be broken and so rewriting the string is finished.
EndOfInput(String),
/// The input could be broken and the returned snippet should be ended with a
/// `[StringFormat::line_end]`. The next snippet needs to be indented.
LineEnd(String, usize),
/// The input could be broken but the returned snippet should not be ended with a
/// `[StringFormat::line_end]` because the whitespace is significant. Therefore, the next
/// snippet should not be indented.
Overflow(String, usize),
}
/// Break the input string at a boundary character around the offset `max_chars`. A boundary
/// character is either a punctuation or a whitespace.
fn break_string(max_chars: usize, trim_end: bool, input: &[&str]) -> SnippetState {
let break_at = |index /* grapheme at index is included */| {
// Absorb any whitespace to the left/right of `input[index]` and
// check if there is a line feed, in which case the whitespace needs to be kept.
let mut index_minus_ws = index;
for (i, grapheme) in input[0..=index].iter().enumerate().rev() {
if !trim_end && is_line_feed(grapheme) {
return SnippetState::Overflow(input[0..=i].join("").to_string(), i + 1);
} else if !is_whitespace(grapheme) {
index_minus_ws = i;
break;
}
}
let mut index_plus_ws = index;
for (i, grapheme) in input[index + 1..].iter().enumerate() {
if !trim_end && is_line_feed(grapheme) {
return SnippetState::Overflow(
input[0..=index + 1 + i].join("").to_string(),
index + 2 + i,
);
} else if !is_whitespace(grapheme) {
index_plus_ws = index + i;
break;
}
}
if trim_end {
SnippetState::LineEnd(
input[0..=index_minus_ws].join("").to_string(),
index_plus_ws + 1,
)
} else {
SnippetState::LineEnd(
input[0..=index_plus_ws].join("").to_string(),
index_plus_ws + 1,
)
}
};
// Find the position in input for breaking the string
match input[0..max_chars]
.iter()
.rposition(|grapheme| is_whitespace(grapheme))
{
// Found a whitespace and what is on its left side is big enough.
Some(index) if index >= MIN_STRING => break_at(index),
// No whitespace found, try looking for a punctuation instead
_ => match input[0..max_chars]
.iter()
.rposition(|grapheme| is_punctuation(grapheme))
{
// Found a punctuation and what is on its left side is big enough.
Some(index) if index >= MIN_STRING => break_at(index),
// Either no boundary character was found to the left of `input[max_chars]`, or the line
// got too small. We try searching for a boundary character to the right.
_ => match input[max_chars..]
.iter()
.position(|grapheme| is_whitespace(grapheme) || is_punctuation(grapheme))
{
// A boundary was found after the line limit
Some(index) => break_at(max_chars + index),
// No boundary to the right, the input cannot be broken
None => SnippetState::EndOfInput(input.join("").to_string()),
},
},
}
}
fn is_line_feed(grapheme: &str) -> bool {
grapheme.as_bytes()[0] == b'\n'
}
fn is_whitespace(grapheme: &str) -> bool {
grapheme.chars().all(|c| c.is_whitespace())
}
fn is_punctuation(grapheme: &str) -> bool {
match grapheme.as_bytes()[0] {
b':' | b',' | b';' | b'.' => true,
_ => false,
}
}
#[cfg(test)]
mod test {
use super::{break_string, rewrite_string, SnippetState, StringFormat};
use shape::{Indent, Shape};
use unicode_segmentation::UnicodeSegmentation;
#[test]
fn issue343() {
let config = Default::default();
let fmt = StringFormat::new(Shape::legacy(2, Indent::empty()), &config);
rewrite_string("eq_", &fmt);
}
#[test]
fn should_break_on_whitespace() {
let string = "Placerat felis. Mauris porta ante sagittis purus.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Placerat felis. ".to_string(), 16)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Placerat felis.".to_string(), 16)
);
}
#[test]
fn should_break_on_punctuation() {
let string = "Placerat_felis._Mauris_porta_ante_sagittis_purus.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Placerat_felis.".to_string(), 15)
);
}
#[test]
fn should_break_forward() {
let string = "Venenatis_tellus_vel_tellus. Aliquam aliquam dolor at justo.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Venenatis_tellus_vel_tellus. ".to_string(), 29)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Venenatis_tellus_vel_tellus.".to_string(), 29)
);
}
#[test]
fn | () {
let string = "Venenatis_tellus_vel_tellus";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::EndOfInput("Venenatis_tellus_vel_tellus".to_string())
);
}
#[test]
fn significant_whitespaces() {
let string = "Neque in sem. \n Pellentesque tellus augue.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(15, false, &graphemes[..]),
SnippetState::Overflow("Neque in sem. \n".to_string(), 20)
);
assert_eq!(
break_string(25, false, &graphemes[..]),
SnippetState::Overflow("Neque in sem. \n".to_string(), 20)
);
// if `trim_end` is true, then the line feed does not matter anymore
assert_eq!(
break_string(15, true, &graphemes[..]),
SnippetState::LineEnd("Neque in sem.".to_string(), 26)
);
assert_eq!(
break_string(25, true, &graphemes[..]),
SnippetState::LineEnd("Neque in sem.".to_string(), 26)
);
}
#[test]
fn big_whitespace() {
let string = "Neque in sem. Pellentesque tellus augue.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Neque in sem. ".to_string(), 25)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Neque in sem.".to_string(), 25)
);
}
}
| nothing_to_break | identifier_name |
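// A minimal usage sketch of `rewrite_string`, mirroring the `issue343` test
// above. `Shape`, `Indent` and `Config` come from the surrounding rustfmt
// crate, so this only compiles inside it; the literal and the width of 100
// columns are arbitrary choices for illustration.
fn wrap_literal_example() {
    let config = Default::default();
    let fmt = StringFormat::new(Shape::legacy(100, Indent::empty()), &config);
    // `Some(wrapped)` when the literal can be broken to fit the shape,
    // `None` when not even a single grapheme per line can be placed.
    let wrapped = rewrite_string("a fairly long string literal that may need wrapping", &fmt);
    assert!(wrapped.is_some());
}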
string.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Format string literals.
use regex::Regex;
use unicode_segmentation::UnicodeSegmentation;
use config::Config;
use shape::Shape;
use utils::wrap_str;
const MIN_STRING: usize = 10;
pub struct StringFormat<'a> {
pub opener: &'a str,
pub closer: &'a str,
pub line_start: &'a str,
pub line_end: &'a str,
pub shape: Shape,
pub trim_end: bool,
pub config: &'a Config,
}
impl<'a> StringFormat<'a> {
pub fn new(shape: Shape, config: &'a Config) -> StringFormat<'a> {
StringFormat {
opener: "\"",
closer: "\"",
line_start: " ",
line_end: "\\",
shape,
trim_end: false,
config,
}
}
/// Returns the maximum number of graphemes that is possible on a line while taking the
/// indentation into account.
///
/// If we cannot put at least a single character per line, the rewrite won't succeed.
fn max_chars_with_indent(&self) -> Option<usize> {
Some(
self.shape
.width
.checked_sub(self.opener.len() + self.line_end.len() + 1)?
+ 1,
)
}
/// Like max_chars_with_indent but the indentation is not subtracted.
/// This allows fitting more graphemes from the string on a line when
/// SnippetState::Overflow.
fn max_chars_without_indent(&self) -> Option<usize> {
Some(self.config.max_width().checked_sub(self.line_end.len())?)
}
}
pub fn rewrite_string<'a>(orig: &str, fmt: &StringFormat<'a>) -> Option<String> {
let max_chars_with_indent = fmt.max_chars_with_indent()?;
let max_chars_without_indent = fmt.max_chars_without_indent()?;
let indent = fmt.shape.indent.to_string_with_newline(fmt.config);
// Strip line breaks.
// With this regex applied, all remaining whitespaces are significant
let strip_line_breaks_re = Regex::new(r"([^\\](\\\\)*)\\[\n\r][[:space:]]*").unwrap();
let stripped_str = strip_line_breaks_re.replace_all(orig, "$1");
let graphemes = UnicodeSegmentation::graphemes(&*stripped_str, false).collect::<Vec<&str>>();
// `cur_start` is the position in `orig` of the start of the current line.
let mut cur_start = 0;
let mut result = String::with_capacity(
stripped_str
.len()
.checked_next_power_of_two()
.unwrap_or(usize::max_value()),
);
result.push_str(fmt.opener);
// Snip a line at a time from `stripped_str` until it is used up. Push the snippet
// onto result.
let mut cur_max_chars = max_chars_with_indent;
loop {
// All the input starting at cur_start fits on the current line
if graphemes.len() - cur_start <= cur_max_chars {
result.push_str(&graphemes[cur_start..].join(""));
break;
}
// The input starting at cur_start needs to be broken
match break_string(cur_max_chars, fmt.trim_end, &graphemes[cur_start..]) {
SnippetState::LineEnd(line, len) => {
result.push_str(&line);
result.push_str(fmt.line_end);
result.push_str(&indent);
result.push_str(fmt.line_start);
cur_max_chars = max_chars_with_indent;
cur_start += len;
}
SnippetState::Overflow(line, len) => {
result.push_str(&line);
cur_max_chars = max_chars_without_indent;
cur_start += len;
}
SnippetState::EndOfInput(line) => {
result.push_str(&line);
break;
}
}
}
result.push_str(fmt.closer);
wrap_str(result, fmt.config.max_width(), fmt.shape)
}
/// Result of breaking a string so it fits in a line and the state it ended in.
/// The state informs about what to do with the snippet and how to continue the breaking process.
#[derive(Debug, PartialEq)]
enum SnippetState {
/// The input could not be broken and so rewriting the string is finished.
EndOfInput(String),
/// The input could be broken and the returned snippet should be ended with a
/// `[StringFormat::line_end]`. The next snippet needs to be indented.
LineEnd(String, usize),
/// The input could be broken but the returned snippet should not be ended with a
/// `[StringFormat::line_end]` because the whitespace is significant. Therefore, the next
/// snippet should not be indented.
Overflow(String, usize),
}
/// Break the input string at a boundary character around the offset `max_chars`. A boundary
/// character is either a punctuation or a whitespace.
fn break_string(max_chars: usize, trim_end: bool, input: &[&str]) -> SnippetState {
let break_at = |index /* grapheme at index is included */| {
// Absorb any whitespace to the left/right of `input[index]` and
// check if there is a line feed, in which case the whitespace needs to be kept.
let mut index_minus_ws = index;
for (i, grapheme) in input[0..=index].iter().enumerate().rev() {
if !trim_end && is_line_feed(grapheme) {
return SnippetState::Overflow(input[0..=i].join("").to_string(), i + 1);
} else if !is_whitespace(grapheme) {
index_minus_ws = i;
break;
}
}
let mut index_plus_ws = index;
for (i, grapheme) in input[index + 1..].iter().enumerate() {
if !trim_end && is_line_feed(grapheme) {
return SnippetState::Overflow(
input[0..=index + 1 + i].join("").to_string(),
index + 2 + i,
);
} else if !is_whitespace(grapheme) {
index_plus_ws = index + i;
break;
}
}
if trim_end {
SnippetState::LineEnd(
input[0..=index_minus_ws].join("").to_string(),
index_plus_ws + 1,
)
} else {
SnippetState::LineEnd(
input[0..=index_plus_ws].join("").to_string(),
index_plus_ws + 1,
)
}
};
// Find the position in input for breaking the string
match input[0..max_chars]
.iter()
.rposition(|grapheme| is_whitespace(grapheme))
{
// Found a whitespace and what is on its left side is big enough.
Some(index) if index >= MIN_STRING => break_at(index),
// No whitespace found, try looking for a punctuation instead
_ => match input[0..max_chars]
.iter()
.rposition(|grapheme| is_punctuation(grapheme))
{
// Found a punctuation and what is on its left side is big enough.
Some(index) if index >= MIN_STRING => break_at(index),
// Either no boundary character was found to the left of `input[max_chars]`, or the line
// got too small. We try searching for a boundary character to the right.
_ => match input[max_chars..]
.iter()
.position(|grapheme| is_whitespace(grapheme) || is_punctuation(grapheme))
{
// A boundary was found after the line limit
Some(index) => break_at(max_chars + index),
// No boundary to the right, the input cannot be broken
None => SnippetState::EndOfInput(input.join("").to_string()),
},
},
}
}
fn is_line_feed(grapheme: &str) -> bool {
grapheme.as_bytes()[0] == b'\n'
}
fn is_whitespace(grapheme: &str) -> bool {
grapheme.chars().all(|c| c.is_whitespace())
}
fn is_punctuation(grapheme: &str) -> bool {
match grapheme.as_bytes()[0] {
b':' | b',' | b';' | b'.' => true,
_ => false,
}
}
#[cfg(test)]
mod test {
use super::{break_string, rewrite_string, SnippetState, StringFormat};
use shape::{Indent, Shape};
use unicode_segmentation::UnicodeSegmentation;
#[test]
fn issue343() {
let config = Default::default();
let fmt = StringFormat::new(Shape::legacy(2, Indent::empty()), &config);
rewrite_string("eq_", &fmt);
}
#[test]
fn should_break_on_whitespace() {
let string = "Placerat felis. Mauris porta ante sagittis purus.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Placerat felis. ".to_string(), 16)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Placerat felis.".to_string(), 16)
);
}
#[test]
fn should_break_on_punctuation() {
let string = "Placerat_felis._Mauris_porta_ante_sagittis_purus.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Placerat_felis.".to_string(), 15)
);
}
#[test]
fn should_break_forward() {
let string = "Venenatis_tellus_vel_tellus. Aliquam aliquam dolor at justo.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Venenatis_tellus_vel_tellus. ".to_string(), 29)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Venenatis_tellus_vel_tellus.".to_string(), 29)
);
}
#[test]
fn nothing_to_break() {
let string = "Venenatis_tellus_vel_tellus";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::EndOfInput("Venenatis_tellus_vel_tellus".to_string())
);
}
#[test]
fn significant_whitespaces() | }
#[test]
fn big_whitespace() {
let string = "Neque in sem. Pellentesque tellus augue.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Neque in sem. ".to_string(), 25)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Neque in sem.".to_string(), 25)
);
}
}
| {
let string = "Neque in sem. \n Pellentesque tellus augue.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(15, false, &graphemes[..]),
SnippetState::Overflow("Neque in sem. \n".to_string(), 20)
);
assert_eq!(
break_string(25, false, &graphemes[..]),
SnippetState::Overflow("Neque in sem. \n".to_string(), 20)
);
// if `trim_end` is true, then the line feed does not matter anymore
assert_eq!(
break_string(15, true, &graphemes[..]),
SnippetState::LineEnd("Neque in sem.".to_string(), 26)
);
assert_eq!(
break_string(25, true, &graphemes[..]),
SnippetState::LineEnd("Neque in sem.".to_string(), 26)
); | identifier_body |
string.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Format string literals.
use regex::Regex;
use unicode_segmentation::UnicodeSegmentation;
use config::Config;
use shape::Shape;
use utils::wrap_str;
const MIN_STRING: usize = 10;
pub struct StringFormat<'a> {
pub opener: &'a str,
pub closer: &'a str,
pub line_start: &'a str,
pub line_end: &'a str,
pub shape: Shape,
pub trim_end: bool,
pub config: &'a Config,
}
impl<'a> StringFormat<'a> {
pub fn new(shape: Shape, config: &'a Config) -> StringFormat<'a> {
StringFormat {
opener: "\"",
closer: "\"",
line_start: " ",
line_end: "\\",
shape,
trim_end: false,
config,
}
}
/// Returns the maximum number of graphemes that is possible on a line while taking the
/// indentation into account.
///
/// If we cannot put at least a single character per line, the rewrite won't succeed.
fn max_chars_with_indent(&self) -> Option<usize> {
Some(
self.shape
.width | /// Like max_chars_with_indent but the indentation is not subtracted.
/// This allows fitting more graphemes from the string on a line when
/// SnippetState::Overflow.
fn max_chars_without_indent(&self) -> Option<usize> {
Some(self.config.max_width().checked_sub(self.line_end.len())?)
}
}
pub fn rewrite_string<'a>(orig: &str, fmt: &StringFormat<'a>) -> Option<String> {
let max_chars_with_indent = fmt.max_chars_with_indent()?;
let max_chars_without_indent = fmt.max_chars_without_indent()?;
let indent = fmt.shape.indent.to_string_with_newline(fmt.config);
// Strip line breaks.
// With this regex applied, all remaining whitespaces are significant
let strip_line_breaks_re = Regex::new(r"([^\\](\\\\)*)\\[\n\r][[:space:]]*").unwrap();
let stripped_str = strip_line_breaks_re.replace_all(orig, "$1");
let graphemes = UnicodeSegmentation::graphemes(&*stripped_str, false).collect::<Vec<&str>>();
// `cur_start` is the position in `orig` of the start of the current line.
let mut cur_start = 0;
let mut result = String::with_capacity(
stripped_str
.len()
.checked_next_power_of_two()
.unwrap_or(usize::max_value()),
);
result.push_str(fmt.opener);
// Snip a line at a time from `stripped_str` until it is used up. Push the snippet
// onto result.
let mut cur_max_chars = max_chars_with_indent;
loop {
// All the input starting at cur_start fits on the current line
if graphemes.len() - cur_start <= cur_max_chars {
result.push_str(&graphemes[cur_start..].join(""));
break;
}
// The input starting at cur_start needs to be broken
match break_string(cur_max_chars, fmt.trim_end, &graphemes[cur_start..]) {
SnippetState::LineEnd(line, len) => {
result.push_str(&line);
result.push_str(fmt.line_end);
result.push_str(&indent);
result.push_str(fmt.line_start);
cur_max_chars = max_chars_with_indent;
cur_start += len;
}
SnippetState::Overflow(line, len) => {
result.push_str(&line);
cur_max_chars = max_chars_without_indent;
cur_start += len;
}
SnippetState::EndOfInput(line) => {
result.push_str(&line);
break;
}
}
}
result.push_str(fmt.closer);
wrap_str(result, fmt.config.max_width(), fmt.shape)
}
/// Result of breaking a string so it fits in a line and the state it ended in.
/// The state informs about what to do with the snippet and how to continue the breaking process.
#[derive(Debug, PartialEq)]
enum SnippetState {
/// The input could not be broken and so rewriting the string is finished.
EndOfInput(String),
/// The input could be broken and the returned snippet should be ended with a
/// `[StringFormat::line_end]`. The next snippet needs to be indented.
LineEnd(String, usize),
/// The input could be broken but the returned snippet should not be ended with a
/// `[StringFormat::line_end]` because the whitespace is significant. Therefore, the next
/// snippet should not be indented.
Overflow(String, usize),
}
/// Break the input string at a boundary character around the offset `max_chars`. A boundary
/// character is either a punctuation or a whitespace.
fn break_string(max_chars: usize, trim_end: bool, input: &[&str]) -> SnippetState {
let break_at = |index /* grapheme at index is included */| {
// Absorb any whitespace to the left/right of `input[index]` and
// check if there is a line feed, in which case the whitespace needs to be kept.
let mut index_minus_ws = index;
for (i, grapheme) in input[0..=index].iter().enumerate().rev() {
if !trim_end && is_line_feed(grapheme) {
return SnippetState::Overflow(input[0..=i].join("").to_string(), i + 1);
} else if !is_whitespace(grapheme) {
index_minus_ws = i;
break;
}
}
let mut index_plus_ws = index;
for (i, grapheme) in input[index + 1..].iter().enumerate() {
if !trim_end && is_line_feed(grapheme) {
return SnippetState::Overflow(
input[0..=index + 1 + i].join("").to_string(),
index + 2 + i,
);
} else if !is_whitespace(grapheme) {
index_plus_ws = index + i;
break;
}
}
if trim_end {
SnippetState::LineEnd(
input[0..=index_minus_ws].join("").to_string(),
index_plus_ws + 1,
)
} else {
SnippetState::LineEnd(
input[0..=index_plus_ws].join("").to_string(),
index_plus_ws + 1,
)
}
};
// Find the position in input for breaking the string
match input[0..max_chars]
.iter()
.rposition(|grapheme| is_whitespace(grapheme))
{
// Found a whitespace and what is on its left side is big enough.
Some(index) if index >= MIN_STRING => break_at(index),
// No whitespace found, try looking for a punctuation instead
_ => match input[0..max_chars]
.iter()
.rposition(|grapheme| is_punctuation(grapheme))
{
// Found a punctuation and what is on its left side is big enough.
Some(index) if index >= MIN_STRING => break_at(index),
// Either no boundary character was found to the left of `input[max_chars]`, or the line
// got too small. We try searching for a boundary character to the right.
_ => match input[max_chars..]
.iter()
.position(|grapheme| is_whitespace(grapheme) || is_punctuation(grapheme))
{
// A boundary was found after the line limit
Some(index) => break_at(max_chars + index),
// No boundary to the right, the input cannot be broken
None => SnippetState::EndOfInput(input.join("").to_string()),
},
},
}
}
fn is_line_feed(grapheme: &str) -> bool {
grapheme.as_bytes()[0] == b'\n'
}
fn is_whitespace(grapheme: &str) -> bool {
grapheme.chars().all(|c| c.is_whitespace())
}
fn is_punctuation(grapheme: &str) -> bool {
match grapheme.as_bytes()[0] {
b':' | b',' | b';' | b'.' => true,
_ => false,
}
}
#[cfg(test)]
mod test {
use super::{break_string, rewrite_string, SnippetState, StringFormat};
use shape::{Indent, Shape};
use unicode_segmentation::UnicodeSegmentation;
#[test]
fn issue343() {
let config = Default::default();
let fmt = StringFormat::new(Shape::legacy(2, Indent::empty()), &config);
rewrite_string("eq_", &fmt);
}
#[test]
fn should_break_on_whitespace() {
let string = "Placerat felis. Mauris porta ante sagittis purus.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Placerat felis. ".to_string(), 16)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Placerat felis.".to_string(), 16)
);
}
#[test]
fn should_break_on_punctuation() {
let string = "Placerat_felis._Mauris_porta_ante_sagittis_purus.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Placerat_felis.".to_string(), 15)
);
}
#[test]
fn should_break_forward() {
let string = "Venenatis_tellus_vel_tellus. Aliquam aliquam dolor at justo.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Venenatis_tellus_vel_tellus. ".to_string(), 29)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Venenatis_tellus_vel_tellus.".to_string(), 29)
);
}
#[test]
fn nothing_to_break() {
let string = "Venenatis_tellus_vel_tellus";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::EndOfInput("Venenatis_tellus_vel_tellus".to_string())
);
}
#[test]
fn significant_whitespaces() {
let string = "Neque in sem. \n Pellentesque tellus augue.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(15, false, &graphemes[..]),
SnippetState::Overflow("Neque in sem. \n".to_string(), 20)
);
assert_eq!(
break_string(25, false, &graphemes[..]),
SnippetState::Overflow("Neque in sem. \n".to_string(), 20)
);
// if `trim_end` is true, then the line feed does not matter anymore
assert_eq!(
break_string(15, true, &graphemes[..]),
SnippetState::LineEnd("Neque in sem.".to_string(), 26)
);
assert_eq!(
break_string(25, true, &graphemes[..]),
SnippetState::LineEnd("Neque in sem.".to_string(), 26)
);
}
#[test]
fn big_whitespace() {
let string = "Neque in sem. Pellentesque tellus augue.";
let graphemes = UnicodeSegmentation::graphemes(&*string, false).collect::<Vec<&str>>();
assert_eq!(
break_string(20, false, &graphemes[..]),
SnippetState::LineEnd("Neque in sem. ".to_string(), 25)
);
assert_eq!(
break_string(20, true, &graphemes[..]),
SnippetState::LineEnd("Neque in sem.".to_string(), 25)
);
}
} | .checked_sub(self.opener.len() + self.line_end.len() + 1)?
+ 1,
)
}
| random_line_split |
server.rs | protocol: Protocol::Udp,
}),
_ => Ok(Bind {
addr: s.to_owned(),
protocol: Protocol::Udp,
}),
}
}
}
impl Default for Config {
fn default() -> Self {
Config {
bind: Bind {
addr: "0.0.0.0:12201".to_owned(),
protocol: Protocol::Udp,
},
tcp_keep_alive_secs: 2 * 60, // 2 minutes
tcp_max_size_bytes: 1024 * 256, // 256kiB
}
}
}
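// A minimal sketch of how the visible `Config`/`Bind` pieces fit together.
// Only the UDP fall-through arm of the `FromStr` impl is visible above, so
// the prefixed `udp://`/`tcp://` forms are not exercised here; the address
// below is an arbitrary example.
fn bind_example() {
    // Defaults: UDP on 0.0.0.0:12201, 2 minute TCP keep-alive, 256kiB max frame.
    let config = Config::default();
    assert_eq!(config.bind.addr, "0.0.0.0:12201");
    // A bare address falls through the `_` arm above and is treated as UDP.
    let bind: Bind = "127.0.0.1:12201".parse().expect("bare addresses parse as UDP");
    assert!(matches!(bind.protocol, Protocol::Udp));
}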
/**
A GELF server.
*/
pub struct Server {
fut: BoxFuture<'static, ()>,
handle: Option<Handle>,
}
impl Server {
pub fn take_handle(&mut self) -> Option<Handle> {
self.handle.take()
}
pub fn run(self) -> Result<(), Error> {
// Run the server on a fresh runtime
// We attempt to shut this runtime down cleanly to release
// any used resources
let runtime = Runtime::new().expect("failed to start new Runtime");
runtime.block_on(self.fut);
runtime.shutdown_now();
Ok(())
}
}
/**
A handle to a running GELF server that can be used to interact with it
programmatically.
*/
pub struct Handle {
close: oneshot::Sender<()>,
}
impl Handle {
/**
Close the server.
*/
pub fn close(self) -> bool {
self.close.send(()).is_ok()
}
}
/**
Build a server to receive GELF messages and process them.
*/
pub fn build(
config: Config,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error> + Send + Sync + Unpin + Clone + 'static,
mut process: impl FnMut(Message) -> Result<(), Error> + Send + Sync + Unpin + Clone + 'static,
) -> Result<Server, Error> {
emit("Starting GELF server");
let addr = config.bind.addr.parse()?;
let (handle_tx, handle_rx) = oneshot::channel();
// Build a handle
let handle = Some(Handle { close: handle_tx });
let ctrl_c = ctrl_c()?;
let server = async move {
let incoming = match config.bind.protocol {
Protocol::Udp => {
let server = udp::Server::bind(&addr).await?.build(receive);
Either::Left(server)
}
Protocol::Tcp => {
let server = tcp::Server::bind(&addr).await?.build(
Duration::from_secs(config.tcp_keep_alive_secs),
config.tcp_max_size_bytes as usize,
receive,
);
Either::Right(server)
}
};
let mut close = handle_rx.fuse();
let mut ctrl_c = ctrl_c.fuse();
let mut incoming = incoming.fuse();
// NOTE: We don't use `?` here because we never want to carry results
// We always want to match them and deal with error cases directly
loop {
select! {
// A message that's ready to process
msg = incoming.next() => match msg {
// A complete message has been received
Some(Ok(Received::Complete(msg))) => {
increment!(server.receive_ok);
// Process the received message
match process(msg) {
Ok(()) => {
increment!(server.process_ok);
}
Err(err) => {
increment!(server.process_err);
emit_err(&err, "GELF processing failed");
}
}
},
// A chunk of a message has been received
Some(Ok(Received::Incomplete)) => {
continue;
},
// An error occurred receiving a chunk
Some(Err(err)) => {
increment!(server.receive_err);
emit_err(&err, "GELF processing failed");
},
None => {
unreachable!("receiver stream should never terminate")
},
},
// A termination signal from the programmatic handle
_ = close => {
emit("Handle closed; shutting down");
break;
},
// A termination signal from the environment
_ = ctrl_c.next() => {
emit("Termination signal received; shutting down");
break;
},
};
}
emit("Stopping GELF server");
Result::Ok::<(), Error>(())
};
Ok(Server {
fut: Box::pin(async move {
if let Err(err) = server.await {
emit_err(&err, "GELF server failed");
}
}),
handle,
})
}
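// A minimal wiring sketch for `build` with stub callbacks. Turning raw
// `Bytes` into a `Message` is application specific, so `receive` below just
// reports every frame as an incomplete chunk; a real caller would parse the
// GELF payload there.
fn run_gelf_server() -> Result<(), Error> {
    let mut server = build(
        Config::default(),
        // `receive`: Ok(Some(msg)) for a complete message, Ok(None) for a chunk.
        |_bytes| Ok(None),
        // `process`: called once per complete message; errors are logged, not fatal.
        |_msg| Ok(()),
    )?;
    // The handle can be kept around to stop the server programmatically.
    let _handle = server.take_handle();
    // `run` blocks the calling thread on a dedicated runtime until shutdown.
    server.run()
}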
enum Received {
Incomplete,
Complete(Message),
}
trait OptionMessageExt {
fn into_received(self) -> Option<Received>;
}
impl OptionMessageExt for Option<Message> {
fn into_received(self) -> Option<Received> {
match self {
Some(msg) => Some(Received::Complete(msg)),
None => Some(Received::Incomplete),
}
}
}
mod udp {
use super::*;
use tokio::{
codec::Decoder,
net::udp::{
UdpFramed,
UdpSocket,
},
};
pub(super) struct Server(UdpSocket);
impl Server {
pub(super) async fn bind(addr: &SocketAddr) -> Result<Self, Error> {
let sock = UdpSocket::bind(&addr).await?;
Ok(Server(sock))
}
pub(super) fn build(
self,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error> + Unpin,
) -> impl Stream<Item = Result<Received, Error>> {
emit("Setting up for UDP");
UdpFramed::new(self.0, Decode(receive)).map(|r| r.map(|(msg, _)| msg))
}
}
struct Decode<F>(F);
impl<F> Decoder for Decode<F>
where
F: FnMut(Bytes) -> Result<Option<Message>, Error> + Unpin,
{
type Item = Received;
type Error = Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
// All datagrams are considered a valid message
let src = src.take().freeze();
Ok((self.0)(src)?.into_received())
}
}
}
mod tcp {
use super::*;
use std::{
cmp,
pin::Pin,
};
use futures::{
future,
stream::{
futures_unordered::FuturesUnordered,
Fuse,
Stream,
StreamFuture,
},
task::{
Context,
Poll,
},
};
use pin_utils::unsafe_pinned;
use tokio::{
codec::{
Decoder,
FramedRead,
},
net::tcp::TcpListener,
timer::Timeout,
};
pub(super) struct Server(TcpListener);
impl Server {
pub(super) async fn bind(addr: &SocketAddr) -> Result<Self, Error> {
let listener = TcpListener::bind(&addr).await?;
Ok(Server(listener))
}
pub(super) fn build(
self,
keep_alive: Duration,
max_size_bytes: usize,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error>
+ Send
+ Sync
+ Unpin
+ Clone
+ 'static,
) -> impl Stream<Item = Result<Received, Error>> {
emit("Setting up for TCP");
self.0
.incoming()
.filter_map(move |conn| {
match conn {
// The connection was successfully established
// Create a new protocol reader over it
// It'll get added to the connection pool
Ok(conn) => {
let decode = Decode::new(max_size_bytes, receive.clone());
let protocol = FramedRead::new(conn, decode);
// NOTE: The timeout stream wraps _the protocol_
// That means it'll close the connection if it doesn't
// produce a valid message within the timeframe, not just
// whether or not it writes to the stream
future::ready(Some(TimeoutStream::new(protocol, keep_alive)))
}
// The connection could not be established
// Just ignore it
Err(_) => future::ready(None),
}
})
.listen(1024)
}
}
struct Listen<S>
where
S: Stream,
S::Item: Stream,
{
accept: Fuse<S>,
connections: FuturesUnordered<StreamFuture<S::Item>>,
max: usize,
}
impl<S> Listen<S>
where
S: Stream,
S::Item: Stream,
{
unsafe_pinned!(accept: Fuse<S>);
unsafe_pinned!(connections: FuturesUnordered<StreamFuture<S::Item>>);
}
impl<S, T> Stream for Listen<S>
where
S: Stream + Unpin,
S::Item: Stream<Item = Result<T, Error>> + Unpin,
{
type Item = Result<T, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
'poll_conns: loop {
// Fill up our accepted connections
'fill_conns: while self.connections.len() < self.max {
let conn = match self.as_mut().accept().poll_next(cx) {
Poll::Ready(Some(s)) => s.into_future(),
Poll::Ready(None) | Poll::Pending => break 'fill_conns,
};
self.connections.push(conn);
}
// Try polling the stream
// NOTE: We're assuming the unordered list will
// always make forward progress polling futures
// even if one future is particularly chatty
match self.as_mut().connections().poll_next(cx) {
// We have an item from a connection
Poll::Ready(Some((Some(item), conn))) => {
match item {
// A valid item was produced
// Return it and put the connection back in the pool.
Ok(item) => {
self.connections.push(conn.into_future());
return Poll::Ready(Some(Ok(item)));
}
// An error occurred, probably IO-related
// In this case the connection isn't returned to the pool.
// It's closed on drop and the error is returned.
Err(err) => {
return Poll::Ready(Some(Err(err.into())));
}
}
}
// A connection has closed
// Drop the connection and loop back
// This will mean attempting to accept a new connection
Poll::Ready(Some((None, _conn))) => continue 'poll_conns,
// The queue is empty or nothing is ready
Poll::Ready(None) | Poll::Pending => break 'poll_conns,
}
}
// If we've gotten this far, then there are no events for us to process
// and nothing was ready, so figure out if we're not done yet or if
// we've reached the end.
if self.accept.is_done() {
Poll::Ready(None)
} else {
Poll::Pending
}
}
}
trait StreamListenExt: Stream {
fn listen(self, max_connections: usize) -> Listen<Self>
where
Self: Sized + Unpin,
Self::Item: Stream + Unpin,
{
Listen {
accept: self.fuse(),
connections: FuturesUnordered::new(),
max: max_connections,
}
}
}
impl<S> StreamListenExt for S where S: Stream {}
struct Decode<F> {
max_size_bytes: usize,
read_head: usize,
discarding: bool,
receive: F,
}
impl<F> Decode<F> {
pub fn | (max_size_bytes: usize, receive: F) -> Self {
Decode {
read_head: 0,
discarding: false,
max_size_bytes,
receive,
}
}
}
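// The framing rule that `Decode::decode` below implements, shown on a plain
// byte slice purely as an illustration: frames are separated by null bytes
// and frames above the size limit are dropped. It ignores the incremental
// `read_head`/partial-frame bookkeeping the streaming decoder has to do.
fn complete_frames(buf: &[u8], max_size_bytes: usize) -> Vec<&[u8]> {
    buf.split(|byte| *byte == b'\0')
        .filter(|frame| !frame.is_empty() && frame.len() <= max_size_bytes)
        .collect()
}
// e.g. complete_frames(b"{\"version\":\"1.1\"}\0{\"version\":\"1.1\"}\0", 256)
// yields two payloads, each of which would then be handed to `receive`.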
impl<F> Decoder for Decode<F>
where
F: FnMut(Bytes) -> Result<Option<Message>, Error>,
{
type Item = Received;
type Error = Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
'read_frame: loop {
let read_to = cmp::min(self.max_size_bytes.saturating_add(1), src.len());
// Messages are separated by null bytes
let sep_offset = src[self.read_head..].iter().position(|b| *b == b'\0');
match (self.discarding, sep_offset) {
// A delimiter was found
// Split it from the buffer and return
(false, Some(offset)) => {
let frame_end = offset + self.read_head;
// The message is technically sitting right there
// for us, but since it's bigger than our max capacity
// we still discard it
if frame_end > self.max_size_bytes {
increment!(server.tcp_msg_overflow);
self.discarding = true;
continue 'read_frame;
}
self.read_head = 0;
let src = src.split_to(frame_end + 1).freeze();
return Ok((self.receive)(src.slice_to(src.len() - 1))?.into_received());
}
// A delimiter wasn't found, but the incomplete
// message is too big. Start discarding the input
(false, None) if src.len() > self.max_size_bytes => {
increment!(server.tcp_msg_overflow);
self.discarding = true;
continue 'read_frame;
}
// A delimiter wasn't found
// Move the read head forward so we'll check
// from that position next time data arrives
(false, None) => {
self.read_head = read_to;
// As per the contract of `Decoder`, we return `None`
// here to indicate more data is needed to complete a frame
return Ok(None);
}
// We're discarding input and have reached the end of the message
// Advance the source buffer to the end of that message and try again
(true, Some(offset)) => {
src.advance(offset + self.read_head + 1);
self.discarding = false;
| new | identifier_name |
server.rs | protocol: Protocol::Udp,
}),
_ => Ok(Bind {
addr: s.to_owned(),
protocol: Protocol::Udp,
}),
}
}
}
impl Default for Config {
fn default() -> Self {
Config {
bind: Bind {
addr: "0.0.0.0:12201".to_owned(),
protocol: Protocol::Udp,
},
tcp_keep_alive_secs: 2 * 60, // 2 minutes
tcp_max_size_bytes: 1024 * 256, // 256kiB
}
}
}
/**
A GELF server.
*/
pub struct Server {
fut: BoxFuture<'static, ()>,
handle: Option<Handle>,
}
impl Server {
pub fn take_handle(&mut self) -> Option<Handle> {
self.handle.take()
}
pub fn run(self) -> Result<(), Error> {
// Run the server on a fresh runtime
// We attempt to shut this runtime down cleanly to release
// any used resources
let runtime = Runtime::new().expect("failed to start new Runtime");
runtime.block_on(self.fut);
runtime.shutdown_now();
Ok(())
}
}
/**
A handle to a running GELF server that can be used to interact with it
programmatically.
*/
pub struct Handle {
close: oneshot::Sender<()>,
}
impl Handle {
/**
Close the server.
*/
pub fn close(self) -> bool {
self.close.send(()).is_ok()
}
}
/**
Build a server to receive GELF messages and process them.
*/
pub fn build(
config: Config,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error> + Send + Sync + Unpin + Clone + 'static,
mut process: impl FnMut(Message) -> Result<(), Error> + Send + Sync + Unpin + Clone + 'static,
) -> Result<Server, Error> {
emit("Starting GELF server");
let addr = config.bind.addr.parse()?;
let (handle_tx, handle_rx) = oneshot::channel();
// Build a handle
let handle = Some(Handle { close: handle_tx });
let ctrl_c = ctrl_c()?;
let server = async move {
let incoming = match config.bind.protocol {
Protocol::Udp => {
let server = udp::Server::bind(&addr).await?.build(receive);
Either::Left(server)
}
Protocol::Tcp => {
let server = tcp::Server::bind(&addr).await?.build(
Duration::from_secs(config.tcp_keep_alive_secs),
config.tcp_max_size_bytes as usize,
receive,
);
Either::Right(server)
}
};
let mut close = handle_rx.fuse();
let mut ctrl_c = ctrl_c.fuse();
let mut incoming = incoming.fuse();
// NOTE: We don't use `?` here because we never want to carry results
// We always want to match them and deal with error cases directly
loop {
select! {
// A message that's ready to process
msg = incoming.next() => match msg {
// A complete message has been received
Some(Ok(Received::Complete(msg))) => {
increment!(server.receive_ok);
// Process the received message
match process(msg) {
Ok(()) => {
increment!(server.process_ok);
}
Err(err) => {
increment!(server.process_err);
emit_err(&err, "GELF processing failed");
}
}
},
// A chunk of a message has been received
Some(Ok(Received::Incomplete)) => {
continue;
},
// An error occurred receiving a chunk
Some(Err(err)) => {
increment!(server.receive_err);
emit_err(&err, "GELF processing failed");
},
None => {
unreachable!("receiver stream should never terminate")
},
},
// A termination signal from the programmatic handle
_ = close => {
emit("Handle closed; shutting down");
break;
},
// A termination signal from the environment
_ = ctrl_c.next() => {
emit("Termination signal received; shutting down");
break;
},
};
}
emit("Stopping GELF server");
Result::Ok::<(), Error>(())
};
Ok(Server {
fut: Box::pin(async move {
if let Err(err) = server.await {
emit_err(&err, "GELF server failed");
}
}),
handle,
})
}
enum Received {
Incomplete,
Complete(Message),
}
trait OptionMessageExt {
fn into_received(self) -> Option<Received>;
}
impl OptionMessageExt for Option<Message> {
fn into_received(self) -> Option<Received> {
match self {
Some(msg) => Some(Received::Complete(msg)),
None => Some(Received::Incomplete),
}
}
}
mod udp {
use super::*;
use tokio::{
codec::Decoder,
net::udp::{
UdpFramed,
UdpSocket,
},
};
pub(super) struct Server(UdpSocket);
impl Server {
pub(super) async fn bind(addr: &SocketAddr) -> Result<Self, Error> {
let sock = UdpSocket::bind(&addr).await?;
Ok(Server(sock))
}
pub(super) fn build(
self,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error> + Unpin,
) -> impl Stream<Item = Result<Received, Error>> |
}
struct Decode<F>(F);
impl<F> Decoder for Decode<F>
where
F: FnMut(Bytes) -> Result<Option<Message>, Error> + Unpin,
{
type Item = Received;
type Error = Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
// All datagrams are considered a valid message
let src = src.take().freeze();
Ok((self.0)(src)?.into_received())
}
}
}
mod tcp {
use super::*;
use std::{
cmp,
pin::Pin,
};
use futures::{
future,
stream::{
futures_unordered::FuturesUnordered,
Fuse,
Stream,
StreamFuture,
},
task::{
Context,
Poll,
},
};
use pin_utils::unsafe_pinned;
use tokio::{
codec::{
Decoder,
FramedRead,
},
net::tcp::TcpListener,
timer::Timeout,
};
pub(super) struct Server(TcpListener);
impl Server {
pub(super) async fn bind(addr: &SocketAddr) -> Result<Self, Error> {
let listener = TcpListener::bind(&addr).await?;
Ok(Server(listener))
}
pub(super) fn build(
self,
keep_alive: Duration,
max_size_bytes: usize,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error>
+ Send
+ Sync
+ Unpin
+ Clone
+ 'static,
) -> impl Stream<Item = Result<Received, Error>> {
emit("Setting up for TCP");
self.0
.incoming()
.filter_map(move |conn| {
match conn {
// The connection was successfully established
// Create a new protocol reader over it
// It'll get added to the connection pool
Ok(conn) => {
let decode = Decode::new(max_size_bytes, receive.clone());
let protocol = FramedRead::new(conn, decode);
// NOTE: The timeout stream wraps _the protocol_
// That means it'll close the connection if it doesn't
// produce a valid message within the timeframe, not just
// whether or not it writes to the stream
future::ready(Some(TimeoutStream::new(protocol, keep_alive)))
}
// The connection could not be established
// Just ignore it
Err(_) => future::ready(None),
}
})
.listen(1024)
}
}
struct Listen<S>
where
S: Stream,
S::Item: Stream,
{
accept: Fuse<S>,
connections: FuturesUnordered<StreamFuture<S::Item>>,
max: usize,
}
impl<S> Listen<S>
where
S: Stream,
S::Item: Stream,
{
unsafe_pinned!(accept: Fuse<S>);
unsafe_pinned!(connections: FuturesUnordered<StreamFuture<S::Item>>);
}
impl<S, T> Stream for Listen<S>
where
S: Stream + Unpin,
S::Item: Stream<Item = Result<T, Error>> + Unpin,
{
type Item = Result<T, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
'poll_conns: loop {
// Fill up our accepted connections
'fill_conns: while self.connections.len() < self.max {
let conn = match self.as_mut().accept().poll_next(cx) {
Poll::Ready(Some(s)) => s.into_future(),
Poll::Ready(None) | Poll::Pending => break 'fill_conns,
};
self.connections.push(conn);
}
// Try polling the stream
// NOTE: We're assuming the unordered list will
// always make forward progress polling futures
// even if one future is particularly chatty
match self.as_mut().connections().poll_next(cx) {
// We have an item from a connection
Poll::Ready(Some((Some(item), conn))) => {
match item {
// A valid item was produced
// Return it and put the connection back in the pool.
Ok(item) => {
self.connections.push(conn.into_future());
return Poll::Ready(Some(Ok(item)));
}
// An error occurred, probably IO-related
// In this case the connection isn't returned to the pool.
// It's closed on drop and the error is returned.
Err(err) => {
return Poll::Ready(Some(Err(err.into())));
}
}
}
// A connection has closed
// Drop the connection and loop back
// This will mean attempting to accept a new connection
Poll::Ready(Some((None, _conn))) => continue 'poll_conns,
// The queue is empty or nothing is ready
Poll::Ready(None) | Poll::Pending => break 'poll_conns,
}
}
// If we've gotten this far, then there are no events for us to process
// and nothing was ready, so figure out if we're not done yet or if
// we've reached the end.
if self.accept.is_done() {
Poll::Ready(None)
} else {
Poll::Pending
}
}
}
trait StreamListenExt: Stream {
fn listen(self, max_connections: usize) -> Listen<Self>
where
Self: Sized + Unpin,
Self::Item: Stream + Unpin,
{
Listen {
accept: self.fuse(),
connections: FuturesUnordered::new(),
max: max_connections,
}
}
}
impl<S> StreamListenExt for S where S: Stream {}
struct Decode<F> {
max_size_bytes: usize,
read_head: usize,
discarding: bool,
receive: F,
}
impl<F> Decode<F> {
pub fn new(max_size_bytes: usize, receive: F) -> Self {
Decode {
read_head: 0,
discarding: false,
max_size_bytes,
receive,
}
}
}
impl<F> Decoder for Decode<F>
where
F: FnMut(Bytes) -> Result<Option<Message>, Error>,
{
type Item = Received;
type Error = Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
'read_frame: loop {
let read_to = cmp::min(self.max_size_bytes.saturating_add(1), src.len());
// Messages are separated by null bytes
let sep_offset = src[self.read_head..].iter().position(|b| *b == b'\0');
match (self.discarding, sep_offset) {
// A delimiter was found
// Split it from the buffer and return
(false, Some(offset)) => {
let frame_end = offset + self.read_head;
// The message is technically sitting right there
// for us, but since it's bigger than our max capacity
// we still discard it
if frame_end > self.max_size_bytes {
increment!(server.tcp_msg_overflow);
self.discarding = true;
continue 'read_frame;
}
self.read_head = 0;
let src = src.split_to(frame_end + 1).freeze();
return Ok((self.receive)(src.slice_to(src.len() - 1))?.into_received());
}
// A delimiter wasn't found, but the incomplete
// message is too big. Start discarding the input
(false, None) if src.len() > self.max_size_bytes => {
increment!(server.tcp_msg_overflow);
self.discarding = true;
continue 'read_frame;
}
// A delimiter wasn't found
// Move the read head forward so we'll check
// from that position next time data arrives
(false, None) => {
self.read_head = read_to;
// As per the contract of `Decoder`, we return `None`
// here to indicate more data is needed to complete a frame
return Ok(None);
}
// We're discarding input and have reached the end of the message
// Advance the source buffer to the end of that message and try again
(true, Some(offset)) => {
src.advance(offset + self.read_head + 1);
self.discarding = false;
| {
emit("Setting up for UDP");
UdpFramed::new(self.0, Decode(receive)).map(|r| r.map(|(msg, _)| msg))
} | identifier_body |
server.rs | protocol: Protocol::Udp,
}),
_ => Ok(Bind {
addr: s.to_owned(),
protocol: Protocol::Udp,
}),
}
}
}
impl Default for Config {
fn default() -> Self {
Config {
bind: Bind {
addr: "0.0.0.0:12201".to_owned(),
protocol: Protocol::Udp,
},
tcp_keep_alive_secs: 2 * 60, // 2 minutes
tcp_max_size_bytes: 1024 * 256, // 256kiB
}
}
}
/**
A GELF server.
*/
pub struct Server {
fut: BoxFuture<'static, ()>,
handle: Option<Handle>,
}
impl Server {
pub fn take_handle(&mut self) -> Option<Handle> {
self.handle.take()
}
pub fn run(self) -> Result<(), Error> {
// Run the server on a fresh runtime
// We attempt to shut this runtime down cleanly to release
// any used resources
let runtime = Runtime::new().expect("failed to start new Runtime");
runtime.block_on(self.fut);
runtime.shutdown_now();
Ok(())
}
}
/**
A handle to a running GELF server that can be used to interact with it
programmatically.
*/
pub struct Handle {
close: oneshot::Sender<()>,
}
impl Handle {
/**
Close the server.
*/
pub fn close(self) -> bool {
self.close.send(()).is_ok()
}
}
/**
Build a server to receive GELF messages and process them.
*/
pub fn build(
config: Config,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error> + Send + Sync + Unpin + Clone + 'static,
mut process: impl FnMut(Message) -> Result<(), Error> + Send + Sync + Unpin + Clone + 'static,
) -> Result<Server, Error> {
emit("Starting GELF server");
let addr = config.bind.addr.parse()?;
let (handle_tx, handle_rx) = oneshot::channel();
// Build a handle
let handle = Some(Handle { close: handle_tx });
let ctrl_c = ctrl_c()?;
let server = async move {
let incoming = match config.bind.protocol {
Protocol::Udp => {
let server = udp::Server::bind(&addr).await?.build(receive);
Either::Left(server)
}
Protocol::Tcp => {
let server = tcp::Server::bind(&addr).await?.build(
Duration::from_secs(config.tcp_keep_alive_secs),
config.tcp_max_size_bytes as usize,
receive,
);
Either::Right(server)
}
};
let mut close = handle_rx.fuse();
let mut ctrl_c = ctrl_c.fuse();
let mut incoming = incoming.fuse();
// NOTE: We don't use `?` here because we never want to carry results
// We always want to match them and deal with error cases directly
loop {
select! {
// A message that's ready to process
msg = incoming.next() => match msg {
// A complete message has been received | match process(msg) {
Ok(()) => {
increment!(server.process_ok);
}
Err(err) => {
increment!(server.process_err);
emit_err(&err, "GELF processing failed");
}
}
},
// A chunk of a message has been received
Some(Ok(Received::Incomplete)) => {
continue;
},
// An error occurred receiving a chunk
Some(Err(err)) => {
increment!(server.receive_err);
emit_err(&err, "GELF processing failed");
},
None => {
unreachable!("receiver stream should never terminate")
},
},
// A termination signal from the programmatic handle
_ = close => {
emit("Handle closed; shutting down");
break;
},
// A termination signal from the environment
_ = ctrl_c.next() => {
emit("Termination signal received; shutting down");
break;
},
};
}
emit("Stopping GELF server");
Result::Ok::<(), Error>(())
};
Ok(Server {
fut: Box::pin(async move {
if let Err(err) = server.await {
emit_err(&err, "GELF server failed");
}
}),
handle,
})
}
enum Received {
Incomplete,
Complete(Message),
}
trait OptionMessageExt {
fn into_received(self) -> Option<Received>;
}
impl OptionMessageExt for Option<Message> {
fn into_received(self) -> Option<Received> {
match self {
Some(msg) => Some(Received::Complete(msg)),
None => Some(Received::Incomplete),
}
}
}
mod udp {
use super::*;
use tokio::{
codec::Decoder,
net::udp::{
UdpFramed,
UdpSocket,
},
};
pub(super) struct Server(UdpSocket);
impl Server {
pub(super) async fn bind(addr: &SocketAddr) -> Result<Self, Error> {
let sock = UdpSocket::bind(&addr).await?;
Ok(Server(sock))
}
pub(super) fn build(
self,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error> + Unpin,
) -> impl Stream<Item = Result<Received, Error>> {
emit("Setting up for UDP");
UdpFramed::new(self.0, Decode(receive)).map(|r| r.map(|(msg, _)| msg))
}
}
struct Decode<F>(F);
impl<F> Decoder for Decode<F>
where
F: FnMut(Bytes) -> Result<Option<Message>, Error> + Unpin,
{
type Item = Received;
type Error = Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
// Each datagram is treated as a single, complete message
let src = src.take().freeze();
Ok((self.0)(src)?.into_received())
}
}
}
mod tcp {
use super::*;
use std::{
cmp,
pin::Pin,
};
use futures::{
future,
stream::{
futures_unordered::FuturesUnordered,
Fuse,
Stream,
StreamFuture,
},
task::{
Context,
Poll,
},
};
use pin_utils::unsafe_pinned;
use tokio::{
codec::{
Decoder,
FramedRead,
},
net::tcp::TcpListener,
timer::Timeout,
};
pub(super) struct Server(TcpListener);
impl Server {
pub(super) async fn bind(addr: &SocketAddr) -> Result<Self, Error> {
let listener = TcpListener::bind(&addr).await?;
Ok(Server(listener))
}
pub(super) fn build(
self,
keep_alive: Duration,
max_size_bytes: usize,
receive: impl FnMut(Bytes) -> Result<Option<Message>, Error>
+ Send
+ Sync
+ Unpin
+ Clone
+'static,
) -> impl Stream<Item = Result<Received, Error>> {
emit("Setting up for TCP");
self.0
.incoming()
.filter_map(move |conn| {
match conn {
// The connection was successfully established
// Create a new protocol reader over it
// It'll get added to the connection pool
Ok(conn) => {
let decode = Decode::new(max_size_bytes, receive.clone());
let protocol = FramedRead::new(conn, decode);
// NOTE: The timeout stream wraps _the protocol_
// That means it'll close the connection if it doesn't
// produce a valid message within the timeframe, not merely
// whether any bytes were written to the stream
future::ready(Some(TimeoutStream::new(protocol, keep_alive)))
}
// The connection could not be established
// Just ignore it
Err(_) => future::ready(None),
}
})
.listen(1024)
}
}
struct Listen<S>
where
S: Stream,
S::Item: Stream,
{
accept: Fuse<S>,
connections: FuturesUnordered<StreamFuture<S::Item>>,
max: usize,
}
impl<S> Listen<S>
where
S: Stream,
S::Item: Stream,
{
unsafe_pinned!(accept: Fuse<S>);
unsafe_pinned!(connections: FuturesUnordered<StreamFuture<S::Item>>);
}
impl<S, T> Stream for Listen<S>
where
S: Stream + Unpin,
S::Item: Stream<Item = Result<T, Error>> + Unpin,
{
type Item = Result<T, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
'poll_conns: loop {
// Fill up our accepted connections
'fill_conns: while self.connections.len() < self.max {
let conn = match self.as_mut().accept().poll_next(cx) {
Poll::Ready(Some(s)) => s.into_future(),
Poll::Ready(None) | Poll::Pending => break 'fill_conns,
};
self.connections.push(conn);
}
// Try polling the stream
// NOTE: We're assuming the unordered list will
// always make forward progress polling futures
// even if one future is particularly chatty
match self.as_mut().connections().poll_next(cx) {
// We have an item from a connection
Poll::Ready(Some((Some(item), conn))) => {
match item {
// A valid item was produced
// Return it and put the connection back in the pool.
Ok(item) => {
self.connections.push(conn.into_future());
return Poll::Ready(Some(Ok(item)));
}
// An error occurred, probably IO-related
// In this case the connection isn't returned to the pool.
// It's closed on drop and the error is returned.
Err(err) => {
return Poll::Ready(Some(Err(err.into())));
}
}
}
// A connection has closed
// Drop the connection and loop back
// This will mean attempting to accept a new connection
Poll::Ready(Some((None, _conn))) => continue 'poll_conns,
// The queue is empty or nothing is ready
Poll::Ready(None) | Poll::Pending => break 'poll_conns,
}
}
// If we've gotten this far, then there are no events for us to process
// and nothing was ready, so figure out if we're not done yet or if
// we've reached the end.
if self.accept.is_done() {
Poll::Ready(None)
} else {
Poll::Pending
}
}
}
trait StreamListenExt: Stream {
fn listen(self, max_connections: usize) -> Listen<Self>
where
Self: Sized + Unpin,
Self::Item: Stream + Unpin,
{
Listen {
accept: self.fuse(),
connections: FuturesUnordered::new(),
max: max_connections,
}
}
}
impl<S> StreamListenExt for S where S: Stream {}
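// Illustrative sketch (an assumption for clarity, not code from this file):
// `listen(n)` turns a stream of connections - each itself a stream of decoded
// items - into one flattened stream, driving at most `n` connections at a time.
//
// let merged = framed_connections.listen(1024);
// // `merged` yields Result<Received, Error> items from whichever connection
// // produces one next; connections that end are dropped from the pool.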
struct Decode<F> {
max_size_bytes: usize,
read_head: usize,
discarding: bool,
receive: F,
}
impl<F> Decode<F> {
pub fn new(max_size_bytes: usize, receive: F) -> Self {
Decode {
read_head: 0,
discarding: false,
max_size_bytes,
receive,
}
}
}
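// A minimal, self-contained sketch of the framing rule that the `Decoder`
// impl below enforces: TCP messages are delimited by a single b'\0'. This
// helper is illustrative only and skips the max-size/discard and partial-read
// handling that `Decode` actually implements.
fn split_complete_frames(input: &[u8]) -> Vec<&[u8]> {
    match input.iter().rposition(|b| *b == b'\0') {
        // Everything up to the last delimiter forms complete frames
        Some(last) => input[..last].split(|b| *b == b'\0').collect(),
        // No delimiter seen yet, so no complete frame
        None => Vec::new(),
    }
}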
impl<F> Decoder for Decode<F>
where
F: FnMut(Bytes) -> Result<Option<Message>, Error>,
{
type Item = Received;
type Error = Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
'read_frame: loop {
let read_to = cmp::min(self.max_size_bytes.saturating_add(1), src.len());
// Messages are separated by null bytes
let sep_offset = src[self.read_head..].iter().position(|b| *b == b'\0');
match (self.discarding, sep_offset) {
// A delimiter was found
// Split it from the buffer and return
(false, Some(offset)) => {
let frame_end = offset + self.read_head;
// The message is technically sitting right there
// for us, but since it's bigger than our max capacity
// we still discard it
if frame_end > self.max_size_bytes {
increment!(server.tcp_msg_overflow);
self.discarding = true;
continue'read_frame;
}
self.read_head = 0;
let src = src.split_to(frame_end + 1).freeze();
return Ok((self.receive)(src.slice_to(src.len() - 1))?.into_received());
}
// A delimiter wasn't found, but the incomplete
// message is too big. Start discarding the input
(false, None) if src.len() > self.max_size_bytes => {
increment!(server.tcp_msg_overflow);
self.discarding = true;
continue'read_frame;
}
// A delimiter wasn't found
// Move the read head forward so we'll check
// from that position next time data arrives
(false, None) => {
self.read_head = read_to;
// As per the contract of `Decoder`, we return `None`
// here to indicate more data is needed to complete a frame
return Ok(None);
}
// We're discarding input and have reached the end of the message
// Advance the source buffer to the end of that message and try again
(true, Some(offset)) => {
src.advance(offset + self.read_head + 1);
self.discarding = false;
| Some(Ok(Received::Complete(msg))) => {
increment!(server.receive_ok);
// Process the received message | random_line_split |
remote.rs | use super::*;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::io::{ReadHalf, WriteHalf};
use tokio::net::TcpStream;
use tracing::debug;
use tracing::{error, Instrument};
async fn direct_to_control(mut incoming: TcpStream) {
let mut control_socket =
match TcpStream::connect(format!("localhost:{}", CONFIG.control_port)).await {
Ok(s) => s,
Err(error) => {
tracing::warn!(?error, "failed to connect to local control server");
return;
}
};
let (mut control_r, mut control_w) = control_socket.split();
let (mut incoming_r, mut incoming_w) = incoming.split();
let join_1 = tokio::io::copy(&mut control_r, &mut incoming_w);
let join_2 = tokio::io::copy(&mut incoming_r, &mut control_w);
match futures::future::join(join_1, join_2).await {
(Ok(_), Ok(_)) => {}
(Err(error), _) | (_, Err(error)) => {
tracing::error!(?error, "directing stream to control failed");
}
}
}
#[tracing::instrument(skip(socket))]
pub async fn accept_connection(socket: TcpStream) {
// peek the host of the http request
// if health check, then handle it and return
let StreamWithPeekedHost {
mut socket,
host,
forwarded_for,
} = match peek_http_request_host(socket).await {
Some(s) => s,
None => return,
};
tracing::info!(%host, %forwarded_for, "new remote connection");
// parse the host string and find our client
if CONFIG.allowed_hosts.contains(&host) {
error!("redirect to homepage");
let _ = socket.write_all(HTTP_REDIRECT_RESPONSE).await;
return;
}
let host = match validate_host_prefix(&host) {
Some(sub_domain) => sub_domain,
None => {
error!("invalid host specified");
let _ = socket.write_all(HTTP_INVALID_HOST_RESPONSE).await;
return;
}
};
// Special case -- we redirect this tcp connection to the control server
if host.as_str() == "wormhole" {
direct_to_control(socket).await;
return;
}
// find the client listening for this host
let client = match Connections::find_by_host(&host) {
Some(client) => client.clone(),
None => {
// check other instances that may be serving this host
match network::instance_for_host(&host).await {
Ok((instance, _)) => {
network::proxy_stream(instance, socket).await;
return;
}
Err(network::Error::DoesNotServeHost) => {
error!(%host, "no tunnel found");
let _ = socket.write_all(HTTP_NOT_FOUND_RESPONSE).await;
return;
}
Err(error) => {
error!(%host,?error, "failed to find instance");
let _ = socket.write_all(HTTP_ERROR_LOCATING_HOST_RESPONSE).await;
return;
}
}
}
};
// allocate a new stream for this request
let (active_stream, queue_rx) = ActiveStream::new(client.clone());
let stream_id = active_stream.id.clone();
tracing::debug!(
stream_id = %active_stream.id.to_string(),
"new stream connected"
);
let (stream, sink) = tokio::io::split(socket);
// add our stream
ACTIVE_STREAMS.insert(stream_id.clone(), active_stream.clone());
// read from socket, write to client
let span = observability::remote_trace("process_tcp_stream");
tokio::spawn(
async move {
process_tcp_stream(active_stream, stream).await;
}
.instrument(span),
);
// read from client, write to socket
let span = observability::remote_trace("tunnel_to_stream");
tokio::spawn(
async move {
tunnel_to_stream(host, stream_id, sink, queue_rx).await;
}
.instrument(span),
);
}
fn validate_host_prefix(host: &str) -> Option<String> {
let url = format!("http://{}", host);
let host = match url::Url::parse(&url)
.map(|u| u.host().map(|h| h.to_owned()))
.unwrap_or(None)
{
Some(domain) => domain.to_string(),
None => {
error!("invalid host header");
return None;
}
};
let domain_segments = host.split(".").collect::<Vec<&str>>();
let prefix = &domain_segments[0];
let remaining = &domain_segments[1..].join(".");
if CONFIG.allowed_hosts.contains(remaining) {
Some(prefix.to_string())
} else {
None
}
}
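// Expected behaviour, shown as a sketch (assumes "tunnelto.dev" is listed in
// CONFIG.allowed_hosts; the host names are made up for illustration):
//
// assert_eq!(validate_host_prefix("foo.tunnelto.dev"), Some("foo".to_string()));
// assert_eq!(validate_host_prefix("foo.example.com"), None);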
/// Response Constants
const HTTP_REDIRECT_RESPONSE:&'static [u8] = b"HTTP/1.1 301 Moved Permanently\r\nLocation: https://tunnelto.dev/\r\nContent-Length: 20\r\n\r\nhttps://tunnelto.dev";
const HTTP_INVALID_HOST_RESPONSE: &'static [u8] =
b"HTTP/1.1 400\r\nContent-Length: 23\r\n\r\nError: Invalid Hostname";
const HTTP_NOT_FOUND_RESPONSE: &'static [u8] =
b"HTTP/1.1 404\r\nContent-Length: 23\r\n\r\nError: Tunnel Not Found";
const HTTP_ERROR_LOCATING_HOST_RESPONSE: &'static [u8] =
b"HTTP/1.1 500\r\nContent-Length: 27\r\n\r\nError: Error finding tunnel";
const HTTP_TUNNEL_REFUSED_RESPONSE: &'static [u8] =
b"HTTP/1.1 500\r\nContent-Length: 32\r\n\r\nTunnel says: connection refused.";
const HTTP_OK_RESPONSE: &'static [u8] = b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok";
const HEALTH_CHECK_PATH: &'static [u8] = b"/0xDEADBEEF_HEALTH_CHECK";
struct StreamWithPeekedHost {
socket: TcpStream,
host: String,
forwarded_for: String,
}
/// Filter incoming remote streams
#[tracing::instrument(skip(socket))]
async fn peek_http_request_host(mut socket: TcpStream) -> Option<StreamWithPeekedHost> {
/// Note we return early if the host header is not found
/// within the first 4kb of the request.
const MAX_HEADER_PEAK: usize = 4096;
let mut buf = vec![0; MAX_HEADER_PEAK]; // 4kb
tracing::debug!("checking stream headers");
let n = match socket.peek(&mut buf).await {
Ok(n) => n,
Err(e) => {
error!("failed to read from tcp socket to determine host: {:?}", e);
return None;
}
};
// make sure we actually peeked some bytes
if n == 0 {
tracing::debug!("unable to peek header bytes");
return None;
}
tracing::debug!("peeked {} stream bytes ", n);
let mut headers = [httparse::EMPTY_HEADER; 64]; // 64 seems like a generous # of headers
let mut req = httparse::Request::new(&mut headers);
if let Err(e) = req.parse(&buf[..n]) {
error!("failed to parse incoming http bytes: {:?}", e);
return None;
}
// Handle the health check route
if req.path.map(|s| s.as_bytes()) == Some(HEALTH_CHECK_PATH) {
let _ = socket.write_all(HTTP_OK_RESPONSE).await.map_err(|e| {
error!("failed to write health_check: {:?}", e);
});
return None;
}
// get the ip addr in the header
let forwarded_for = if let Some(Ok(forwarded_for)) = req
.headers
.iter()
.filter(|h| h.name.to_lowercase() == "x-forwarded-for".to_string())
.map(|h| std::str::from_utf8(h.value))
.next()
{
forwarded_for.to_string()
} else {
String::default()
};
// look for a host header
if let Some(Ok(host)) = req
.headers
.iter()
.filter(|h| h.name.to_lowercase() == "host".to_string())
.map(|h| std::str::from_utf8(h.value))
.next()
{
tracing::info!(host=%host, path=%req.path.unwrap_or_default(), "peek request");
return Some(StreamWithPeekedHost {
socket,
host: host.to_string(),
forwarded_for,
});
}
tracing::info!("found no host header, dropping connection.");
None
}
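// Illustrative example of what the peek extracts (an assumption for clarity,
// not a test from this file). Given a socket whose first bytes are:
//
// GET /some/path HTTP/1.1\r\n
// Host: foo.tunnelto.dev\r\n
// X-Forwarded-For: 203.0.113.7\r\n
// \r\n
//
// this returns host = "foo.tunnelto.dev" and forwarded_for = "203.0.113.7",
// leaving the peeked bytes in the socket buffer (peek does not consume them).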
/// Process Messages from the control path in & out of the remote stream
#[tracing::instrument(skip(tunnel_stream, tcp_stream))]
async fn process_tcp_stream(mut tunnel_stream: ActiveStream, mut tcp_stream: ReadHalf<TcpStream>) | error!("failed to read from tcp socket: {:?}", e);
return;
}
};
if n == 0 {
debug!("stream ended");
let _ = tunnel_stream
.client
.tx
.send(ControlPacket::End(tunnel_stream.id.clone()))
.await
.map_err(|e| {
error!("failed to send end signal: {:?}", e);
});
return;
}
debug!("read {} bytes", n);
let data = &buf[..n];
let packet = ControlPacket::Data(tunnel_stream.id.clone(), data.to_vec());
match tunnel_stream.client.tx.send(packet.clone()).await {
Ok(_) => debug!(client_id = %tunnel_stream.client.id, "sent data packet to client"),
Err(_) => {
error!("failed to forward tcp packets to disconnected client. dropping client.");
Connections::remove(&tunnel_stream.client);
}
}
}
}
#[tracing::instrument(skip(sink, stream_id, queue))]
async fn tunnel_to_stream(
subdomain: String,
stream_id: StreamId,
mut sink: WriteHalf<TcpStream>,
mut queue: UnboundedReceiver<StreamMessage>,
) {
loop {
let result = queue.next().await;
let result = if let Some(message) = result {
match message {
StreamMessage::Data(data) => Some(data),
StreamMessage::TunnelRefused => {
tracing::debug!(?stream_id, "tunnel refused");
let _ = sink.write_all(HTTP_TUNNEL_REFUSED_RESPONSE).await;
None
}
StreamMessage::NoClientTunnel => {
tracing::info!(%subdomain,?stream_id, "client tunnel not found");
let _ = sink.write_all(HTTP_NOT_FOUND_RESPONSE).await;
None
}
}
} else {
None
};
let data = match result {
Some(data) => data,
None => {
tracing::debug!("done tunneling to sink");
let _ = sink.shutdown().await.map_err(|_e| {
error!("error shutting down tcp stream");
});
ACTIVE_STREAMS.remove(&stream_id);
return;
}
};
let result = sink.write_all(&data).await;
if let Some(error) = result.err() {
tracing::warn!(?error, "stream closed, disconnecting");
return;
}
}
}
| {
// send initial control stream init to client
control_server::send_client_stream_init(tunnel_stream.clone()).await;
// now read from stream and forward to clients
let mut buf = [0; 1024];
loop {
// client is no longer connected
if Connections::get(&tunnel_stream.client.id).is_none() {
debug!("client disconnected, closing stream");
let _ = tunnel_stream.tx.send(StreamMessage::NoClientTunnel).await;
tunnel_stream.tx.close_channel();
return;
}
// read from stream
let n = match tcp_stream.read(&mut buf).await {
Ok(n) => n,
Err(e) => { | identifier_body |
remote.rs | use super::*;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::io::{ReadHalf, WriteHalf};
use tokio::net::TcpStream;
use tracing::debug;
use tracing::{error, Instrument};
async fn direct_to_control(mut incoming: TcpStream) {
let mut control_socket =
match TcpStream::connect(format!("localhost:{}", CONFIG.control_port)).await {
Ok(s) => s,
Err(error) => {
tracing::warn!(?error, "failed to connect to local control server");
return;
}
};
let (mut control_r, mut control_w) = control_socket.split();
let (mut incoming_r, mut incoming_w) = incoming.split();
let join_1 = tokio::io::copy(&mut control_r, &mut incoming_w);
let join_2 = tokio::io::copy(&mut incoming_r, &mut control_w);
match futures::future::join(join_1, join_2).await {
(Ok(_), Ok(_)) => {}
(Err(error), _) | (_, Err(error)) => {
tracing::error!(?error, "directing stream to control failed");
}
}
}
#[tracing::instrument(skip(socket))]
pub async fn accept_connection(socket: TcpStream) {
// peek the host of the http request
// if health check, then handle it and return
let StreamWithPeekedHost {
mut socket,
host,
forwarded_for,
} = match peek_http_request_host(socket).await {
Some(s) => s,
None => return,
};
tracing::info!(%host, %forwarded_for, "new remote connection");
// parse the host string and find our client
if CONFIG.allowed_hosts.contains(&host) {
error!("redirect to homepage");
let _ = socket.write_all(HTTP_REDIRECT_RESPONSE).await;
return;
}
let host = match validate_host_prefix(&host) {
Some(sub_domain) => sub_domain,
None => {
error!("invalid host specified");
let _ = socket.write_all(HTTP_INVALID_HOST_RESPONSE).await;
return;
}
};
// Special case -- we redirect this tcp connection to the control server
if host.as_str() == "wormhole" {
direct_to_control(socket).await;
return;
}
// find the client listening for this host
let client = match Connections::find_by_host(&host) {
Some(client) => client.clone(),
None => {
// check other instances that may be serving this host
match network::instance_for_host(&host).await {
Ok((instance, _)) => {
network::proxy_stream(instance, socket).await;
return;
}
Err(network::Error::DoesNotServeHost) => {
error!(%host, "no tunnel found");
let _ = socket.write_all(HTTP_NOT_FOUND_RESPONSE).await;
return;
}
Err(error) => {
error!(%host,?error, "failed to find instance");
let _ = socket.write_all(HTTP_ERROR_LOCATING_HOST_RESPONSE).await;
return;
}
}
}
};
// allocate a new stream for this request
let (active_stream, queue_rx) = ActiveStream::new(client.clone());
let stream_id = active_stream.id.clone();
tracing::debug!(
stream_id = %active_stream.id.to_string(),
"new stream connected"
);
let (stream, sink) = tokio::io::split(socket);
// add our stream
ACTIVE_STREAMS.insert(stream_id.clone(), active_stream.clone());
// read from socket, write to client
let span = observability::remote_trace("process_tcp_stream");
tokio::spawn(
async move {
process_tcp_stream(active_stream, stream).await;
}
.instrument(span),
);
// read from client, write to socket
let span = observability::remote_trace("tunnel_to_stream");
tokio::spawn(
async move {
tunnel_to_stream(host, stream_id, sink, queue_rx).await;
}
.instrument(span),
);
}
fn validate_host_prefix(host: &str) -> Option<String> {
let url = format!("http://{}", host);
let host = match url::Url::parse(&url)
.map(|u| u.host().map(|h| h.to_owned()))
.unwrap_or(None)
{
Some(domain) => domain.to_string(),
None => {
error!("invalid host header");
return None;
}
};
let domain_segments = host.split(".").collect::<Vec<&str>>();
let prefix = &domain_segments[0];
let remaining = &domain_segments[1..].join(".");
if CONFIG.allowed_hosts.contains(remaining) {
Some(prefix.to_string())
} else {
None
}
}
/// Response Constants
const HTTP_REDIRECT_RESPONSE:&'static [u8] = b"HTTP/1.1 301 Moved Permanently\r\nLocation: https://tunnelto.dev/\r\nContent-Length: 20\r\n\r\nhttps://tunnelto.dev";
const HTTP_INVALID_HOST_RESPONSE: &'static [u8] =
b"HTTP/1.1 400\r\nContent-Length: 23\r\n\r\nError: Invalid Hostname";
const HTTP_NOT_FOUND_RESPONSE: &'static [u8] =
b"HTTP/1.1 404\r\nContent-Length: 23\r\n\r\nError: Tunnel Not Found";
const HTTP_ERROR_LOCATING_HOST_RESPONSE: &'static [u8] =
b"HTTP/1.1 500\r\nContent-Length: 27\r\n\r\nError: Error finding tunnel";
const HTTP_TUNNEL_REFUSED_RESPONSE: &'static [u8] =
b"HTTP/1.1 500\r\nContent-Length: 32\r\n\r\nTunnel says: connection refused.";
const HTTP_OK_RESPONSE: &'static [u8] = b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok";
const HEALTH_CHECK_PATH: &'static [u8] = b"/0xDEADBEEF_HEALTH_CHECK";
struct StreamWithPeekedHost {
socket: TcpStream,
host: String,
forwarded_for: String,
}
/// Filter incoming remote streams
#[tracing::instrument(skip(socket))]
async fn peek_http_request_host(mut socket: TcpStream) -> Option<StreamWithPeekedHost> {
/// Note we return early if the host header is not found
/// within the first 4kb of the request.
const MAX_HEADER_PEAK: usize = 4096;
let mut buf = vec![0; MAX_HEADER_PEAK]; // 4kb
tracing::debug!("checking stream headers");
let n = match socket.peek(&mut buf).await {
Ok(n) => n,
Err(e) => {
error!("failed to read from tcp socket to determine host: {:?}", e);
return None;
}
};
// make sure we actually peeked some bytes
if n == 0 {
tracing::debug!("unable to peek header bytes");
return None;
}
tracing::debug!("peeked {} stream bytes ", n);
let mut headers = [httparse::EMPTY_HEADER; 64]; // 64 seems like a generous # of headers
let mut req = httparse::Request::new(&mut headers);
if let Err(e) = req.parse(&buf[..n]) {
error!("failed to parse incoming http bytes: {:?}", e);
return None;
}
// Handle the health check route
if req.path.map(|s| s.as_bytes()) == Some(HEALTH_CHECK_PATH) {
let _ = socket.write_all(HTTP_OK_RESPONSE).await.map_err(|e| {
error!("failed to write health_check: {:?}", e);
});
return None;
}
// get the ip addr in the header
let forwarded_for = if let Some(Ok(forwarded_for)) = req
.headers
.iter()
.filter(|h| h.name.to_lowercase() == "x-forwarded-for".to_string())
.map(|h| std::str::from_utf8(h.value))
.next()
{
forwarded_for.to_string()
} else { | // look for a host header
if let Some(Ok(host)) = req
.headers
.iter()
.filter(|h| h.name.to_lowercase() == "host".to_string())
.map(|h| std::str::from_utf8(h.value))
.next()
{
tracing::info!(host=%host, path=%req.path.unwrap_or_default(), "peek request");
return Some(StreamWithPeekedHost {
socket,
host: host.to_string(),
forwarded_for,
});
}
tracing::info!("found no host header, dropping connection.");
None
}
/// Process Messages from the control path in & out of the remote stream
#[tracing::instrument(skip(tunnel_stream, tcp_stream))]
async fn process_tcp_stream(mut tunnel_stream: ActiveStream, mut tcp_stream: ReadHalf<TcpStream>) {
// send initial control stream init to client
control_server::send_client_stream_init(tunnel_stream.clone()).await;
// now read from stream and forward to clients
let mut buf = [0; 1024];
loop {
// client is no longer connected
if Connections::get(&tunnel_stream.client.id).is_none() {
debug!("client disconnected, closing stream");
let _ = tunnel_stream.tx.send(StreamMessage::NoClientTunnel).await;
tunnel_stream.tx.close_channel();
return;
}
// read from stream
let n = match tcp_stream.read(&mut buf).await {
Ok(n) => n,
Err(e) => {
error!("failed to read from tcp socket: {:?}", e);
return;
}
};
if n == 0 {
debug!("stream ended");
let _ = tunnel_stream
.client
.tx
.send(ControlPacket::End(tunnel_stream.id.clone()))
.await
.map_err(|e| {
error!("failed to send end signal: {:?}", e);
});
return;
}
debug!("read {} bytes", n);
let data = &buf[..n];
let packet = ControlPacket::Data(tunnel_stream.id.clone(), data.to_vec());
match tunnel_stream.client.tx.send(packet.clone()).await {
Ok(_) => debug!(client_id = %tunnel_stream.client.id, "sent data packet to client"),
Err(_) => {
error!("failed to forward tcp packets to disconnected client. dropping client.");
Connections::remove(&tunnel_stream.client);
}
}
}
}
#[tracing::instrument(skip(sink, stream_id, queue))]
async fn tunnel_to_stream(
subdomain: String,
stream_id: StreamId,
mut sink: WriteHalf<TcpStream>,
mut queue: UnboundedReceiver<StreamMessage>,
) {
loop {
let result = queue.next().await;
let result = if let Some(message) = result {
match message {
StreamMessage::Data(data) => Some(data),
StreamMessage::TunnelRefused => {
tracing::debug!(?stream_id, "tunnel refused");
let _ = sink.write_all(HTTP_TUNNEL_REFUSED_RESPONSE).await;
None
}
StreamMessage::NoClientTunnel => {
tracing::info!(%subdomain,?stream_id, "client tunnel not found");
let _ = sink.write_all(HTTP_NOT_FOUND_RESPONSE).await;
None
}
}
} else {
None
};
let data = match result {
Some(data) => data,
None => {
tracing::debug!("done tunneling to sink");
let _ = sink.shutdown().await.map_err(|_e| {
error!("error shutting down tcp stream");
});
ACTIVE_STREAMS.remove(&stream_id);
return;
}
};
let result = sink.write_all(&data).await;
if let Some(error) = result.err() {
tracing::warn!(?error, "stream closed, disconnecting");
return;
}
}
} | String::default()
};
| random_line_split |
remote.rs | use super::*;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::io::{ReadHalf, WriteHalf};
use tokio::net::TcpStream;
use tracing::debug;
use tracing::{error, Instrument};
async fn direct_to_control(mut incoming: TcpStream) {
let mut control_socket =
match TcpStream::connect(format!("localhost:{}", CONFIG.control_port)).await {
Ok(s) => s,
Err(error) => {
tracing::warn!(?error, "failed to connect to local control server");
return;
}
};
let (mut control_r, mut control_w) = control_socket.split();
let (mut incoming_r, mut incoming_w) = incoming.split();
let join_1 = tokio::io::copy(&mut control_r, &mut incoming_w);
let join_2 = tokio::io::copy(&mut incoming_r, &mut control_w);
match futures::future::join(join_1, join_2).await {
(Ok(_), Ok(_)) => {}
(Err(error), _) | (_, Err(error)) => {
tracing::error!(?error, "directing stream to control failed");
}
}
}
#[tracing::instrument(skip(socket))]
pub async fn accept_connection(socket: TcpStream) {
// peek the host of the http request
// if health check, then handle it and return
let StreamWithPeekedHost {
mut socket,
host,
forwarded_for,
} = match peek_http_request_host(socket).await {
Some(s) => s,
None => return,
};
tracing::info!(%host, %forwarded_for, "new remote connection");
// parse the host string and find our client
if CONFIG.allowed_hosts.contains(&host) {
error!("redirect to homepage");
let _ = socket.write_all(HTTP_REDIRECT_RESPONSE).await;
return;
}
let host = match validate_host_prefix(&host) {
Some(sub_domain) => sub_domain,
None => {
error!("invalid host specified");
let _ = socket.write_all(HTTP_INVALID_HOST_RESPONSE).await;
return;
}
};
// Special case -- we redirect this tcp connection to the control server
if host.as_str() == "wormhole" {
direct_to_control(socket).await;
return;
}
// find the client listening for this host
let client = match Connections::find_by_host(&host) {
Some(client) => client.clone(),
None => {
// check other instances that may be serving this host
match network::instance_for_host(&host).await {
Ok((instance, _)) => {
network::proxy_stream(instance, socket).await;
return;
}
Err(network::Error::DoesNotServeHost) => {
error!(%host, "no tunnel found");
let _ = socket.write_all(HTTP_NOT_FOUND_RESPONSE).await;
return;
}
Err(error) => {
error!(%host,?error, "failed to find instance");
let _ = socket.write_all(HTTP_ERROR_LOCATING_HOST_RESPONSE).await;
return;
}
}
}
};
// allocate a new stream for this request
let (active_stream, queue_rx) = ActiveStream::new(client.clone());
let stream_id = active_stream.id.clone();
tracing::debug!(
stream_id = %active_stream.id.to_string(),
"new stream connected"
);
let (stream, sink) = tokio::io::split(socket);
// add our stream
ACTIVE_STREAMS.insert(stream_id.clone(), active_stream.clone());
// read from socket, write to client
let span = observability::remote_trace("process_tcp_stream");
tokio::spawn(
async move {
process_tcp_stream(active_stream, stream).await;
}
.instrument(span),
);
// read from client, write to socket
let span = observability::remote_trace("tunnel_to_stream");
tokio::spawn(
async move {
tunnel_to_stream(host, stream_id, sink, queue_rx).await;
}
.instrument(span),
);
}
fn validate_host_prefix(host: &str) -> Option<String> {
let url = format!("http://{}", host);
let host = match url::Url::parse(&url)
.map(|u| u.host().map(|h| h.to_owned()))
.unwrap_or(None)
{
Some(domain) => domain.to_string(),
None => {
error!("invalid host header");
return None;
}
};
let domain_segments = host.split(".").collect::<Vec<&str>>();
let prefix = &domain_segments[0];
let remaining = &domain_segments[1..].join(".");
if CONFIG.allowed_hosts.contains(remaining) {
Some(prefix.to_string())
} else {
None
}
}
/// Response Constants
const HTTP_REDIRECT_RESPONSE:&'static [u8] = b"HTTP/1.1 301 Moved Permanently\r\nLocation: https://tunnelto.dev/\r\nContent-Length: 20\r\n\r\nhttps://tunnelto.dev";
const HTTP_INVALID_HOST_RESPONSE: &'static [u8] =
b"HTTP/1.1 400\r\nContent-Length: 23\r\n\r\nError: Invalid Hostname";
const HTTP_NOT_FOUND_RESPONSE: &'static [u8] =
b"HTTP/1.1 404\r\nContent-Length: 23\r\n\r\nError: Tunnel Not Found";
const HTTP_ERROR_LOCATING_HOST_RESPONSE: &'static [u8] =
b"HTTP/1.1 500\r\nContent-Length: 27\r\n\r\nError: Error finding tunnel";
const HTTP_TUNNEL_REFUSED_RESPONSE: &'static [u8] =
b"HTTP/1.1 500\r\nContent-Length: 32\r\n\r\nTunnel says: connection refused.";
const HTTP_OK_RESPONSE: &'static [u8] = b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok";
const HEALTH_CHECK_PATH: &'static [u8] = b"/0xDEADBEEF_HEALTH_CHECK";
struct StreamWithPeekedHost {
socket: TcpStream,
host: String,
forwarded_for: String,
}
/// Filter incoming remote streams
#[tracing::instrument(skip(socket))]
async fn peek_http_request_host(mut socket: TcpStream) -> Option<StreamWithPeekedHost> {
/// Note we return early if the host header is not found
/// within the first 4kb of the request.
const MAX_HEADER_PEAK: usize = 4096;
let mut buf = vec![0; MAX_HEADER_PEAK]; // 4kb
tracing::debug!("checking stream headers");
let n = match socket.peek(&mut buf).await {
Ok(n) => n,
Err(e) => {
error!("failed to read from tcp socket to determine host: {:?}", e);
return None;
}
};
// make sure we actually peeked some bytes
if n == 0 {
tracing::debug!("unable to peek header bytes");
return None;
}
tracing::debug!("peeked {} stream bytes ", n);
let mut headers = [httparse::EMPTY_HEADER; 64]; // 64 seems like a generous # of headers
let mut req = httparse::Request::new(&mut headers);
if let Err(e) = req.parse(&buf[..n]) {
error!("failed to parse incoming http bytes: {:?}", e);
return None;
}
// Handle the health check route
if req.path.map(|s| s.as_bytes()) == Some(HEALTH_CHECK_PATH) {
let _ = socket.write_all(HTTP_OK_RESPONSE).await.map_err(|e| {
error!("failed to write health_check: {:?}", e);
});
return None;
}
// get the ip addr in the header
let forwarded_for = if let Some(Ok(forwarded_for)) = req
.headers
.iter()
.filter(|h| h.name.to_lowercase() == "x-forwarded-for".to_string())
.map(|h| std::str::from_utf8(h.value))
.next()
{
forwarded_for.to_string()
} else {
String::default()
};
// look for a host header
if let Some(Ok(host)) = req
.headers
.iter()
.filter(|h| h.name.to_lowercase() == "host".to_string())
.map(|h| std::str::from_utf8(h.value))
.next()
{
tracing::info!(host=%host, path=%req.path.unwrap_or_default(), "peek request");
return Some(StreamWithPeekedHost {
socket,
host: host.to_string(),
forwarded_for,
});
}
tracing::info!("found no host header, dropping connection.");
None
}
/// Process Messages from the control path in & out of the remote stream
#[tracing::instrument(skip(tunnel_stream, tcp_stream))]
async fn | (mut tunnel_stream: ActiveStream, mut tcp_stream: ReadHalf<TcpStream>) {
// send initial control stream init to client
control_server::send_client_stream_init(tunnel_stream.clone()).await;
// now read from stream and forward to clients
let mut buf = [0; 1024];
loop {
// client is no longer connected
if Connections::get(&tunnel_stream.client.id).is_none() {
debug!("client disconnected, closing stream");
let _ = tunnel_stream.tx.send(StreamMessage::NoClientTunnel).await;
tunnel_stream.tx.close_channel();
return;
}
// read from stream
let n = match tcp_stream.read(&mut buf).await {
Ok(n) => n,
Err(e) => {
error!("failed to read from tcp socket: {:?}", e);
return;
}
};
if n == 0 {
debug!("stream ended");
let _ = tunnel_stream
.client
.tx
.send(ControlPacket::End(tunnel_stream.id.clone()))
.await
.map_err(|e| {
error!("failed to send end signal: {:?}", e);
});
return;
}
debug!("read {} bytes", n);
let data = &buf[..n];
let packet = ControlPacket::Data(tunnel_stream.id.clone(), data.to_vec());
match tunnel_stream.client.tx.send(packet.clone()).await {
Ok(_) => debug!(client_id = %tunnel_stream.client.id, "sent data packet to client"),
Err(_) => {
error!("failed to forward tcp packets to disconnected client. dropping client.");
Connections::remove(&tunnel_stream.client);
}
}
}
}
#[tracing::instrument(skip(sink, stream_id, queue))]
async fn tunnel_to_stream(
subdomain: String,
stream_id: StreamId,
mut sink: WriteHalf<TcpStream>,
mut queue: UnboundedReceiver<StreamMessage>,
) {
loop {
let result = queue.next().await;
let result = if let Some(message) = result {
match message {
StreamMessage::Data(data) => Some(data),
StreamMessage::TunnelRefused => {
tracing::debug!(?stream_id, "tunnel refused");
let _ = sink.write_all(HTTP_TUNNEL_REFUSED_RESPONSE).await;
None
}
StreamMessage::NoClientTunnel => {
tracing::info!(%subdomain,?stream_id, "client tunnel not found");
let _ = sink.write_all(HTTP_NOT_FOUND_RESPONSE).await;
None
}
}
} else {
None
};
let data = match result {
Some(data) => data,
None => {
tracing::debug!("done tunneling to sink");
let _ = sink.shutdown().await.map_err(|_e| {
error!("error shutting down tcp stream");
});
ACTIVE_STREAMS.remove(&stream_id);
return;
}
};
let result = sink.write_all(&data).await;
if let Some(error) = result.err() {
tracing::warn!(?error, "stream closed, disconnecting");
return;
}
}
}
| process_tcp_stream | identifier_name |
trig.rs | /*
This file is part of trig-rs, a library for doing typesafe trigonometry
with a variety of angle formats (radians, degrees, grad, turns, and so on).
*/
//! # `trig-rs`: Typesafe Trigonometric Primitives
//!
//! Leverage Rust's super-powered enums to create a typesafe system for
//! trigonometry in degrees, radians, and more.
//!
//! The code is hosted on [GitHub](https://github.com/atheriel/trig-rs), and a
//! copy of the documentation should be available at
//! [Rust-CI](http://www.rust-ci.org/atheriel/trig-rs/doc/trig/).
//!
//! ## Examples
//!
//! ```rust
//! use trig::{Angle, Rad, sin, cos};
//!
//! // Angle can be constructed in both common formats:
//! let angle1: Angle<f64> = Angle::degrees(180.0);
//! let angle2: Angle<f64> = Angle::radians(Float::pi());
//!
//! // As well as some more esoteric ones:
//! let angle3: Angle<f64> = Angle::gradians(200.0);
//! let angle4: Angle<f64> = Angle::turns(0.5);
//!
//! // And convert between them seamlessly:
//! match angle4.to_radians() {
//! Rad(val) => println!("0.5 turns is {}!", Rad(val)),
//! _ => fail!("But I wanted radians!")
//! }
//!
//! // We can use the top-level trigonometric functions on any of them:
//! assert_eq!(sin(angle1), sin(angle2));
//! assert_eq!(cos(angle3), cos(angle4));
//!
//! // We can also concatenate angles using Rust's + and - syntax, which will
//! // automatically handle conversion between different angle formats:
//! assert_eq!(angle1 + angle2, angle1 + angle3);
//!
//! // Note that angles are guaranteed to fall in the domains you'd expect
//! // them to:
//! assert_eq!(angle1, angle1 + angle1 + angle1)
//! ```
#![crate_name = "trig"]
#![comment = "Provides trigonometric primitives."]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![unstable]
#![feature(macro_rules)]
#![feature(struct_variant)]
use std::fmt;
/*
Top-level functions.
*/
/// Calculate the sine.
#[stable] #[inline] pub fn sin<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S |
/// Calculate the cosine.
#[stable] #[inline] pub fn cos<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.cos() }
/// Calculate the tangent.
#[stable] #[inline] pub fn tan<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.tan() }
/// Calculate the arcsine (in radians).
#[inline] pub fn asin<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.asin()) }
/// Calculate the arccosine (in radians).
#[inline] pub fn acos<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.acos()) }
/// Calculate the arctangent (in radians).
#[inline] pub fn atan<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.atan()) }
/*
The Trigonometry trait.
*/
/// Represents an object for which trigonometric methods are sensible and return
/// values of type `S`.
#[stable]
pub trait Trigonometry<S> {
/// Compute the sine of the object.
fn sin(&self) -> S;
/// Compute the cosine of the object.
fn cos(&self) -> S;
/// Compute the tangent of the object.
fn tan(&self) -> S;
// /// Compute the cosecant of the object.
// fn csc(&self) -> S;
// /// Compute the secant of the object.
// fn sec(&self) -> S;
// /// Compute the cotangent of the object.
// fn cot(&self) -> S;
}
/*
The Angle enum and its implementations.
*/
/// Base floating point types
pub trait BaseFloat: Primitive + FromPrimitive + fmt::Show + fmt::Float + Float + FloatMath {}
impl BaseFloat for f32 {}
impl BaseFloat for f64 {}
/// Encompasses representations of angles in the Euclidean plane.
#[deriving(Clone, PartialEq, PartialOrd, Hash)]
pub enum Angle<S> {
/// An angle in radians.
#[stable] Rad(S),
/// An angle in degrees.
#[stable] Deg(S),
/// An angle in [gradians](http://en.wikipedia.org/wiki/Grad_(angle)).
#[stable] Grad(S),
/// An angle in [turns](http://en.wikipedia.org/wiki/Turn_(geometry)).
#[stable] Turn(S),
/// An angle as it would appear on the face of a clock.
#[experimental] Clock {
/// The hours portion.
pub hour: S,
/// The minutes portion.
pub minute: S,
/// The seconds portion.
pub second: S
},
}
impl<S: BaseFloat + Mul<S, S> + Div<S, S> + Rem<S, S>> Angle<S> {
/// Returns an angle in radians.
pub fn radians(s: S) -> Angle<S> { Rad(s % Float::two_pi()) }
/// Returns an angle in degrees.
pub fn degrees(s: S) -> Angle<S> { Deg(s % FromPrimitive::from_f64(360.0).unwrap()) }
/// Returns an angle in gradians.
pub fn gradians(s: S) -> Angle<S> { Grad(s % FromPrimitive::from_f64(400.0).unwrap()) }
/// Returns an angle in turns.
pub fn turns(s: S) -> Angle<S> { Turn(s.fract()) }
/// Returns an angle as it would appear on a clock.
pub fn clock_face(hour: S, minute: S, second: S) -> Angle<S> {
Clock { hour: hour, minute: minute, second: second }
}
/// Converts an angle to radians.
pub fn to_radians(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::radians(val),
&Deg(val) => Angle::radians(val.to_radians()),
&Grad(val) => Angle::radians(val * Float::pi() / FromPrimitive::from_f64(200.0).unwrap()),
&Turn(val) => Angle::radians(val * Float::two_pi()),
_ => unimplemented!()
}
}
/// Converts an angle to degrees.
pub fn to_degrees(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::degrees(val.to_degrees()),
&Deg(val) => Angle::degrees(val),
&Grad(val) => Angle::degrees(val * FromPrimitive::from_f64(360.0 / 400.0).unwrap()),
&Turn(val) => Angle::degrees(val * FromPrimitive::from_f64(360.0).unwrap()),
_ => unimplemented!()
}
}
/// Converts an angle to gradians.
pub fn to_gradians(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::gradians(val / Float::pi() * FromPrimitive::from_f64(200.0).unwrap()),
&Deg(val) => Angle::gradians(val * FromPrimitive::from_f64(400.0 / 360.0).unwrap()),
&Grad(val) => Angle::gradians(val),
&Turn(val) => Angle::gradians(val * FromPrimitive::from_f64(400.0).unwrap()),
_ => unimplemented!()
}
}
/// Converts an angle to turns.
pub fn to_turns(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::turns(val / Float::two_pi()),
&Deg(val) => Angle::turns(val / FromPrimitive::from_f64(360.0).unwrap()),
&Grad(val) => Angle::turns(val / FromPrimitive::from_f64(400.0).unwrap()),
&Turn(val) => Angle::turns(val),
_ => unimplemented!()
}
}
/// One half of the domain. In radians, this is `π`.
pub fn half() -> Angle<S> { Rad(Float::pi()) }
/// One quarter of the domain. In radians, this is `π/2`.
pub fn quarter() -> Angle<S> { Rad(Float::frac_pi_2()) }
/// One sixth of the domain. In radians, this is `π/3`.
pub fn sixth() -> Angle<S> { Rad(Float::frac_pi_3()) }
/// One eighth of the domain. In radians, this is `π/4`.
pub fn eighth() -> Angle<S> { Rad(Float::frac_pi_4()) }
/// Gets the raw value that is stored in the angle.
///
/// ## Failure
///
/// Clock-valued angles are not encoded as a single value, and so this
/// method will always fail for them.
pub fn unwrap(&self) -> S {
match self {
&Rad(s)|&Deg(s)|&Grad(s)|&Turn(s) => s,
_ => fail!("Clock values cannot be unwrapped.")
}
}
}
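// Illustrative (follows the behaviour documented above; not a test from this file):
//
// let angle = Angle::degrees(90.0f64);
// assert_eq!(angle.unwrap(), 90.0);
// // ...whereas Angle::clock_face(3.0f64, 0.0, 0.0).unwrap() would fail!() at runtime.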
impl<S: BaseFloat> Add<Angle<S>, Angle<S>> for Angle<S> {
#[inline]
fn add(&self, other: &Angle<S>) -> Angle<S> {
match (self, other) {
(&Rad(val), othr) => Angle::radians(val + othr.to_radians().unwrap()),
(&Deg(val), othr) => Angle::degrees(val + othr.to_degrees().unwrap()),
(&Grad(val), othr) => Angle::gradians(val + othr.to_gradians().unwrap()),
(&Turn(val), othr) => Angle::turns(val + othr.to_turns().unwrap()),
_ => unimplemented!()
}
}
}
impl<S: BaseFloat> Sub<Angle<S>, Angle<S>> for Angle<S> {
#[inline]
fn sub(&self, other: &Angle<S>) -> Angle<S> {
match (self, other) {
(&Rad(val), othr) => Angle::radians(val - othr.to_radians().unwrap()),
(&Deg(val), othr) => Angle::degrees(val - othr.to_degrees().unwrap()),
(&Grad(val), othr) => Angle::gradians(val - othr.to_gradians().unwrap()),
(&Turn(val), othr) => Angle::turns(val - othr.to_turns().unwrap()),
_ => unimplemented!()
}
}
}
impl<S: BaseFloat + fmt::Show> fmt::Show for Angle<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Deg(val) => write!(f, "{}°", val),
&Rad(val) => write!(f, "{} rad", val),
&Grad(val) => write!(f, "{} gon", val),
&Turn(val) => write!(f, "{} turns", val),
_ => fail!("Not yet implemented.")
}
}
}
macro_rules! angle_trigonometry (
($($method:ident),+ ) => (
impl<S: BaseFloat> Trigonometry<S> for Angle<S> {
$(fn $method(&self) -> S {
match self {
&Rad(val) => val.$method(),
&other => other.to_radians().$method()
}
}
)+
}
)
)
angle_trigonometry!(sin, cos, tan)
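// For reference, the invocation above expands to roughly the following
// hand-written impl (shown only to illustrate what the macro generates):
//
// impl<S: BaseFloat> Trigonometry<S> for Angle<S> {
//     fn sin(&self) -> S {
//         match self {
//             &Rad(val) => val.sin(),
//             &other => other.to_radians().sin(),
//         }
//     }
//     // ...and likewise for cos and tan
// }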
/*
Test suite.
*/
#[cfg(test)]
mod test {
use super::Angle;
#[test]
fn test_conversion() {
let half: Angle<f64> = Angle::half();
assert_eq!(half.to_degrees().to_gradians().to_turns().to_radians(), half);
assert_eq!(half.to_turns().to_gradians().to_degrees().to_radians(), half);
assert_eq!(half.to_degrees().to_turns().to_gradians().to_radians(), half);
assert_eq!(half.to_gradians().to_radians(), half);
}
#[test]
fn test_operators() {
assert_eq!(Angle::degrees(100.0f64) + Angle::degrees(100.0f64), Angle::degrees(200.0f64));
assert_eq!(Angle::degrees(100.0f64) - Angle::degrees(100.0f64), Angle::degrees(0.0f64));
assert_eq!(Angle::degrees(100.0f64) + Angle::radians(0.0f64), Angle::degrees(100.0f64));
assert_eq!(Angle::radians(1.0f64) - Angle::degrees(0.0f64), Angle::radians(1.0f64));
}
}
| { t.sin() } | identifier_body |
trig.rs | /*
This file is part of trig-rs, a library for doing typesafe trigonometry
with a variety of angle formats (radians, degrees, grad, turns, and so on).
*/
//! # `trig-rs`: Typesafe Trigonometric Primitives
//!
//! Leverage Rust's super-powered enums to create a typesafe system for
//! trigonometry in degrees, radians, and more.
//!
//! The code is hosted on [GitHub](https://github.com/atheriel/trig-rs), and a
//! copy of the documentation should be available at
//! [Rust-CI](http://www.rust-ci.org/atheriel/trig-rs/doc/trig/).
//!
//! ## Examples
//!
//! ```rust
//! use trig::{Angle, Rad, sin, cos};
//!
//! // Angle can be constructed in both common formats:
//! let angle1: Angle<f64> = Angle::degrees(180.0);
//! let angle2: Angle<f64> = Angle::radians(Float::pi());
//!
//! // As well as some more esoteric ones:
//! let angle3: Angle<f64> = Angle::gradians(200.0);
//! let angle4: Angle<f64> = Angle::turns(0.5);
//!
//! // And convert between them seamlessly:
//! match angle4.to_radians() {
//! Rad(val) => println!("0.5 turns is {}!", Rad(val)),
//! _ => fail!("But I wanted radians!")
//! }
//!
//! // We can use the top-level trigonometric functions on any of them:
//! assert_eq!(sin(angle1), sin(angle2));
//! assert_eq!(cos(angle3), cos(angle4));
//!
//! // We can also concatenate angles using Rust's + and - syntax, which will
//! // automatically handle conversion between different angle formats:
//! assert_eq!(angle1 + angle2, angle1 + angle3);
//!
//! // Note that angles are guaranteed to fall in the domains you'd expect
//! // them to:
//! assert_eq!(angle1, angle1 + angle1 + angle1)
//! ```
#![crate_name = "trig"]
#![comment = "Provides trigonometric primitives."]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![unstable]
#![feature(macro_rules)]
#![feature(struct_variant)]
use std::fmt;
/*
Top-level functions.
*/
/// Calculate the sine.
#[stable] #[inline] pub fn sin<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.sin() }
/// Calculate the cosine.
#[stable] #[inline] pub fn cos<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.cos() }
/// Calculate the tangent.
#[stable] #[inline] pub fn tan<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.tan() }
/// Calculate the arcsine (in radians).
#[inline] pub fn asin<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.asin()) }
/// Calculate the arccosine (in radians).
#[inline] pub fn acos<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.acos()) }
/// Calculate the arctangent (in radians).
#[inline] pub fn atan<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.atan()) }
/*
The Trigonometry trait.
*/
/// Represents an object for which trigonometric methods are sensible and return
/// values of type `S`.
#[stable]
pub trait Trigonometry<S> {
/// Compute the sine of the object.
fn sin(&self) -> S;
/// Compute the cosine of the object.
fn cos(&self) -> S;
/// Compute the tangent of the object.
fn tan(&self) -> S;
// /// Compute the cosecant of the object.
// fn csc(&self) -> S;
// /// Compute the secant of the object.
// fn sec(&self) -> S;
// /// Compute the cotangent of the object.
// fn cot(&self) -> S;
}
/*
The Angle enum and its implementations.
*/
/// Base floating point types
pub trait BaseFloat: Primitive + FromPrimitive + fmt::Show + fmt::Float + Float + FloatMath {}
impl BaseFloat for f32 {}
impl BaseFloat for f64 {}
/// Encompasses representations of angles in the Euclidean plane.
#[deriving(Clone, PartialEq, PartialOrd, Hash)]
pub enum Angle<S> {
/// An angle in radians.
#[stable] Rad(S),
/// An angle in degrees.
#[stable] Deg(S),
/// An angle in [gradians](http://en.wikipedia.org/wiki/Grad_(angle)).
#[stable] Grad(S),
/// An angle in [turns](http://en.wikipedia.org/wiki/Turn_(geometry)).
#[stable] Turn(S),
/// An angle as it would appear on the face of a clock.
#[experimental] Clock {
/// The hours portion.
pub hour: S,
/// The minutes portion.
pub minute: S,
/// The seconds portion.
pub second: S
},
}
impl<S: BaseFloat + Mul<S, S> + Div<S, S> + Rem<S, S>> Angle<S> {
/// Returns an angle in radians.
pub fn | (s: S) -> Angle<S> { Rad(s % Float::two_pi()) }
/// Returns an angle in degrees.
pub fn degrees(s: S) -> Angle<S> { Deg(s % FromPrimitive::from_f64(360.0).unwrap()) }
/// Returns an angle in gradians.
pub fn gradians(s: S) -> Angle<S> { Grad(s % FromPrimitive::from_f64(400.0).unwrap()) }
/// Returns an angle in turns.
pub fn turns(s: S) -> Angle<S> { Turn(s.fract()) }
/// Returns an angle as it would appear on a clock.
pub fn clock_face(hour: S, minute: S, second: S) -> Angle<S> {
Clock { hour: hour, minute: minute, second: second }
}
/// Converts an angle to radians.
pub fn to_radians(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::radians(val),
&Deg(val) => Angle::radians(val.to_radians()),
&Grad(val) => Angle::radians(val * Float::pi() / FromPrimitive::from_f64(200.0).unwrap()),
&Turn(val) => Angle::radians(val * Float::two_pi()),
_ => unimplemented!()
}
}
/// Converts an angle to degrees.
pub fn to_degrees(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::degrees(val.to_degrees()),
&Deg(val) => Angle::degrees(val),
&Grad(val) => Angle::degrees(val * FromPrimitive::from_f64(360.0 / 400.0).unwrap()),
&Turn(val) => Angle::degrees(val * FromPrimitive::from_f64(360.0).unwrap()),
_ => unimplemented!()
}
}
/// Converts an angle to gradians.
pub fn to_gradians(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::gradians(val / Float::pi() * FromPrimitive::from_f64(200.0).unwrap()),
&Deg(val) => Angle::gradians(val * FromPrimitive::from_f64(400.0 / 360.0).unwrap()),
&Grad(val) => Angle::gradians(val),
&Turn(val) => Angle::gradians(val * FromPrimitive::from_f64(400.0).unwrap()),
_ => unimplemented!()
}
}
/// Converts an angle to turns.
pub fn to_turns(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::turns(val / Float::two_pi()),
&Deg(val) => Angle::turns(val / FromPrimitive::from_f64(360.0).unwrap()),
&Grad(val) => Angle::turns(val / FromPrimitive::from_f64(400.0).unwrap()),
&Turn(val) => Angle::turns(val),
_ => unimplemented!()
}
}
/// One half of the domain. In radians, this is `π`.
pub fn half() -> Angle<S> { Rad(Float::pi()) }
/// One quarter of the domain. In radians, this is `π/2`.
pub fn quarter() -> Angle<S> { Rad(Float::frac_pi_2()) }
/// One sixth of the domain. In radians, this is `π/3`.
pub fn sixth() -> Angle<S> { Rad(Float::frac_pi_3()) }
/// One eighth of the domain. In radians, this is `π/4`.
pub fn eighth() -> Angle<S> { Rad(Float::frac_pi_4()) }
/// Gets the raw value that is stored in the angle.
///
/// ## Failure
///
/// Clock-valued angles are not encoded as a single value, and so this
/// method will always fail for them.
pub fn unwrap(&self) -> S {
match self {
&Rad(s)|&Deg(s)|&Grad(s)|&Turn(s) => s,
_ => fail!("Clock values cannot be unwrapped.")
}
}
}
impl<S: BaseFloat> Add<Angle<S>, Angle<S>> for Angle<S> {
#[inline]
fn add(&self, other: &Angle<S>) -> Angle<S> {
match (self, other) {
(&Rad(val), othr) => Angle::radians(val + othr.to_radians().unwrap()),
(&Deg(val), othr) => Angle::degrees(val + othr.to_degrees().unwrap()),
(&Grad(val), othr) => Angle::gradians(val + othr.to_gradians().unwrap()),
(&Turn(val), othr) => Angle::turns(val + othr.to_turns().unwrap()),
_ => unimplemented!()
}
}
}
impl<S: BaseFloat> Sub<Angle<S>, Angle<S>> for Angle<S> {
#[inline]
fn sub(&self, other: &Angle<S>) -> Angle<S> {
match (self, other) {
(&Rad(val), othr) => Angle::radians(val - othr.to_radians().unwrap()),
(&Deg(val), othr) => Angle::degrees(val - othr.to_degrees().unwrap()),
(&Grad(val), othr) => Angle::gradians(val - othr.to_gradians().unwrap()),
(&Turn(val), othr) => Angle::turns(val - othr.to_turns().unwrap()),
_ => unimplemented!()
}
}
}
impl<S: BaseFloat + fmt::Show> fmt::Show for Angle<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Deg(val) => write!(f, "{}°", val),
&Rad(val) => write!(f, "{} rad", val),
&Grad(val) => write!(f, "{} gon", val),
&Turn(val) => write!(f, "{} turns", val),
_ => fail!("Not yet implemented.")
}
}
}
macro_rules! angle_trigonometry (
($($method:ident),+ ) => (
impl<S: BaseFloat> Trigonometry<S> for Angle<S> {
$(fn $method(&self) -> S {
match self {
&Rad(val) => val.$method(),
&other => other.to_radians().$method()
}
}
)+
}
)
)
angle_trigonometry!(sin, cos, tan)
/*
Test suite.
*/
#[cfg(test)]
mod test {
use super::Angle;
#[test]
fn test_conversion() {
let half: Angle<f64> = Angle::half();
assert_eq!(half.to_degrees().to_gradians().to_turns().to_radians(), half);
assert_eq!(half.to_turns().to_gradians().to_degrees().to_radians(), half);
assert_eq!(half.to_degrees().to_turns().to_gradians().to_radians(), half);
assert_eq!(half.to_gradians().to_radians(), half);
}
#[test]
fn test_operators() {
assert_eq!(Angle::degrees(100.0f64) + Angle::degrees(100.0f64), Angle::degrees(200.0f64));
assert_eq!(Angle::degrees(100.0f64) - Angle::degrees(100.0f64), Angle::degrees(0.0f64));
assert_eq!(Angle::degrees(100.0f64) + Angle::radians(0.0f64), Angle::degrees(100.0f64));
assert_eq!(Angle::radians(1.0f64) - Angle::degrees(0.0f64), Angle::radians(1.0f64));
}
}
| radians | identifier_name |
trig.rs | /*
This file is part of trig-rs, a library for doing typesafe trigonometry
with a variety of angle formats (radians, degrees, grad, turns, and so on).
*/
//! # `trig-rs`: Typesafe Trigonometric Primitives
//!
//! Leverage Rust's super-powered enums to create a typesafe system for
//! trigonometry in degrees, radians, and more. | //! copy of the documentation should be available at
//! [Rust-CI](http://www.rust-ci.org/atheriel/trig-rs/doc/trig/).
//!
//! ## Examples
//!
//! ```rust
//! use trig::{Angle, Rad, sin, cos};
//!
//! // Angle can be constructed in both common formats:
//! let angle1: Angle<f64> = Angle::degrees(180.0);
//! let angle2: Angle<f64> = Angle::radians(Float::pi());
//!
//! // As well as some more esoteric ones:
//! let angle3: Angle<f64> = Angle::gradians(200.0);
//! let angle4: Angle<f64> = Angle::turns(0.5);
//!
//! // And convert between them seamlessly:
//! match angle4.to_radians() {
//! Rad(val) => println!("0.5 turns is {}!", Rad(val)),
//! _ => fail!("But I wanted radians!")
//! }
//!
//! // We can use the top-level trigonometric functions on any of them:
//! assert_eq!(sin(angle1), sin(angle2));
//! assert_eq!(cos(angle3), cos(angle4));
//!
//! // We can also concatenate angles using Rust's + and - syntax, which will
//! // automatically handle conversion between different angle formats:
//! assert_eq!(angle1 + angle2, angle1 + angle3);
//!
//! // Note that angles are guaranteed to fall in the domains you'd expect
//! // them to:
//! assert_eq!(angle1, angle1 + angle1 + angle1)
//! ```
#![crate_name = "trig"]
#![comment = "Provides trigonometric primitives."]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![unstable]
#![feature(macro_rules)]
#![feature(struct_variant)]
use std::fmt;
/*
Top-level functions.
*/
/// Calculate the sine.
#[stable] #[inline] pub fn sin<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.sin() }
/// Calculate the cosine.
#[stable] #[inline] pub fn cos<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.cos() }
/// Calculate the tangent.
#[stable] #[inline] pub fn tan<S: BaseFloat, T: Trigonometry<S>>(t: T) -> S { t.tan() }
/// Calculate the arcsine (in radians).
#[inline] pub fn asin<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.asin()) }
/// Calculate the arccosine (in radians).
#[inline] pub fn acos<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.acos()) }
/// Calculate the arctangent (in radians).
#[inline] pub fn atan<S: BaseFloat>(s: S) -> Angle<S> { Angle::radians(s.atan()) }
/*
The Trigonometry trait.
*/
/// Represents an object for which trigonometric methods are sensible and return
/// values of type `S`.
#[stable]
pub trait Trigonometry<S> {
/// Compute the sine of the object.
fn sin(&self) -> S;
/// Compute the cosine of the object.
fn cos(&self) -> S;
/// Compute the tangent of the object.
fn tan(&self) -> S;
// /// Compute the cosecant of the object.
// fn csc(&self) -> S;
// /// Compute the secant of the object.
// fn sec(&self) -> S;
// /// Compute the cotangent of the object.
// fn cot(&self) -> S;
}
/*
The Angle enum and its implementations.
*/
/// Base floating point types
pub trait BaseFloat: Primitive + FromPrimitive + fmt::Show + fmt::Float + Float + FloatMath {}
impl BaseFloat for f32 {}
impl BaseFloat for f64 {}
/// Encompasses representations of angles in the Euclidean plane.
#[deriving(Clone, PartialEq, PartialOrd, Hash)]
pub enum Angle<S> {
/// An angle in radians.
#[stable] Rad(S),
/// An angle in degrees.
#[stable] Deg(S),
/// An angle in [gradians](http://en.wikipedia.org/wiki/Grad_(angle)).
#[stable] Grad(S),
/// An angle in [turns](http://en.wikipedia.org/wiki/Turn_(geometry)).
#[stable] Turn(S),
/// An angle as it would appear on the face of a clock.
#[experimental] Clock {
/// The hours portion.
pub hour: S,
/// The minutes portion.
pub minute: S,
/// The seconds portion.
pub second: S
},
}
impl<S: BaseFloat + Mul<S, S> + Div<S, S> + Rem<S, S>> Angle<S> {
/// Returns an angle in radians.
pub fn radians(s: S) -> Angle<S> { Rad(s % Float::two_pi()) }
/// Returns an angle in degrees.
pub fn degrees(s: S) -> Angle<S> { Deg(s % FromPrimitive::from_f64(360.0).unwrap()) }
/// Returns an angle in gradians.
pub fn gradians(s: S) -> Angle<S> { Grad(s % FromPrimitive::from_f64(400.0).unwrap()) }
/// Returns an angle in turns.
pub fn turns(s: S) -> Angle<S> { Turn(s.fract()) }
/// Returns an angle as it would appear on a clock.
pub fn clock_face(hour: S, minute: S, second: S) -> Angle<S> {
Clock { hour: hour, minute: minute, second: second }
}
/// Converts an angle to radians.
pub fn to_radians(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::radians(val),
&Deg(val) => Angle::radians(val.to_radians()),
&Grad(val) => Angle::radians(val * Float::pi() / FromPrimitive::from_f64(200.0).unwrap()),
&Turn(val) => Angle::radians(val * Float::two_pi()),
_ => unimplemented!()
}
}
/// Converts an angle to degrees.
pub fn to_degrees(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::degrees(val.to_degrees()),
&Deg(val) => Angle::degrees(val),
&Grad(val) => Angle::degrees(val * FromPrimitive::from_f64(360.0 / 400.0).unwrap()),
&Turn(val) => Angle::degrees(val * FromPrimitive::from_f64(360.0).unwrap()),
_ => unimplemented!()
}
}
/// Converts an angle to gradians.
pub fn to_gradians(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::gradians(val / Float::pi() * FromPrimitive::from_f64(200.0).unwrap()),
&Deg(val) => Angle::gradians(val * FromPrimitive::from_f64(400.0 / 360.0).unwrap()),
&Grad(val) => Angle::gradians(val),
&Turn(val) => Angle::gradians(val * FromPrimitive::from_f64(400.0).unwrap()),
_ => unimplemented!()
}
}
/// Converts an angle to turns.
pub fn to_turns(&self) -> Angle<S> {
match self {
&Rad(val) => Angle::turns(val / Float::two_pi()),
&Deg(val) => Angle::turns(val / FromPrimitive::from_f64(360.0).unwrap()),
&Grad(val) => Angle::turns(val / FromPrimitive::from_f64(400.0).unwrap()),
&Turn(val) => Angle::turns(val),
_ => unimplemented!()
}
}
/// One half of the domain. In radians, this is `π`.
pub fn half() -> Angle<S> { Rad(Float::pi()) }
/// One quarter of the domain. In radians, this is `π/2`.
pub fn quarter() -> Angle<S> { Rad(Float::frac_pi_2()) }
/// One sixth of the domain. In radians, this is `π/3`.
pub fn sixth() -> Angle<S> { Rad(Float::frac_pi_3()) }
/// One eighth of the domain. In radians, this is `π/4`.
pub fn eighth() -> Angle<S> { Rad(Float::frac_pi_4()) }
/// Gets the raw value that is stored in the angle.
///
/// ## Failure
///
/// Clock-valued angles are not encoded as a single value, and so this
/// method will always fail for them.
pub fn unwrap(&self) -> S {
match self {
&Rad(s)|&Deg(s)|&Grad(s)|&Turn(s) => s,
_ => fail!("Clock values cannot be unwrapped.")
}
}
}
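/// Adds two angles, converting the right-hand side into the left-hand side's representation first. Clock-valued angles are not yet supported.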
impl<S: BaseFloat> Add<Angle<S>, Angle<S>> for Angle<S> {
#[inline]
fn add(&self, other: &Angle<S>) -> Angle<S> {
match (self, other) {
(&Rad(val), othr) => Angle::radians(val + othr.to_radians().unwrap()),
(&Deg(val), othr) => Angle::degrees(val + othr.to_degrees().unwrap()),
(&Grad(val), othr) => Angle::gradians(val + othr.to_gradians().unwrap()),
(&Turn(val), othr) => Angle::turns(val + othr.to_turns().unwrap()),
_ => unimplemented!()
}
}
}
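/// Subtracts two angles, converting the right-hand side into the left-hand side's representation first. Clock-valued angles are not yet supported.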
impl<S: BaseFloat> Sub<Angle<S>, Angle<S>> for Angle<S> {
#[inline]
fn sub(&self, other: &Angle<S>) -> Angle<S> {
match (self, other) {
(&Rad(val), othr) => Angle::radians(val - othr.to_radians().unwrap()),
(&Deg(val), othr) => Angle::degrees(val - othr.to_degrees().unwrap()),
(&Grad(val), othr) => Angle::gradians(val - othr.to_gradians().unwrap()),
(&Turn(val), othr) => Angle::turns(val - othr.to_turns().unwrap()),
_ => unimplemented!()
}
}
}
impl<S: BaseFloat + fmt::Show> fmt::Show for Angle<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Deg(val) => write!(f, "{}°", val),
&Rad(val) => write!(f, "{} rad", val),
&Grad(val) => write!(f, "{} gon", val),
&Turn(val) => write!(f, "{} turns", val),
_ => fail!("Not yet implemented.")
}
}
}
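// The macro below implements `Trigonometry` for `Angle` by delegating to the float
// methods, converting non-radian variants to radians first.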
macro_rules! angle_trigonometry (
($($method:ident),+ ) => (
impl<S: BaseFloat> Trigonometry<S> for Angle<S> {
$(fn $method(&self) -> S {
match self {
&Rad(val) => val.$method(),
&other => other.to_radians().$method()
}
}
)+
}
)
)
angle_trigonometry!(sin, cos, tan)
/*
Test suite.
*/
#[cfg(test)]
mod test {
use super::Angle;
#[test]
fn test_conversion() {
let half: Angle<f64> = Angle::half();
assert_eq!(half.to_degrees().to_gradians().to_turns().to_radians(), half);
assert_eq!(half.to_turns().to_gradians().to_degrees().to_radians(), half);
assert_eq!(half.to_degrees().to_turns().to_gradians().to_radians(), half);
assert_eq!(half.to_gradians().to_radians(), half);
}
#[test]
fn test_operators() {
assert_eq!(Angle::degrees(100.0f64) + Angle::degrees(100.0f64), Angle::degrees(200.0f64));
assert_eq!(Angle::degrees(100.0f64) - Angle::degrees(100.0f64), Angle::degrees(0.0f64));
assert_eq!(Angle::degrees(100.0f64) + Angle::radians(0.0f64), Angle::degrees(100.0f64));
assert_eq!(Angle::radians(1.0f64) - Angle::degrees(0.0f64), Angle::radians(1.0f64));
}
} | //!
//! The code is hosted on [GitHub](https://github.com/atheriel/trig-rs), and a | random_line_split |
lib.rs | //! # Bracket Parse
//!
//! A Utility for parsing Bracketed lists and sets of strings.
//!
//! It is a relatively lazy way of parsing items from a bracketed string,
//!
//! "hello(peter,dave)" is easy for it to handle, as are nested brackets.
//!
//! The above will result in something like
//!
//! >Branch[Leaf("hello"),Branch[Leaf("peter"),Leaf("dave")]]
//!
//! This is not intended to be super extensible right now,
//! though contributions are welcome.
//!
//! The list can also be constructed relatively simply by
//! using chained builder type methods
//!
//! ```
//! use bracket_parse::{Bracket,br};
//! use bracket_parse::Bracket::{Leaf,Branch};
//! use std::str::FromStr;
//!
//! let str1 = Bracket::from_str("hello(peter,dave)").unwrap();
//!
//! //Standard Build method
//! let basic1 = Branch(vec![Leaf("hello".to_string()),
//! Branch(vec![Leaf("peter".to_string()),
//! Leaf("dave".to_string())])]);
//!
//! //Chaining Build method
//! let chain1 = br().sib_lf("hello")
//! .sib(br().sib_lf("peter").sib_lf("dave"));
//!
//! assert_eq!(str1,basic1);
//! assert_eq!(str1,chain1);
//! ```
//!
//! It can also handle string input with escapes. Quotes are removed and the string item is
//! considered a single Leaf value;
//!
//! ```
//! use bracket_parse::{Bracket,br,lf};
//! use std::str::FromStr;
//!
//! let bk = Bracket::from_str(r#""hello"'matt"' "and \"friends\"""#).unwrap();
//! let chn = br().sib_lf("hello").sib_lf("matt\"").sib_lf("and \"friends\"");
//! assert_eq!(bk,chn);
//!
//! ```
use std::str::FromStr;
use std::fmt;
use std::fmt::Display;
use std::iter::IntoIterator;
pub mod tail;
pub use tail::{Tail};
use tail::EMPTY_BRACKET;
pub mod iter;
pub use iter::*;
#[derive(PartialEq,Debug)]
pub enum Bracket{
Branch(Vec<Bracket>),
Leaf(String),
Empty,
}
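/// Convenience constructor for a `Leaf` built from an `&str`.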
pub fn lf(s:&str)->Bracket{
Bracket::Leaf(s.to_string())
}
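/// Convenience constructor for an empty `Branch`, ready for chaining with `sib` and `sib_lf`.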
pub fn br()->Bracket{
Bracket::Branch(Vec::new())
}
impl FromStr for Bracket{
type Err = String;
fn from_str(s:&str)->Result<Bracket,String>{
let mut res = Bracket::Empty;
let mut it = s.chars();
let mut curr = String::new();
while let Some(c) = it.next() {
Bracket::match_char(c,&mut it,&mut curr,&mut res)?;
}
if curr.len() >0 {
res.add_sib_str(curr);
}
Ok(res)
}
}
impl<'a>IntoIterator for &'a Bracket{
type Item = &'a Bracket;
type IntoIter = BracketIter<'a>;
fn into_iter(self)->Self::IntoIter{
BracketIter::new(self)
}
}
impl Bracket{
fn add_sib_str(&mut self,s:String){
if s.len() == 0 {
return
}
self.add_sibling(Bracket::Leaf(s));
}
/// Chaining method for quickly creating a tree. Adds a sibling to this bracket;
/// if it is currently a leaf, it is promoted to a branch containing the old leaf and the new sibling.
pub fn sib(mut self,s:Self)->Self{
self.add_sibling(s);
self
}
/// Chaining method for easily adding a leaf as a sibling from an `&str`.
pub fn sib_lf(self,s:&str)->Self{
self.sib(lf(s))
}
fn add_sibling(&mut self,s:Bracket){
if s == Bracket::Empty {
return
}
let c:String = match self {
Bracket::Branch(ref mut v)=>{
v.push(s);
return
}
Bracket::Empty=>{
*self = s;
return
}
Bracket::Leaf(content)=>content.to_string(),
};
*self = Bracket::Branch(vec![Bracket::Leaf(c),s]);
}
fn match_char<I>(c:char,it:&mut I,curr:&mut String,res:&mut Bracket)->Result<(),String>
where I:Iterator<Item=char>{
match c {
'('=>{ // When Non-Lexical Lifetimes arrive, we can hopefully get rid of these curr.clone()s
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,')')?);
},
'{'=>{ //Todo make Json-esque prob needs Object Variant
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,'}')?);
},
'['=>{
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,']')?);
},
'"'|'\''=>{
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_quotes(it,c)?);
}
' '|','=>{
res.add_sib_str(curr.clone());
*curr = String::new();
},
other=>curr.push(other),
}
Ok(())
}
fn from_bracket<I:Iterator<Item=char>>(it:&mut I,delim:char)->Result<Bracket,String>{
let mut res = Bracket::Branch(Vec::new());
let mut curr = String::new();
while let Some(c) = it.next() {
if c == delim {
res.add_sib_str(curr.clone());
return Ok(res);
}
Bracket::match_char(c,it,&mut curr,&mut res)?;
}
Err(format!("Close Delim '{}' not found",delim))
}
fn from_quotes<I:Iterator<Item=char>>(it:&mut I,delim:char)->Result<Bracket,String>{
let mut curr = String::new();
while let Some(c) = it.next() {
if c == delim {
return Ok(Bracket::Leaf(curr));
}
match c {
'\\'=>{
match it.next(){
Some(c2)=>{
curr.push(c2);
continue
},
None=>return Err("Escape before end of string".to_string()),
}
},
_=> curr.push(c),
}
}
Err(format!("Close Delim '{}' not found",delim))
}
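/// Returns the first child of this bracket, or the empty bracket if it is not a branch or has no children.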
pub fn head<'a>(&'a self)->&'a Bracket{
match self{
Bracket::Branch(v)=>match v.len(){
0 => &EMPTY_BRACKET,
_ => &v[0],
}
_ => &EMPTY_BRACKET,
}
}
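/// Returns every child after the first as a [`Tail`], or `Tail::Empty` if there are fewer than two children.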
pub fn tail<'a>(&'a self)->Tail<'a>{
match self{
Bracket::Branch(v)=>match v.len(){
0|1 =>Tail::Empty,
_=>Tail::Rest(&v[1..]),
}
_=>Tail::Empty,
}
}
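/// Returns the children from index `n` onwards as a [`Tail`], or `Tail::Empty` if there are not that many.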
pub fn tail_n<'a>(&'a self,n:usize)->Tail<'a>{
match self{
Bracket::Branch(v)=>{
if v.len() <= n {
return Tail::Empty;
}
Tail::Rest(&v[n..])
}
_=>Tail::Empty,
}
}
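/// Returns the child at index `n`, or the empty bracket if it does not exist.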
pub fn tail_h<'a>(&'a self, n:usize)->&'a Bracket{
match self{
Bracket::Branch(v)=>{
if v.len() <= n{
return &EMPTY_BRACKET;
}
&v[n]
}
_=>&EMPTY_BRACKET,
}
}
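/// Convenience method returning `(self.head(), self.tail())` as a tuple.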
pub fn head_tail<'a>(&'a self)->(&'a Bracket,Tail<'a>){
(self.head(),self.tail())
}
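/// Returns the contained string slice if this bracket is a `Leaf`, or `""` otherwise; handy in `match` arms.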
pub fn match_str<'a>(&'a self)->&'a str{
match self {
Bracket::Leaf(ref s)=>s.as_ref(),
_=>"",
}
}
}
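// A minimal head/tail sketch (illustrative; mirrors `test_head_tail` in the tests below):
//
//     let b = Bracket::from_str("add 1 2").unwrap();
//     match b.head().match_str() {
//         "add" => { /* operate on b.tail() here */ }
//         _ => {}
//     }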
impl Display for Bracket {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Bracket::Branch(ref v)=>{
let mut gap = "";
for b in v {
let res = match b {
Bracket::Branch(_)=>write!(f,"{}[{}]",gap,b),
_=>write!(f,"{}{}",gap,b),
};
if res.is_err(){
return res;
}
gap = " ";
}
Ok(())
},
Bracket::Leaf(s)=>{
//TODO handle Escapes
write!(f,"\"{}\"",s)
},
_=>{ write!(f,"--EMPTY--") },
}
}
}
#[cfg(test)]
mod tests {
use super::{Bracket,br,lf};
use std::str::FromStr;
#[test]
fn spaces() {
let b1 = Bracket::from_str("matt dave (andy steve)").unwrap();
let c1 = br().sib_lf("matt").sib_lf("dave").sib(
br().sib_lf("andy").sib_lf("steve")
);
let b2 = Bracket::from_str("matt dave( andy steve)").unwrap();
let b3 = Bracket::from_str(" matt dave ( andy steve ) ").unwrap();
assert_eq!(b1,c1);
assert_eq!(b1,b2);
assert_eq!(b1,b3);
}
#[test]
fn empty_parent(){
let b1 = Bracket::from_str("matt () dave").unwrap();
let c1 = br().sib_lf("matt").sib(br()).sib_lf("dave");
assert_eq!(b1,c1);
}
#[test]
fn many_parent(){
let b1 = Bracket::from_str("matt ({[() ()]})").unwrap();
let c1 = lf("matt")
.sib(
br().sib(
br().sib(
br().sib(br()).sib(br())
)
)
);
assert_eq!(b1,c1);
}
#[test]
fn strings(){
let b1 = Bracket::from_str(r#"matt"dave""#).unwrap();
let c1 = br().sib_lf("matt").sib_lf("dave");
assert_eq!(b1,c1);
let b2 = Bracket::from_str(r#""andy \"hates\" cheese""#).unwrap();
let c2 = lf(r#"andy "hates" cheese"#);
assert_eq!(b2,c2);
}
#[test]
fn errors(){
assert!(Bracket::from_str("peop ( er").is_err());
assert!(Bracket::from_str(r#""poop"#).is_err());
} | #[test]
fn test_head_tail(){
let b1 = Bracket::from_str("hello (andy dave)").unwrap();
match b1.head().match_str(){
"hello"=>{},//Where the actual code might go
_=>panic!("Head is not hello leaf"),
}
}
#[test]
fn many_tails(){
let pb = br().sib_lf("matt").sib_lf("dave").sib_lf("pete").sib_lf("andy");
let t1 = pb.tail(); //pb is parent bracket, t1 is tail
let t4 = t1.tail_h(2).match_str();
assert_eq!(t4,"andy");
let th1 = pb.tail_h(3).match_str();
assert_eq!(t4,th1);
}
#[test]
fn test_to_string(){
let br = Bracket::from_str("matt dave( andy steve)").unwrap();
let bs = br.to_string();
assert_eq!(&bs,r#""matt" "dave" ["andy" "steve"]"#);
}
} | random_line_split |
|
lib.rs | //! # Bracket Parse
//!
//! A Utility for parsing Bracketed lists and sets of strings.
//!
//! It is a relatively lazy way of parsing items from a bracketed string,
//!
//! "hello(peter,dave)" is easy for it to handle, as are nested brackets.
//!
//! The above will result in something like
//!
//! >Branch[Leaf("hello"),Branch[Leaf("peter"),Leaf("dave")]]
//!
//! This is not intended to be super extensible right now,
//! though contributions are welcome.
//!
//! The list can also be constructed relatively simply by
//! using chained builder type methods
//!
//! ```
//! use bracket_parse::{Bracket,br};
//! use bracket_parse::Bracket::{Leaf,Branch};
//! use std::str::FromStr;
//!
//! let str1 = Bracket::from_str("hello(peter,dave)").unwrap();
//!
//! //Standard Build method
//! let basic1 = Branch(vec![Leaf("hello".to_string()),
//! Branch(vec![Leaf("peter".to_string()),
//! Leaf("dave".to_string())])]);
//!
//! //Chaining Build method
//! let chain1 = br().sib_lf("hello")
//! .sib(br().sib_lf("peter").sib_lf("dave"));
//!
//! assert_eq!(str1,basic1);
//! assert_eq!(str1,chain1);
//! ```
//!
//! It can also handle string input with escapes. Quotes are removed and the string item is
//! considered a single Leaf value;
//!
//! ```
//! use bracket_parse::{Bracket,br,lf};
//! use std::str::FromStr;
//!
//! let bk = Bracket::from_str(r#""hello"'matt"' "and \"friends\"""#).unwrap();
//! let chn = br().sib_lf("hello").sib_lf("matt\"").sib_lf("and \"friends\"");
//! assert_eq!(bk,chn);
//!
//! ```
use std::str::FromStr;
use std::fmt;
use std::fmt::Display;
use std::iter::IntoIterator;
pub mod tail;
pub use tail::{Tail};
use tail::EMPTY_BRACKET;
pub mod iter;
pub use iter::*;
#[derive(PartialEq,Debug)]
pub enum Bracket{
Branch(Vec<Bracket>),
Leaf(String),
Empty,
}
pub fn lf(s:&str)->Bracket |
pub fn br()->Bracket{
Bracket::Branch(Vec::new())
}
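// Parsing entry point: `from_str` walks the input once, splitting leaves on spaces and
// commas, recursing into (), {} and [] via `from_bracket`, and collecting quoted strings
// via `from_quotes`.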
impl FromStr for Bracket{
type Err = String;
fn from_str(s:&str)->Result<Bracket,String>{
let mut res = Bracket::Empty;
let mut it = s.chars();
let mut curr = String::new();
while let Some(c) = it.next() {
Bracket::match_char(c,&mut it,&mut curr,&mut res)?;
}
if curr.len() >0 {
res.add_sib_str(curr);
}
Ok(res)
}
}
impl<'a>IntoIterator for &'a Bracket{
type Item = &'a Bracket;
type IntoIter = BracketIter<'a>;
fn into_iter(self)->Self::IntoIter{
BracketIter::new(self)
}
}
impl Bracket{
fn add_sib_str(&mut self,s:String){
if s.len() == 0 {
return
}
self.add_sibling(Bracket::Leaf(s));
}
/// Chaining method for quickly creating a tree. Adds a sibling to this bracket;
/// if it is currently a leaf, it is promoted to a branch containing the old leaf and the new sibling.
pub fn sib(mut self,s:Self)->Self{
self.add_sibling(s);
self
}
/// Chaining method for easily adding a leaf as a sibling from an `&str`.
pub fn sib_lf(self,s:&str)->Self{
self.sib(lf(s))
}
fn add_sibling(&mut self,s:Bracket){
if s == Bracket::Empty {
return
}
let c:String = match self {
Bracket::Branch(ref mut v)=>{
v.push(s);
return
}
Bracket::Empty=>{
*self = s;
return
}
Bracket::Leaf(content)=>content.to_string(),
};
*self = Bracket::Branch(vec![Bracket::Leaf(c),s]);
}
fn match_char<I>(c:char,it:&mut I,curr:&mut String,res:&mut Bracket)->Result<(),String>
where I:Iterator<Item=char>{
match c {
'('=>{ // When Non-Lexical Lifetimes arrive, we can hopefully get rid of these curr.clone()s
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,')')?);
},
'{'=>{ //Todo make Json-esque prob needs Object Variant
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,'}')?);
},
'['=>{
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,']')?);
},
'"'|'\''=>{
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_quotes(it,c)?);
}
' '|','=>{
res.add_sib_str(curr.clone());
*curr = String::new();
},
other=>curr.push(other),
}
Ok(())
}
fn from_bracket<I:Iterator<Item=char>>(it:&mut I,delim:char)->Result<Bracket,String>{
let mut res = Bracket::Branch(Vec::new());
let mut curr = String::new();
while let Some(c) = it.next() {
if c == delim {
res.add_sib_str(curr.clone());
return Ok(res);
}
Bracket::match_char(c,it,&mut curr,&mut res)?;
}
Err(format!("Close Delim '{}' not found",delim))
}
fn from_quotes<I:Iterator<Item=char>>(it:&mut I,delim:char)->Result<Bracket,String>{
let mut curr = String::new();
while let Some(c) = it.next() {
if c == delim {
return Ok(Bracket::Leaf(curr));
}
match c {
'\\'=>{
match it.next(){
Some(c2)=>{
curr.push(c2);
continue
},
None=>return Err("Escape before end of string".to_string()),
}
},
_=> curr.push(c),
}
}
Err(format!("Close Delim '{}' not found",delim))
}
pub fn head<'a>(&'a self)->&'a Bracket{
match self{
Bracket::Branch(v)=>match v.len(){
0 => &EMPTY_BRACKET,
_ => &v[0],
}
_ => &EMPTY_BRACKET,
}
}
pub fn tail<'a>(&'a self)->Tail<'a>{
match self{
Bracket::Branch(v)=>match v.len(){
0|1 =>Tail::Empty,
_=>Tail::Rest(&v[1..]),
}
_=>Tail::Empty,
}
}
pub fn tail_n<'a>(&'a self,n:usize)->Tail<'a>{
match self{
Bracket::Branch(v)=>{
if v.len() <= n {
return Tail::Empty;
}
Tail::Rest(&v[n..])
}
_=>Tail::Empty,
}
}
pub fn tail_h<'a>(&'a self, n:usize)->&'a Bracket{
match self{
Bracket::Branch(v)=>{
if v.len() <= n{
return &EMPTY_BRACKET;
}
&v[n]
}
_=>&EMPTY_BRACKET,
}
}
pub fn head_tail<'a>(&'a self)->(&'a Bracket,Tail<'a>){
(self.head(),self.tail())
}
pub fn match_str<'a>(&'a self)->&'a str{
match self {
Bracket::Leaf(ref s)=>s.as_ref(),
_=>"",
}
}
}
impl Display for Bracket {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Bracket::Branch(ref v)=>{
let mut gap = "";
for b in v {
let res = match b {
Bracket::Branch(_)=>write!(f,"{}[{}]",gap,b),
_=>write!(f,"{}{}",gap,b),
};
if res.is_err(){
return res;
}
gap = " ";
}
Ok(())
},
Bracket::Leaf(s)=>{
//TODO handle Escapes
write!(f,"\"{}\"",s)
},
_=>{ write!(f,"--EMPTY--") },
}
}
}
#[cfg(test)]
mod tests {
use super::{Bracket,br,lf};
use std::str::FromStr;
#[test]
fn spaces() {
let b1 = Bracket::from_str("matt dave (andy steve)").unwrap();
let c1 = br().sib_lf("matt").sib_lf("dave").sib(
br().sib_lf("andy").sib_lf("steve")
);
let b2 = Bracket::from_str("matt dave( andy steve)").unwrap();
let b3 = Bracket::from_str(" matt dave ( andy steve ) ").unwrap();
assert_eq!(b1,c1);
assert_eq!(b1,b2);
assert_eq!(b1,b3);
}
#[test]
fn empty_parent(){
let b1 = Bracket::from_str("matt () dave").unwrap();
let c1 = br().sib_lf("matt").sib(br()).sib_lf("dave");
assert_eq!(b1,c1);
}
#[test]
fn many_parent(){
let b1 = Bracket::from_str("matt ({[() ()]})").unwrap();
let c1 = lf("matt")
.sib(
br().sib(
br().sib(
br().sib(br()).sib(br())
)
)
);
assert_eq!(b1,c1);
}
#[test]
fn strings(){
let b1 = Bracket::from_str(r#"matt"dave""#).unwrap();
let c1 = br().sib_lf("matt").sib_lf("dave");
assert_eq!(b1,c1);
let b2 = Bracket::from_str(r#""andy \"hates\" cheese""#).unwrap();
let c2 = lf(r#"andy "hates" cheese"#);
assert_eq!(b2,c2);
}
#[test]
fn errors(){
assert!(Bracket::from_str("peop ( er").is_err());
assert!(Bracket::from_str(r#""poop"#).is_err());
}
#[test]
fn test_head_tail(){
let b1 = Bracket::from_str("hello (andy dave)").unwrap();
match b1.head().match_str(){
"hello"=>{},//Where the actual code might go
_=>panic!("Head is not hello leaf"),
}
}
#[test]
fn many_tails(){
let pb = br().sib_lf("matt").sib_lf("dave").sib_lf("pete").sib_lf("andy");
let t1 = pb.tail(); //pb is parent bracket, t1 is tail
let t4 = t1.tail_h(2).match_str();
assert_eq!(t4,"andy");
let th1 = pb.tail_h(3).match_str();
assert_eq!(t4,th1);
}
#[test]
fn test_to_string(){
let br = Bracket::from_str("matt dave( andy steve)").unwrap();
let bs = br.to_string();
assert_eq!(&bs,r#""matt" "dave" ["andy" "steve"]"#);
}
}
| {
Bracket::Leaf(s.to_string())
} | identifier_body |
lib.rs | //! # Bracket Parse
//!
//! A Utility for parsing Bracketed lists and sets of strings.
//!
//! It is a relatively lazy way of parsing items from a bracketed string,
//!
//! "hello(peter,dave)" is easy for it to handle, as are nested brackets.
//!
//! The above will result in something like
//!
//! >Branch[Leaf("hello"),Branch[Leaf("peter"),Leaf("dave")]]
//!
//! This is not intended to be super extensible right now,
//! though contributions are welcome.
//!
//! The list can also be constructed relatively simply by
//! using chained builder type methods
//!
//! ```
//! use bracket_parse::{Bracket,br};
//! use bracket_parse::Bracket::{Leaf,Branch};
//! use std::str::FromStr;
//!
//! let str1 = Bracket::from_str("hello(peter,dave)").unwrap();
//!
//! //Standard Build method
//! let basic1 = Branch(vec![Leaf("hello".to_string()),
//! Branch(vec![Leaf("peter".to_string()),
//! Leaf("dave".to_string())])]);
//!
//! //Chaining Build method
//! let chain1 = br().sib_lf("hello")
//! .sib(br().sib_lf("peter").sib_lf("dave"));
//!
//! assert_eq!(str1,basic1);
//! assert_eq!(str1,chain1);
//! ```
//!
//! It can also handle string input with escapes. Quotes are removed and the string item is
//! considered a single Leaf value;
//!
//! ```
//! use bracket_parse::{Bracket,br,lf};
//! use std::str::FromStr;
//!
//! let bk = Bracket::from_str(r#""hello"'matt"' "and \"friends\"""#).unwrap();
//! let chn = br().sib_lf("hello").sib_lf("matt\"").sib_lf("and \"friends\"");
//! assert_eq!(bk,chn);
//!
//! ```
use std::str::FromStr;
use std::fmt;
use std::fmt::Display;
use std::iter::IntoIterator;
pub mod tail;
pub use tail::{Tail};
use tail::EMPTY_BRACKET;
pub mod iter;
pub use iter::*;
#[derive(PartialEq,Debug)]
pub enum Bracket{
Branch(Vec<Bracket>),
Leaf(String),
Empty,
}
pub fn lf(s:&str)->Bracket{
Bracket::Leaf(s.to_string())
}
pub fn br()->Bracket{
Bracket::Branch(Vec::new())
}
impl FromStr for Bracket{
type Err = String;
fn from_str(s:&str)->Result<Bracket,String>{
let mut res = Bracket::Empty;
let mut it = s.chars();
let mut curr = String::new();
while let Some(c) = it.next() {
Bracket::match_char(c,&mut it,&mut curr,&mut res)?;
}
if curr.len() >0 {
res.add_sib_str(curr);
}
Ok(res)
}
}
impl<'a>IntoIterator for &'a Bracket{
type Item = &'a Bracket;
type IntoIter = BracketIter<'a>;
fn into_iter(self)->Self::IntoIter{
BracketIter::new(self)
}
}
impl Bracket{
fn add_sib_str(&mut self,s:String){
if s.len() == 0 {
return
}
self.add_sibling(Bracket::Leaf(s));
}
/// Chaining method for quickly creating a tree. Adds a sibling to this bracket;
/// if it is currently a leaf, it is promoted to a branch containing the old leaf and the new sibling.
pub fn sib(mut self,s:Self)->Self{
self.add_sibling(s);
self
}
/// Chaining method for easily adding a leaf as a sibling from an `&str`.
pub fn sib_lf(self,s:&str)->Self{
self.sib(lf(s))
}
fn add_sibling(&mut self,s:Bracket){
if s == Bracket::Empty {
return
}
let c:String = match self {
Bracket::Branch(ref mut v)=>{
v.push(s);
return
}
Bracket::Empty=>{
*self = s;
return
}
Bracket::Leaf(content)=>content.to_string(),
};
*self = Bracket::Branch(vec![Bracket::Leaf(c),s]);
}
fn match_char<I>(c:char,it:&mut I,curr:&mut String,res:&mut Bracket)->Result<(),String>
where I:Iterator<Item=char>{
match c {
'('=>{ // When Non-Lexical Lifetimes arrive, we can hopefully get rid of these curr.clone()s
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,')')?);
},
'{'=>{ //Todo make Json-esque prob needs Object Variant
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,'}')?);
},
'['=>{
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_bracket(it,']')?);
},
'"'|'\''=>{
res.add_sib_str(curr.clone());
*curr = String::new();
res.add_sibling(Bracket::from_quotes(it,c)?);
}
' '|','=>{
res.add_sib_str(curr.clone());
*curr = String::new();
},
other=>curr.push(other),
}
Ok(())
}
fn from_bracket<I:Iterator<Item=char>>(it:&mut I,delim:char)->Result<Bracket,String>{
let mut res = Bracket::Branch(Vec::new());
let mut curr = String::new();
while let Some(c) = it.next() {
if c == delim {
res.add_sib_str(curr.clone());
return Ok(res);
}
Bracket::match_char(c,it,&mut curr,&mut res)?;
}
Err(format!("Close Delim '{}' not found",delim))
}
fn from_quotes<I:Iterator<Item=char>>(it:&mut I,delim:char)->Result<Bracket,String>{
let mut curr = String::new();
while let Some(c) = it.next() {
if c == delim {
return Ok(Bracket::Leaf(curr));
}
match c {
'\\'=>{
match it.next(){
Some(c2)=>{
curr.push(c2);
continue
},
None=>return Err("Escape before end of string".to_string()),
}
},
_=> curr.push(c),
}
}
Err(format!("Close Delim '{}' not found",delim))
}
pub fn head<'a>(&'a self)->&'a Bracket{
match self{
Bracket::Branch(v)=>match v.len(){
0 => &EMPTY_BRACKET,
_ => &v[0],
}
_ => &EMPTY_BRACKET,
}
}
pub fn tail<'a>(&'a self)->Tail<'a>{
match self{
Bracket::Branch(v)=>match v.len(){
0|1 =>Tail::Empty,
_=>Tail::Rest(&v[1..]),
}
_=>Tail::Empty,
}
}
pub fn tail_n<'a>(&'a self,n:usize)->Tail<'a>{
match self{
Bracket::Branch(v)=>{
if v.len() <= n {
return Tail::Empty;
}
Tail::Rest(&v[n..])
}
_=>Tail::Empty,
}
}
pub fn tail_h<'a>(&'a self, n:usize)->&'a Bracket{
match self{
Bracket::Branch(v)=>{
if v.len() <= n{
return &EMPTY_BRACKET;
}
&v[n]
}
_=>&EMPTY_BRACKET,
}
}
pub fn head_tail<'a>(&'a self)->(&'a Bracket,Tail<'a>){
(self.head(),self.tail())
}
pub fn match_str<'a>(&'a self)->&'a str{
match self {
Bracket::Leaf(ref s)=>s.as_ref(),
_=>"",
}
}
}
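/// Formats the tree for display: leaves are printed quoted, nested branches are wrapped
/// in square brackets, and siblings are separated by spaces (see `test_to_string`).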
impl Display for Bracket {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Bracket::Branch(ref v)=>{
let mut gap = "";
for b in v {
let res = match b {
Bracket::Branch(_)=>write!(f,"{}[{}]",gap,b),
_=>write!(f,"{}{}",gap,b),
};
if res.is_err(){
return res;
}
gap = " ";
}
Ok(())
},
Bracket::Leaf(s)=>{
//TODO handle Escapes
write!(f,"\"{}\"",s)
},
_=>{ write!(f,"--EMPTY--") },
}
}
}
#[cfg(test)]
mod tests {
use super::{Bracket,br,lf};
use std::str::FromStr;
#[test]
fn | () {
let b1 = Bracket::from_str("matt dave (andy steve)").unwrap();
let c1 = br().sib_lf("matt").sib_lf("dave").sib(
br().sib_lf("andy").sib_lf("steve")
);
let b2 = Bracket::from_str("matt dave( andy steve)").unwrap();
let b3 = Bracket::from_str(" matt dave ( andy steve ) ").unwrap();
assert_eq!(b1,c1);
assert_eq!(b1,b2);
assert_eq!(b1,b3);
}
#[test]
fn empty_parent(){
let b1 = Bracket::from_str("matt () dave").unwrap();
let c1 = br().sib_lf("matt").sib(br()).sib_lf("dave");
assert_eq!(b1,c1);
}
#[test]
fn many_parent(){
let b1 = Bracket::from_str("matt ({[() ()]})").unwrap();
let c1 = lf("matt")
.sib(
br().sib(
br().sib(
br().sib(br()).sib(br())
)
)
);
assert_eq!(b1,c1);
}
#[test]
fn strings(){
let b1 = Bracket::from_str(r#"matt"dave""#).unwrap();
let c1 = br().sib_lf("matt").sib_lf("dave");
assert_eq!(b1,c1);
let b2 = Bracket::from_str(r#""andy \"hates\" cheese""#).unwrap();
let c2 = lf(r#"andy "hates" cheese"#);
assert_eq!(b2,c2);
}
#[test]
fn errors(){
assert!(Bracket::from_str("peop ( er").is_err());
assert!(Bracket::from_str(r#""poop"#).is_err());
}
#[test]
fn test_head_tail(){
let b1 = Bracket::from_str("hello (andy dave)").unwrap();
match b1.head().match_str(){
"hello"=>{},//Where the actual code might go
_=>panic!("Head is not hello leaf"),
}
}
#[test]
fn many_tails(){
let pb = br().sib_lf("matt").sib_lf("dave").sib_lf("pete").sib_lf("andy");
let t1 = pb.tail(); //pb is parent bracket, t1 is tail
let t4 = t1.tail_h(2).match_str();
assert_eq!(t4,"andy");
let th1 = pb.tail_h(3).match_str();
assert_eq!(t4,th1);
}
#[test]
fn test_to_string(){
let br = Bracket::from_str("matt dave( andy steve)").unwrap();
let bs = br.to_string();
assert_eq!(&bs,r#""matt" "dave" ["andy" "steve"]"#);
}
}
| spaces | identifier_name |
mod.rs | //! Storage for span data shared by multiple [`Layer`]s.
//!
//! ## Using the Span Registry
//!
//! This module provides the [`Registry`] type, a [`Subscriber`] implementation
//! which tracks per-span data and exposes it to [`Layer`]s. When a `Registry`
//! is used as the base `Subscriber` of a `Layer` stack, the
//! [`layer::Context`][ctx] type will provide methods allowing `Layer`s to
//! [look up span data][lookup] stored in the registry. While [`Registry`] is a
//! reasonable default for storing spans and events, other stores that implement
//! [`LookupSpan`] and [`Subscriber`] themselves (with [`SpanData`] implemented
//! by the per-span data they store) can be used as a drop-in replacement.
//!
//! For example, we might create a `Registry` and add multiple `Layer`s like so:
//! ```rust
//! use tracing_subscriber::{registry::Registry, Layer, prelude::*};
//! # use tracing_core::Subscriber;
//! # pub struct FooLayer {}
//! # pub struct BarLayer {}
//! # impl<S: Subscriber> Layer<S> for FooLayer {}
//! # impl<S: Subscriber> Layer<S> for BarLayer {}
//! # impl FooLayer {
//! # fn new() -> Self { Self {} }
//! # }
//! # impl BarLayer {
//! # fn new() -> Self { Self {} }
//! # }
//!
//! let subscriber = Registry::default()
//! .with(FooLayer::new())
//! .with(BarLayer::new());
//! ```
//!
//! If a type implementing `Layer` depends on the functionality of a `Registry`
//! implementation, it should bound its `Subscriber` type parameter with the
//! [`LookupSpan`] trait, like so:
//!
//! ```rust
//! use tracing_subscriber::{registry, Layer};
//! use tracing_core::Subscriber;
//!
//! pub struct MyLayer {
//! //...
//! }
//!
//! impl<S> Layer<S> for MyLayer
//! where
//! S: Subscriber + for<'a> registry::LookupSpan<'a>,
//! {
//! //...
//! }
//! ```
//! When this bound is added, the `Layer` implementation will be guaranteed
//! access to the [`Context`][ctx] methods, such as [`Context::span`][lookup], that
//! require the root subscriber to be a registry.
//!
//! [`Layer`]:../layer/trait.Layer.html
//! [`Subscriber`]:
//! https://docs.rs/tracing-core/latest/tracing_core/subscriber/trait.Subscriber.html
//! [`Registry`]: struct.Registry.html
//! [ctx]:../layer/struct.Context.html
//! [lookup]:../layer/struct.Context.html#method.span
//! [`LookupSpan`]: trait.LookupSpan.html
//! [`SpanData`]: trait.SpanData.html
use tracing_core::{field::FieldSet, span::Id, Metadata};
/// A module containing a type map of span extensions.
mod extensions;
#[cfg(feature = "registry")]
mod sharded;
#[cfg(feature = "registry")]
mod stack;
pub use extensions::{Extensions, ExtensionsMut};
#[cfg(feature = "registry")]
#[cfg_attr(docsrs, doc(cfg(feature = "registry")))]
pub use sharded::Data;
#[cfg(feature = "registry")]
#[cfg_attr(docsrs, doc(cfg(feature = "registry")))]
pub use sharded::Registry;
/// Provides access to stored span data.
///
/// Subscribers which store span data and associate it with span IDs should
/// implement this trait; if they do, any [`Layer`]s wrapping them can look up
/// metadata via the [`Context`] type's [`span()`] method.
///
/// [`Layer`]:../layer/trait.Layer.html
/// [`Context`]:../layer/struct.Context.html
/// [`span()`]:../layer/struct.Context.html#method.metadata
pub trait LookupSpan<'a> {
/// The type of span data stored in this registry.
type Data: SpanData<'a>;
/// Returns the [`SpanData`] for a given `Id`, if it exists.
///
/// <div class="information">
/// <div class="tooltip ignore" style="">ⓘ<span class="tooltiptext">Note</span></div>
/// </div>
/// <div class="example-wrap" style="display:inline-block">
/// <pre class="ignore" style="white-space:normal;font:inherit;">
/// <strong>Note</strong>: users of the <code>LookupSpan</code> trait should
/// typically call the <a href="#method.span"><code>span</code> method rather
/// than this method. The <code>span</code> method is implemented by
/// <em>calling</em> <code>span_data</code>, but returns a reference which is
/// capable of performing more sophisticated queries.
/// </pre></div>
///
/// [`SpanData`]: trait.SpanData.html
fn span_data(&'a self, id: &Id) -> Option<Self::Data>;
/// Returns a [`SpanRef`] for the span with the given `Id`, if it exists.
///
/// A `SpanRef` is similar to [`SpanData`], but it allows performing
/// additional lookups against the registry that stores the wrapped data.
///
/// In general, _users_ of the `LookupSpan` trait should use this method
/// rather than the [`span_data`] method; while _implementors_ of this trait
/// should only implement `span_data`.
///
/// [`SpanRef`]: struct.SpanRef.html
/// [`SpanData`]: trait.SpanData.html
/// [`span_data`]: #method.span_data
fn span(&'a self, id: &Id) -> Option<SpanRef<'_, Self>>
where
Self: Sized,
{
let data = self.span_data(&id)?;
Some(SpanRef {
registry: self,
data,
})
}
}
/// A stored representation of data associated with a span.
pub trait SpanData<'a> {
/// Returns this span's ID.
fn id(&self) -> Id;
/// Returns a reference to the span's `Metadata`.
fn metadata(&self) -> &'static Metadata<'static>;
/// Returns a reference to the ID of this span's parent, if it has one.
fn parent(&self) -> Option<&Id>;
/// Returns a reference to this span's `Extensions`.
///
/// The extensions may be used by `Layer`s to store additional data
/// describing the span.
fn extensions(&self) -> Extensions<'_>;
/// Returns a mutable reference to this span's `Extensions`.
///
/// The extensions may be used by `Layer`s to store additional data
/// describing the span.
fn extensions_mut(&self) -> ExtensionsMut<'_>;
}
/// A reference to [span data] and the associated [registry].
///
/// This type implements all the same methods as [`SpanData`][span data], and
/// provides additional methods for querying the registry based on values from
/// the span.
///
/// [span data]: trait.SpanData.html
/// [registry]: trait.LookupSpan.html
#[derive(Debug)]
pub struct SpanRef<'a, R: LookupSpan<'a>> {
registry: &'a R,
data: R::Data,
}
/// An iterator over the parents of a span.
///
/// This is returned by the [`SpanRef::parents`] method.
///
/// [`SpanRef::parents`]: struct.SpanRef.html#method.parents
#[derive(Debug)]
pub struct Parents<'a, R> {
registry: &'a R,
next: Option<Id>,
}
/// An iterator over a span's parents, starting with the root of the trace
/// tree.
///
/// For additional details, see [`SpanRef::from_root`].
///
/// [`Span::from_root`]: struct.SpanRef.html#method.from_root
pub struct FromRoot<'a, R: LookupSpan<'a>> {
#[cfg(feature = "smallvec")]
inner: std::iter::Rev<smallvec::IntoIter<SpanRefVecArray<'a, R>>>,
#[cfg(not(feature = "smallvec"))]
inner: std::iter::Rev<std::vec::IntoIter<SpanRef<'a, R>>>,
}
#[cfg(feature = "smallvec")]
type SpanRefVecArray<'span, L> = [SpanRef<'span, L>; 16];
impl<'a, R> SpanRef<'a, R>
where
R: LookupSpan<'a>,
{
/// Returns this span's ID.
pub fn id(&self) -> Id {
| /// Returns a static reference to the span's metadata.
pub fn metadata(&self) -> &'static Metadata<'static> {
self.data.metadata()
}
/// Returns the span's name.
pub fn name(&self) -> &'static str {
self.data.metadata().name()
}
/// Returns a list of [fields] defined by the span.
///
/// [fields]: https://docs.rs/tracing-core/latest/tracing_core/field/index.html
pub fn fields(&self) -> &FieldSet {
self.data.metadata().fields()
}
/// Returns the ID of this span's parent, or `None` if this span is the root
/// of its trace tree.
pub fn parent_id(&self) -> Option<&Id> {
self.data.parent()
}
/// Returns a `SpanRef` describing this span's parent, or `None` if this
/// span is the root of its trace tree.
pub fn parent(&self) -> Option<Self> {
let id = self.data.parent()?;
let data = self.registry.span_data(id)?;
Some(Self {
registry: self.registry,
data,
})
}
/// Returns an iterator over all parents of this span, starting with the
/// immediate parent.
///
/// The iterator will first return the span's immediate parent, followed by
/// that span's parent, followed by _that_ span's parent, and so on, until
/// it reaches a root span.
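///
/// A minimal usage sketch (illustrative; `span` is assumed to be a `SpanRef`
/// already obtained from a registry):
///
/// ```ignore
/// for parent in span.parents() {
///     println!("inside span: {}", parent.name());
/// }
/// ```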
pub fn parents(&self) -> Parents<'a, R> {
Parents {
registry: self.registry,
next: self.parent().map(|parent| parent.id()),
}
}
/// Returns an iterator over all parents of this span, starting with the
/// root of the trace tree.
///
/// The iterator will return the root of the trace tree, followed by the
/// next span, and then the next, until this span's immediate parent is
/// returned.
///
/// **Note**: if the "smallvec" feature flag is not enabled, this may
/// allocate.
pub fn from_root(&self) -> FromRoot<'a, R> {
#[cfg(feature = "smallvec")]
type SpanRefVec<'span, L> = smallvec::SmallVec<SpanRefVecArray<'span, L>>;
#[cfg(not(feature = "smallvec"))]
type SpanRefVec<'span, L> = Vec<SpanRef<'span, L>>;
// An alternative way to handle this would be to use the recursive approach that
// `fmt` uses, which does _not_ entail any allocation when formatting spans.
let parents = self.parents().collect::<SpanRefVec<'a, _>>();
let inner = parents.into_iter().rev();
FromRoot { inner }
}
/// Returns a reference to this span's `Extensions`.
///
/// The extensions may be used by `Layer`s to store additional data
/// describing the span.
pub fn extensions(&self) -> Extensions<'_> {
self.data.extensions()
}
/// Returns a mutable reference to this span's `Extensions`.
///
/// The extensions may be used by `Layer`s to store additional data
/// describing the span.
pub fn extensions_mut(&self) -> ExtensionsMut<'_> {
self.data.extensions_mut()
}
}
impl<'a, R> Iterator for Parents<'a, R>
where
R: LookupSpan<'a>,
{
type Item = SpanRef<'a, R>;
fn next(&mut self) -> Option<Self::Item> {
let id = self.next.take()?;
let span = self.registry.span(&id)?;
self.next = span.parent().map(|parent| parent.id());
Some(span)
}
}
// === impl FromRoot ===
impl<'span, R> Iterator for FromRoot<'span, R>
where
R: LookupSpan<'span>,
{
type Item = SpanRef<'span, R>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.inner.next()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
impl<'span, R> std::fmt::Debug for FromRoot<'span, R>
where
R: LookupSpan<'span>,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.pad("FromRoot {.. }")
}
}
| self.data.id()
}
| identifier_body |
mod.rs | //! Storage for span data shared by multiple [`Layer`]s.
//!
//! ## Using the Span Registry
//!
//! This module provides the [`Registry`] type, a [`Subscriber`] implementation
//! which tracks per-span data and exposes it to [`Layer`]s. When a `Registry`
//! is used as the base `Subscriber` of a `Layer` stack, the
//! [`layer::Context`][ctx] type will provide methods allowing `Layer`s to
//! [look up span data][lookup] stored in the registry. While [`Registry`] is a
//! reasonable default for storing spans and events, other stores that implement
//! [`LookupSpan`] and [`Subscriber`] themselves (with [`SpanData`] implemented
//! by the per-span data they store) can be used as a drop-in replacement.
//!
//! For example, we might create a `Registry` and add multiple `Layer`s like so:
//! ```rust
//! use tracing_subscriber::{registry::Registry, Layer, prelude::*};
//! # use tracing_core::Subscriber;
//! # pub struct FooLayer {}
//! # pub struct BarLayer {}
//! # impl<S: Subscriber> Layer<S> for FooLayer {}
//! # impl<S: Subscriber> Layer<S> for BarLayer {}
//! # impl FooLayer {
//! # fn new() -> Self { Self {} }
//! # }
//! # impl BarLayer {
//! # fn new() -> Self { Self {} }
//! # }
//!
//! let subscriber = Registry::default()
//! .with(FooLayer::new())
//! .with(BarLayer::new());
//! ```
//!
//! If a type implementing `Layer` depends on the functionality of a `Registry`
//! implementation, it should bound its `Subscriber` type parameter with the
//! [`LookupSpan`] trait, like so:
//!
//! ```rust
//! use tracing_subscriber::{registry, Layer};
//! use tracing_core::Subscriber;
//!
//! pub struct MyLayer {
//! //...
//! }
//!
//! impl<S> Layer<S> for MyLayer
//! where
//! S: Subscriber + for<'a> registry::LookupSpan<'a>,
//! {
//! //...
//! }
//! ```
//! When this bound is added, the `Layer` implementation will be guaranteed
//! access to the [`Context`][ctx] methods, such as [`Context::span`][lookup], that
//! require the root subscriber to be a registry.
//!
//! [`Layer`]:../layer/trait.Layer.html
//! [`Subscriber`]:
//! https://docs.rs/tracing-core/latest/tracing_core/subscriber/trait.Subscriber.html
//! [`Registry`]: struct.Registry.html
//! [ctx]:../layer/struct.Context.html
//! [lookup]:../layer/struct.Context.html#method.span
//! [`LookupSpan`]: trait.LookupSpan.html
//! [`SpanData`]: trait.SpanData.html
use tracing_core::{field::FieldSet, span::Id, Metadata};
/// A module containing a type map of span extensions.
mod extensions;
#[cfg(feature = "registry")]
mod sharded;
#[cfg(feature = "registry")]
mod stack;
pub use extensions::{Extensions, ExtensionsMut};
#[cfg(feature = "registry")]
#[cfg_attr(docsrs, doc(cfg(feature = "registry")))]
pub use sharded::Data;
#[cfg(feature = "registry")]
#[cfg_attr(docsrs, doc(cfg(feature = "registry")))]
pub use sharded::Registry;
/// Provides access to stored span data.
///
/// Subscribers which store span data and associate it with span IDs should
/// implement this trait; if they do, any [`Layer`]s wrapping them can look up
/// metadata via the [`Context`] type's [`span()`] method.
///
/// [`Layer`]:../layer/trait.Layer.html
/// [`Context`]:../layer/struct.Context.html
/// [`span()`]:../layer/struct.Context.html#method.metadata
pub trait LookupSpan<'a> {
/// The type of span data stored in this registry.
type Data: SpanData<'a>;
/// Returns the [`SpanData`] for a given `Id`, if it exists.
///
/// <div class="information">
/// <div class="tooltip ignore" style="">ⓘ<span class="tooltiptext">Note</span></div>
/// </div>
/// <div class="example-wrap" style="display:inline-block">
/// <pre class="ignore" style="white-space:normal;font:inherit;">
/// <strong>Note</strong>: users of the <code>LookupSpan</code> trait should
/// typically call the <a href="#method.span"><code>span</code> method rather
/// than this method. The <code>span</code> method is implemented by
/// <em>calling</em> <code>span_data</code>, but returns a reference which is
/// capable of performing more sophisticated queries.
/// </pre></div>
///
/// [`SpanData`]: trait.SpanData.html
fn span_data(&'a self, id: &Id) -> Option<Self::Data>;
/// Returns a [`SpanRef`] for the span with the given `Id`, if it exists.
///
/// A `SpanRef` is similar to [`SpanData`], but it allows performing
/// additional lookups against the registry that stores the wrapped data.
///
/// In general, _users_ of the `LookupSpan` trait should use this method
/// rather than the [`span_data`] method; while _implementors_ of this trait
/// should only implement `span_data`.
///
/// [`SpanRef`]: struct.SpanRef.html
/// [`SpanData`]: trait.SpanData.html
/// [`span_data`]: #method.span_data
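///
/// A minimal usage sketch (illustrative; `registry` is assumed to implement
/// `LookupSpan` and `id` to be a valid span `Id`):
///
/// ```ignore
/// if let Some(span) = registry.span(&id) {
///     println!("found span: {}", span.name());
/// }
/// ```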
fn span(&'a self, id: &Id) -> Option<SpanRef<'_, Self>>
where
Self: Sized,
{
let data = self.span_data(&id)?;
Some(SpanRef {
registry: self,
data,
})
}
}
/// A stored representation of data associated with a span.
pub trait SpanData<'a> {
/// Returns this span's ID.
fn id(&self) -> Id;
/// Returns a reference to the span's `Metadata`.
fn metadata(&self) -> &'static Metadata<'static>;
/// Returns a reference to the ID of this span's parent, if it has one.
fn parent(&self) -> Option<&Id>;
/// Returns a reference to this span's `Extensions`.
///
/// The extensions may be used by `Layer`s to store additional data
/// describing the span.
fn extensions(&self) -> Extensions<'_>;
/// Returns a mutable reference to this span's `Extensions`.
///
/// The extensions may be used by `Layer`s to store additional data
/// describing the span.
fn extensions_mut(&self) -> ExtensionsMut<'_>;
}
/// A reference to [span data] and the associated [registry].
///
/// This type implements all the same methods as [`SpanData`][span data], and
/// provides additional methods for querying the registry based on values from
/// the span.
///
/// [span data]: trait.SpanData.html
/// [registry]: trait.LookupSpan.html
#[derive(Debug)]
pub struct SpanRef<'a, R: LookupSpan<'a>> {
registry: &'a R,
data: R::Data,
}
/// An iterator over the parents of a span.
///
/// This is returned by the [`SpanRef::parents`] method.
///
/// [`SpanRef::parents`]: struct.SpanRef.html#method.parents
#[derive(Debug)]
pub struct Parents<'a, R> {
registry: &'a R,
next: Option<Id>,
}
/// An iterator over a span's parents, starting with the root of the trace
/// tree.
///
/// For additional details, see [`SpanRef::from_root`].
///
/// [`Span::from_root`]: struct.SpanRef.html#method.from_root
pub struct FromRoot<'a, R: LookupSpan<'a>> {
#[cfg(feature = "smallvec")]
inner: std::iter::Rev<smallvec::IntoIter<SpanRefVecArray<'a, R>>>,
#[cfg(not(feature = "smallvec"))]
inner: std::iter::Rev<std::vec::IntoIter<SpanRef<'a, R>>>,
}
#[cfg(feature = "smallvec")]
type SpanRefVecArray<'span, L> = [SpanRef<'span, L>; 16];
impl<'a, R> SpanRef<'a, R>
where
R: LookupSpan<'a>,
{
/// Returns this span's ID.
pub fn id(&self) -> Id {
self.data.id()
}
/// Returns a static reference to the span's metadata.
pub fn metadata(&self) -> &'static Metadata<'static> {
self.data.metadata()
}
/// Returns the span's name.
pub fn name(&self) -> &'static str {
self.data.metadata().name()
}
/// Returns a list of [fields] defined by the span.
///
/// [fields]: https://docs.rs/tracing-core/latest/tracing_core/field/index.html
pub fn fields(&self) -> &FieldSet {
self.data.metadata().fields()
}
/// Returns the ID of this span's parent, or `None` if this span is the root
/// of its trace tree.
pub fn parent_id(&self) -> Option<&Id> {
self.data.parent()
}
/// Returns a `SpanRef` describing this span's parent, or `None` if this
/// span is the root of its trace tree.
pub fn parent(&self) -> Option<Self> {
let id = self.data.parent()?;
let data = self.registry.span_data(id)?;
Some(Self {
registry: self.registry,
data,
})
}
/// Returns an iterator over all parents of this span, starting with the
/// immediate parent.
///
/// The iterator will first return the span's immediate parent, followed by
/// that span's parent, followed by _that_ span's parent, and so on, until
/// it reaches a root span.
pub fn parents(&self) -> Parents<'a, R> {
Parents {
registry: self.registry,
next: self.parent().map(|parent| parent.id()),
}
}
/// Returns an iterator over all parents of this span, starting with the
/// root of the trace tree.
///
/// The iterator will return the root of the trace tree, followed by the
/// next span, and then the next, until this span's immediate parent is
/// returned.
///
/// **Note**: if the "smallvec" feature flag is not enabled, this may
/// allocate.
pub fn from_root(&self) -> FromRoot<'a, R> {
#[cfg(feature = "smallvec")]
type SpanRefVec<'span, L> = smallvec::SmallVec<SpanRefVecArray<'span, L>>;
#[cfg(not(feature = "smallvec"))]
type SpanRefVec<'span, L> = Vec<SpanRef<'span, L>>;
// An alternative way to handle this would be to use the recursive approach that
// `fmt` uses, which does _not_ entail any allocation when formatting spans.
let parents = self.parents().collect::<SpanRefVec<'a, _>>();
let inner = parents.into_iter().rev();
FromRoot { inner }
}
/// Returns a reference to this span's `Extensions`.
///
/// The extensions may be used by `Layer`s to store additional data
/// describing the span.
pub fn ex | self) -> Extensions<'_> {
self.data.extensions()
}
/// Returns a mutable reference to this span's `Extensions`.
///
/// The extensions may be used by `Layer`s to store additional data
/// describing the span.
pub fn extensions_mut(&self) -> ExtensionsMut<'_> {
self.data.extensions_mut()
}
}
impl<'a, R> Iterator for Parents<'a, R>
where
R: LookupSpan<'a>,
{
type Item = SpanRef<'a, R>;
fn next(&mut self) -> Option<Self::Item> {
let id = self.next.take()?;
let span = self.registry.span(&id)?;
self.next = span.parent().map(|parent| parent.id());
Some(span)
}
}
// === impl FromRoot ===
impl<'span, R> Iterator for FromRoot<'span, R>
where
R: LookupSpan<'span>,
{
type Item = SpanRef<'span, R>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.inner.next()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
impl<'span, R> std::fmt::Debug for FromRoot<'span, R>
where
R: LookupSpan<'span>,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.pad("FromRoot {.. }")
}
}
| tensions(& | identifier_name |
mod.rs | //! Storage for span data shared by multiple [`Layer`]s.
//!
//! ## Using the Span Registry
//!
//! This module provides the [`Registry`] type, a [`Subscriber`] implementation
//! which tracks per-span data and exposes it to [`Layer`]s. When a `Registry`
//! is used as the base `Subscriber` of a `Layer` stack, the
//! [`layer::Context`][ctx] type will provide methods allowing `Layer`s to
//! [look up span data][lookup] stored in the registry. While [`Registry`] is a
//! reasonable default for storing spans and events, other stores that implement
//! [`LookupSpan`] and [`Subscriber`] themselves (with [`SpanData`] implemented
//! by the per-span data they store) can be used as a drop-in replacement.
//!
//! For example, we might create a `Registry` and add multiple `Layer`s like so:
//! ```rust
//! use tracing_subscriber::{registry::Registry, Layer, prelude::*};
//! # use tracing_core::Subscriber;
//! # pub struct FooLayer {}
//! # pub struct BarLayer {}
//! # impl<S: Subscriber> Layer<S> for FooLayer {}
//! # impl<S: Subscriber> Layer<S> for BarLayer {}
//! # impl FooLayer {
//! # fn new() -> Self { Self {} }
//! # }
//! # impl BarLayer {
//! # fn new() -> Self { Self {} }
//! # }
//!
//! let subscriber = Registry::default()
//! .with(FooLayer::new())
//! .with(BarLayer::new());
//! ```
//!
//! If a type implementing `Layer` depends on the functionality of a `Registry`
//! implementation, it should bound its `Subscriber` type parameter with the
//! [`LookupSpan`] trait, like so:
//!
//! ```rust
//! use tracing_subscriber::{registry, Layer};
//! use tracing_core::Subscriber;
//!
//! pub struct MyLayer {
//! //...
//! }
//!
//! impl<S> Layer<S> for MyLayer
//! where
//! S: Subscriber + for<'a> registry::LookupSpan<'a>,
//! {
//! //...
//! }
//! ```
//! When this bound is added, the `Layer` implementation will be guaranteed
//! access to the [`Context`][ctx] methods, such as [`Context::span`][lookup], that
//! require the root subscriber to be a registry.
//!
//! [`Layer`]:../layer/trait.Layer.html
//! [`Subscriber`]:
//! https://docs.rs/tracing-core/latest/tracing_core/subscriber/trait.Subscriber.html
//! [`Registry`]: struct.Registry.html
//! [ctx]:../layer/struct.Context.html
//! [lookup]:../layer/struct.Context.html#method.span
//! [`LookupSpan`]: trait.LookupSpan.html
//! [`SpanData`]: trait.SpanData.html
use tracing_core::{field::FieldSet, span::Id, Metadata};
/// A module containing a type map of span extensions.
mod extensions;
#[cfg(feature = "registry")]
mod sharded;
#[cfg(feature = "registry")]
mod stack;
pub use extensions::{Extensions, ExtensionsMut};
#[cfg(feature = "registry")]
#[cfg_attr(docsrs, doc(cfg(feature = "registry")))]
pub use sharded::Data;
#[cfg(feature = "registry")] | pub use sharded::Registry;
/// Provides access to stored span data.
///
/// Subscribers which store span data and associate it with span IDs should
/// implement this trait; if they do, any [`Layer`]s wrapping them can look up
/// metadata via the [`Context`] type's [`span()`] method.
///
/// [`Layer`]:../layer/trait.Layer.html
/// [`Context`]:../layer/struct.Context.html
/// [`span()`]:../layer/struct.Context.html#method.metadata
pub trait LookupSpan<'a> {
/// The type of span data stored in this registry.
type Data: SpanData<'a>;
/// Returns the [`SpanData`] for a given `Id`, if it exists.
///
/// <div class="information">
/// <div class="tooltip ignore" style="">ⓘ<span class="tooltiptext">Note</span></div>
/// </div>
/// <div class="example-wrap" style="display:inline-block">
/// <pre class="ignore" style="white-space:normal;font:inherit;">
/// <strong>Note</strong>: users of the <code>LookupSpan</code> trait should
/// typically call the <a href="#method.span"><code>span</code> method rather
/// than this method. The <code>span</code> method is implemented by
/// <em>calling</em> <code>span_data</code>, but returns a reference which is
/// capable of performing more sophisticated queries.
/// </pre></div>
///
/// [`SpanData`]: trait.SpanData.html
fn span_data(&'a self, id: &Id) -> Option<Self::Data>;
/// Returns a [`SpanRef`] for the span with the given `Id`, if it exists.
///
/// A `SpanRef` is similar to [`SpanData`], but it allows performing
/// additional lookups against the registry that stores the wrapped data.
///
/// In general, _users_ of the `LookupSpan` trait should use this method
/// rather than the [`span_data`] method; while _implementors_ of this trait
/// should only implement `span_data`.
///
/// [`SpanRef`]: struct.SpanRef.html
/// [`SpanData`]: trait.SpanData.html
/// [`span_data`]: #method.span_data
fn span(&'a self, id: &Id) -> Option<SpanRef<'_, Self>>
where
Self: Sized,
{
let data = self.span_data(&id)?;
Some(SpanRef {
registry: self,
data,
})
}
}
/// A stored representation of data associated with a span.
pub trait SpanData<'a> {
/// Returns this span's ID.
fn id(&self) -> Id;
/// Returns a reference to the span's `Metadata`.
fn metadata(&self) -> &'static Metadata<'static>;
/// Returns a reference to the ID of this span's parent, if it has one.
fn parent(&self) -> Option<&Id>;
/// Returns a reference to this span's `Extensions`.
///
/// The extensions may be used by `Layer`s to store additional data
/// describing the span.
fn extensions(&self) -> Extensions<'_>;
/// Returns a mutable reference to this span's `Extensions`.
///
/// The extensions may be used by `Layer`s to store additional data
/// describing the span.
fn extensions_mut(&self) -> ExtensionsMut<'_>;
}
/// A reference to [span data] and the associated [registry].
///
/// This type implements all the same methods as [`SpanData`][span data], and
/// provides additional methods for querying the registry based on values from
/// the span.
///
/// [span data]: trait.SpanData.html
/// [registry]: trait.LookupSpan.html
#[derive(Debug)]
pub struct SpanRef<'a, R: LookupSpan<'a>> {
registry: &'a R,
data: R::Data,
}
/// An iterator over the parents of a span.
///
/// This is returned by the [`SpanRef::parents`] method.
///
/// [`SpanRef::parents`]: struct.SpanRef.html#method.parents
#[derive(Debug)]
pub struct Parents<'a, R> {
registry: &'a R,
next: Option<Id>,
}
/// An iterator over a span's parents, starting with the root of the trace
/// tree.
///
/// For additional details, see [`SpanRef::from_root`].
///
/// [`Span::from_root`]: struct.SpanRef.html#method.from_root
pub struct FromRoot<'a, R: LookupSpan<'a>> {
#[cfg(feature = "smallvec")]
inner: std::iter::Rev<smallvec::IntoIter<SpanRefVecArray<'a, R>>>,
#[cfg(not(feature = "smallvec"))]
inner: std::iter::Rev<std::vec::IntoIter<SpanRef<'a, R>>>,
}
#[cfg(feature = "smallvec")]
type SpanRefVecArray<'span, L> = [SpanRef<'span, L>; 16];
impl<'a, R> SpanRef<'a, R>
where
R: LookupSpan<'a>,
{
/// Returns this span's ID.
pub fn id(&self) -> Id {
self.data.id()
}
/// Returns a static reference to the span's metadata.
pub fn metadata(&self) -> &'static Metadata<'static> {
self.data.metadata()
}
/// Returns the span's name.
pub fn name(&self) -> &'static str {
self.data.metadata().name()
}
/// Returns a list of [fields] defined by the span.
///
/// [fields]: https://docs.rs/tracing-core/latest/tracing_core/field/index.html
pub fn fields(&self) -> &FieldSet {
self.data.metadata().fields()
}
/// Returns the ID of this span's parent, or `None` if this span is the root
/// of its trace tree.
pub fn parent_id(&self) -> Option<&Id> {
self.data.parent()
}
/// Returns a `SpanRef` describing this span's parent, or `None` if this
/// span is the root of its trace tree.
pub fn parent(&self) -> Option<Self> {
let id = self.data.parent()?;
let data = self.registry.span_data(id)?;
Some(Self {
registry: self.registry,
data,
})
}
/// Returns an iterator over all parents of this span, starting with the
/// immediate parent.
///
/// The iterator will first return the span's immediate parent, followed by
/// that span's parent, followed by _that_ span's parent, and so on, until
/// it reaches a root span.
pub fn parents(&self) -> Parents<'a, R> {
Parents {
registry: self.registry,
next: self.parent().map(|parent| parent.id()),
}
}
/// Returns an iterator over all parents of this span, starting with the
/// root of the trace tree.
///
/// The iterator will return the root of the trace tree, followed by the
/// next span, and then the next, until this span's immediate parent is
/// returned.
///
/// **Note**: if the "smallvec" feature flag is not enabled, this may
/// allocate.
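///
/// A minimal usage sketch (illustrative; `span` is assumed to be a `SpanRef`):
///
/// ```ignore
/// // Span names from the root of the trace tree down to this span's parent.
/// let path: Vec<&str> = span.from_root().map(|s| s.name()).collect();
/// ```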
pub fn from_root(&self) -> FromRoot<'a, R> {
#[cfg(feature = "smallvec")]
type SpanRefVec<'span, L> = smallvec::SmallVec<SpanRefVecArray<'span, L>>;
#[cfg(not(feature = "smallvec"))]
type SpanRefVec<'span, L> = Vec<SpanRef<'span, L>>;
// An alternative way to handle this would be to use the recursive approach that
// `fmt` uses, which does _not_ entail any allocation when formatting spans.
let parents = self.parents().collect::<SpanRefVec<'a, _>>();
let inner = parents.into_iter().rev();
FromRoot { inner }
}
/// Returns a reference to this span's `Extensions`.
///
/// The extensions may be used by `Layer`s to store additional data
/// describing the span.
pub fn extensions(&self) -> Extensions<'_> {
self.data.extensions()
}
/// Returns a mutable reference to this span's `Extensions`.
///
/// The extensions may be used by `Layer`s to store additional data
/// describing the span.
pub fn extensions_mut(&self) -> ExtensionsMut<'_> {
self.data.extensions_mut()
}
}
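// --- Added illustrative sketch (not part of the original module) ---
// A minimal example of how downstream code might combine the accessors above to
// build a root-to-leaf list of span names. The helper name and its use here are
// assumptions for illustration only.
#[allow(dead_code)]
fn scope_path<'a, R: LookupSpan<'a>>(span: &SpanRef<'a, R>) -> Vec<&'static str> {
    // `from_root` yields ancestors ordered from the trace root down to the
    // immediate parent, so appending the span itself completes the path.
    let mut names: Vec<&'static str> = span.from_root().map(|s| s.name()).collect();
    names.push(span.name());
    names
}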
impl<'a, R> Iterator for Parents<'a, R>
where
R: LookupSpan<'a>,
{
type Item = SpanRef<'a, R>;
fn next(&mut self) -> Option<Self::Item> {
let id = self.next.take()?;
let span = self.registry.span(&id)?;
self.next = span.parent().map(|parent| parent.id());
Some(span)
}
}
// === impl FromRoot ===
impl<'span, R> Iterator for FromRoot<'span, R>
where
R: LookupSpan<'span>,
{
type Item = SpanRef<'span, R>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.inner.next()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
impl<'span, R> std::fmt::Debug for FromRoot<'span, R>
where
R: LookupSpan<'span>,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.pad("FromRoot {.. }")
}
} | #[cfg_attr(docsrs, doc(cfg(feature = "registry")))] | random_line_split |
docker_compose.rs | //! Provides utility functions for [ServerPlugin] and [ServerHandle] implementations that use Docker Compose.
//!
//! These functions all assume that the server has a dedicated directory, which contains a custom shell
//! script that wraps `docker-compose` with any setup, environment variables, etc. needed to run things
//! correctly for that FHIR server.
use super::ServerPluginWrapper;
use crate::servers::{ServerHandle, ServerName, ServerPlugin};
use crate::AppState;
use async_trait::async_trait;
use eyre::{eyre, Context, Result};
use std::ffi::OsStr;
use std::fmt::Debug;
use std::path::PathBuf;
use std::process::Command;
use std::process::Output;
use url::Url;
/// Each instance of this struct represents a particular FHIR Server implementation, where the implementation
/// is launched and managed via Docker Compose.
#[derive(Clone, Debug)]
pub struct DockerComposeServerPlugin {
server_name: ServerName,
server_script: PathBuf,
base_url: Url,
request_builder_factory:
fn(client: reqwest::Client, method: http::Method, url: Url) -> reqwest::RequestBuilder,
}
impl DockerComposeServerPlugin {
/// Returns the [PathBuf] to the `docker compose` wrapper script for this server.
fn server_script(&self) -> PathBuf {
self.server_script.clone()
}
/// Returns the base [Url] that the server will use, once launched.
fn base_url(&self) -> &Url {
&self.base_url
}
}
impl DockerComposeServerPlugin {
/// Constructs a new `DockerComposeServerPlugin` instance that will represent a particular FHIR Server
/// implementation.
///
/// Parameters:
/// * `server_name`: the [ServerName] that will uniquely identify the FHIR Server implementation
/// * `server_script`: a [PathBuf] to the shell script that wraps the `docker compose` command for this
/// particular FHIR Server implementation
/// * `base_url`: the base [Url] that should be used for all requests to the FHIR Server, once launched
/// * `request_builder_factory`: a function that can produce the [reqwest::RequestBuilder] to use when
/// querying the FHIR Server, once launched
pub fn new(
server_name: ServerName,
server_script: PathBuf,
base_url: Url,
request_builder_factory: fn(
client: reqwest::Client,
method: http::Method,
url: Url,
) -> reqwest::RequestBuilder,
) -> DockerComposeServerPlugin {
DockerComposeServerPlugin {
server_name,
server_script,
base_url,
request_builder_factory,
}
}
}
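// --- Added illustrative sketch (not part of the original module) ---
// One possible `request_builder_factory` with the signature expected by
// `DockerComposeServerPlugin::new`. It assumes the `http::Method` used here is the
// same type `reqwest` re-exports, and the FHIR JSON `Accept` header is an arbitrary
// choice; real plugins may add authentication or other headers instead.
#[allow(dead_code)]
fn plain_request_builder_factory(
    client: reqwest::Client,
    method: http::Method,
    url: Url,
) -> reqwest::RequestBuilder {
    // Forward the method and URL unchanged and ask for a FHIR JSON response.
    client
        .request(method, url)
        .header("Accept", "application/fhir+json")
}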
#[async_trait]
impl ServerPlugin for DockerComposeServerPlugin {
fn server_name(&self) -> &ServerName {
&self.server_name
}
async fn launch(&self, app_state: &AppState) -> Result<Box<dyn ServerHandle>> {
launch_server(app_state, self).await
}
}
/// Runs the specified Docker Compose subcommand with the specified arguments, for the specified FHIR Server
/// implementation.
///
/// Parameters:
/// * `server_plugin`: the [DockerComposeServerPlugin] that represents the FHIR Server implementation to run
/// the command for/against
/// * `args`: the Docker Compose subcommand and options to run, e.g. `["up", "--detach"]`
#[tracing::instrument(level = "info", skip(server_plugin))]
fn run_docker_compose<I, S>(server_plugin: &DockerComposeServerPlugin, args: I) -> Result<Output>
where
I: IntoIterator<Item = S> + Debug,
S: AsRef<OsStr>,
{
/*
* Run the wrapper script with the specified Docker Compose subcommand and arguments.
*/
let docker_compose_output = Command::new(server_plugin.server_script())
.args(args)
.output()
.with_context(|| {
format!(
"Error returned by control command for the '{}' FHIR server.",
server_plugin.server_name()
)
})?;
if !docker_compose_output.status.success() {
return Err(eyre!(crate::errors::AppError::ChildProcessFailure(
docker_compose_output.status,
format!(
"Error returned by control command for the '{}' FHIR server.",
server_plugin.server_name()
),
String::from_utf8_lossy(&docker_compose_output.stdout).into(),
String::from_utf8_lossy(&docker_compose_output.stderr).into()
)));
}
Ok(docker_compose_output)
}
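// --- Added illustrative sketch (not part of the original module) ---
// How a caller might reuse `run_docker_compose` for an arbitrary subcommand and
// surface its stdout; the `ps` subcommand and helper name chosen here are arbitrary.
#[allow(dead_code)]
fn docker_compose_ps(server_plugin: &DockerComposeServerPlugin) -> Result<String> {
    let output = run_docker_compose(server_plugin, &["ps"])?;
    // `Output::stdout` is raw bytes, so convert lossily for display/logging.
    Ok(String::from_utf8_lossy(&output.stdout).to_string())
}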
/// Launches the server, producing a boxed [DockerComposeServerHandle].
///
/// Parameters:
/// * `app_state`: the application's [AppState]
/// * `server_plugin`: the [DockerComposeServerPlugin] for the server to launch
async fn launch_server(
app_state: &AppState,
server_plugin: &DockerComposeServerPlugin,
) -> Result<Box<dyn ServerHandle>> {
/*
* Build and launch the server.
*/
run_docker_compose(server_plugin, &["up", "--detach"]).with_context(|| {
format!(
"Running '{} up --detach' failed.",
server_plugin
.server_script()
.file_name()
.expect("Unable to get control script name.")
.to_string_lossy()
)
})?;
/*
* The server containers have now been started, though they're not necessarily ready yet. Build a
* handle for it, copying any fields from the plugin that will be needed (as we can't safely downcast
* the plugin, so this is the only way to have access to those fields from the handle).
*/
let server_plugin = app_state
.find_server_plugin(server_plugin.server_name().as_str())
.expect("Unable to find server plugin");
let http_client = super::client_default()?;
let server_handle = DockerComposeServerHandle {
server_plugin: server_plugin.clone(),
http_client,
};
// Wait (up to a timeout) for the server to be ready.
match wait_for_ready(app_state, &server_handle).await {
Err(err) => {
server_handle.emit_logs_info()?;
Err(err)
}
Ok(_) => {
let server_handle: Box<dyn ServerHandle> = Box::new(server_handle);
Ok(server_handle)
}
}
}
/// Checks the specified server repeatedly to see if it is ready, up to a hardcoded timeout.
///
/// Parameters:
/// * `app_state`: the application's [AppState]
/// * `server_handle`: the [DockerComposeServerHandle] to test
///
/// Returns an empty [Result], where an error indicates that the server was not ready.
#[tracing::instrument(level = "debug", skip(app_state, server_handle))]
async fn wait_for_ready(
app_state: &AppState,
server_handle: &DockerComposeServerHandle,
) -> Result<()> {
let probe_result = tokio::time::timeout(std::time::Duration::from_secs(60 * 5), async {
let mut ready = false;
let mut probe = None;
while !ready {
probe = Some(
crate::test_framework::metadata::check_metadata_operation(app_state, server_handle)
.await,
);
ready = probe.as_ref().expect("probe result missing").is_ok();
if !ready {
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
}
}
probe.expect("probe results missing")
})
.await
.with_context(|| {
format!(
"Timed out while waiting for server '{}' to launch.",
server_handle.plugin().server_name()
)
})?;
match probe_result {
Err(err) => {
server_handle.emit_logs_info()?;
Err(err)
}
Ok(_) => Ok(()),
}
}
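// --- Added illustrative sketch (not part of the original module) ---
// The same "poll until ready, bounded by a timeout" shape used by `wait_for_ready`,
// reduced to a generic helper. The 5-minute bound and 500ms poll interval mirror the
// hardcoded values above; the helper itself is hypothetical.
#[allow(dead_code)]
async fn poll_until_ready<F, Fut>(mut probe: F) -> Result<()>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = bool>,
{
    tokio::time::timeout(std::time::Duration::from_secs(60 * 5), async {
        // Re-run the probe every 500ms until it reports success.
        while !probe().await {
            tokio::time::sleep(std::time::Duration::from_millis(500)).await;
        }
    })
    .await
    .map_err(|_| eyre!("timed out while waiting for the server to become ready"))
}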
/// Represents a running instance of a [DockerComposeServerPlugin] instance.
struct DockerComposeServerHandle {
server_plugin: ServerPluginWrapper,
http_client: reqwest::Client,
}
#[async_trait]
impl ServerHandle for DockerComposeServerHandle {
fn plugin(&self) -> &ServerPluginWrapper {
&self.server_plugin
}
fn base_url(&self) -> url::Url {
let server_plugin = server_plugin_downcast(self);
server_plugin.base_url().clone()
}
fn client(&self) -> Result<reqwest::Client> {
Ok(self.http_client.clone())
}
fn request_builder(
&self,
client: reqwest::Client,
method: http::Method,
url: Url,
) -> reqwest::RequestBuilder {
let server_plugin = server_plugin_downcast(self);
(server_plugin.request_builder_factory)(client, method, url)
}
fn | (&self) -> Result<String> {
let server_plugin = server_plugin_downcast(self);
match run_docker_compose(server_plugin, &["logs", "--no-color"]).with_context(|| {
format!(
"Running '{} up --detach' failed.",
server_plugin
.server_script()
.file_name()
.expect("Unable to get control script name.")
.to_string_lossy()
)
}) {
Ok(output) => Ok(String::from_utf8_lossy(&output.stdout).to_string()),
Err(err) => Err(err),
}
}
#[tracing::instrument(level = "debug", skip(self, app_state))]
async fn expunge_all_content(&self, app_state: &AppState) -> Result<()> {
self.shutdown()?;
let server_plugin = server_plugin_downcast(self);
launch_server(app_state, server_plugin).await?;
Ok(())
}
#[tracing::instrument(level = "debug", skip(self))]
fn shutdown(&self) -> Result<()> {
let server_plugin = server_plugin_downcast(self);
let docker_down_output =
run_docker_compose(server_plugin, &["down"]).with_context(|| {
format!(
"Running '{} down' failed.",
server_plugin
.server_script()
.file_name()
.expect("Unable to get control script name.")
.to_string_lossy()
)
})?;
if !docker_down_output.status.success() {
return Err(eyre!(crate::errors::AppError::ChildProcessFailure(
docker_down_output.status,
format!(
"Failed to shutdown '{}' via Docker Compose.",
server_plugin.server_name()
),
String::from_utf8_lossy(&docker_down_output.stdout).into(),
String::from_utf8_lossy(&docker_down_output.stderr).into()
)));
}
Ok(())
}
}
/// Extract the downcast [DockerComposeServerPlugin] from the specified [DockerComposeServerHandle].
fn server_plugin_downcast(server_handle: &DockerComposeServerHandle) -> &DockerComposeServerPlugin {
match &server_handle.server_plugin {
ServerPluginWrapper::DockerComposeServerPlugin(server_plugin) => server_plugin,
#[allow(unreachable_patterns)]
_ => panic!("Unsupported downcast attempt."),
}
}
| emit_logs | identifier_name |
docker_compose.rs | //! Provides utility functions for [ServerPlugin] and [ServerHandle] implementations that use Docker Compose.
//!
//! These functions all assume that the server has a dedicated directory, which contains a custom shell
//! script that wraps `docker-compose` with any setup, environment variables, etc. needed to run things
//! correctly for that FHIR server.
use super::ServerPluginWrapper;
use crate::servers::{ServerHandle, ServerName, ServerPlugin};
use crate::AppState;
use async_trait::async_trait;
use eyre::{eyre, Context, Result};
use std::ffi::OsStr;
use std::fmt::Debug;
use std::path::PathBuf;
use std::process::Command;
use std::process::Output;
use url::Url;
/// Each instance of this struct represents a particular FHIR Server implementation, where the implementation
/// is launched and managed via Docker Compose.
#[derive(Clone, Debug)]
pub struct DockerComposeServerPlugin {
server_name: ServerName,
server_script: PathBuf,
base_url: Url,
request_builder_factory:
fn(client: reqwest::Client, method: http::Method, url: Url) -> reqwest::RequestBuilder,
}
impl DockerComposeServerPlugin {
/// Returns the [PathBuf] to the `docker compose` wrapper script for this server.
fn server_script(&self) -> PathBuf {
self.server_script.clone()
}
/// Returns the base [Url] that the server will use, once launched.
fn base_url(&self) -> &Url {
&self.base_url
}
}
impl DockerComposeServerPlugin {
/// Constructs a new `DockerComposeServerPlugin` instance that will represent a particular FHIR Server
/// implementation.
///
/// Parameters:
/// * `server_name`: the [ServerName] that will uniquely identify the FHIR Server implementation
/// * `server_script`: a [PathBuf] to the shell script that wraps the `docker compose` command for this
/// particular FHIR Server implementation
/// * `base_url`: the base [Url] that should be used for all requests to the FHIR Server, once launched
/// * `request_builder_factory`: a function that can produce the [reqwest::RequestBuilder] to use when
/// querying the FHIR Server, once launched
pub fn new(
server_name: ServerName,
server_script: PathBuf,
base_url: Url,
request_builder_factory: fn(
client: reqwest::Client,
method: http::Method,
url: Url,
) -> reqwest::RequestBuilder,
) -> DockerComposeServerPlugin {
DockerComposeServerPlugin {
server_name,
server_script,
base_url,
request_builder_factory,
}
}
}
#[async_trait]
impl ServerPlugin for DockerComposeServerPlugin {
fn server_name(&self) -> &ServerName {
&self.server_name
}
async fn launch(&self, app_state: &AppState) -> Result<Box<dyn ServerHandle>> {
launch_server(app_state, self).await
}
}
/// Runs the specified Docker Compose subcommand with the specified arguments, for the specified FHIR Server
/// implementation.
///
/// Parameters:
/// * `server_plugin`: the [DockerComposeServerPlugin] that represents the FHIR Server implementation to run
/// the command for/against
/// * `args`: the Docker Compose subcommand and options to run, e.g. `["up", "--detach"]`
#[tracing::instrument(level = "info", skip(server_plugin))]
fn run_docker_compose<I, S>(server_plugin: &DockerComposeServerPlugin, args: I) -> Result<Output>
where
I: IntoIterator<Item = S> + Debug,
S: AsRef<OsStr>,
{
/*
* Run the wrapper script with the specified Docker Compose subcommand and arguments.
*/
let docker_compose_output = Command::new(server_plugin.server_script())
.args(args)
.output()
.with_context(|| {
format!(
"Error returned by control command for the '{}' FHIR server.",
server_plugin.server_name()
)
})?;
if !docker_compose_output.status.success() {
return Err(eyre!(crate::errors::AppError::ChildProcessFailure(
docker_compose_output.status,
format!(
"Error returned by control command for the '{}' FHIR server.",
server_plugin.server_name()
),
String::from_utf8_lossy(&docker_compose_output.stdout).into(),
String::from_utf8_lossy(&docker_compose_output.stderr).into()
)));
}
Ok(docker_compose_output)
}
/// Launches the server, producing a boxed [DockerComposeServerHandle].
///
/// Parameters:
/// * `app_state`: the application's [AppState]
/// * `server_plugin`: the [DockerComposeServerPlugin] for the server to launch
async fn launch_server(
app_state: &AppState,
server_plugin: &DockerComposeServerPlugin,
) -> Result<Box<dyn ServerHandle>> {
/*
* Build and launch the server.
*/
run_docker_compose(server_plugin, &["up", "--detach"]).with_context(|| {
format!(
"Running '{} up --detach' failed.",
server_plugin
.server_script()
.file_name()
.expect("Unable to get control script name.")
.to_string_lossy()
)
})?;
/*
* The server containers have now been started, though they're not necessarily ready yet. Build a
* handle for it, copying any fields from the plugin that will be needed (as we can't safely downcast
* the plugin, so this is the only way to have access to those fields from the handle).
*/
let server_plugin = app_state
.find_server_plugin(server_plugin.server_name().as_str())
.expect("Unable to find server plugin");
let http_client = super::client_default()?; |
// Wait (up to a timeout) for the server to be ready.
match wait_for_ready(app_state, &server_handle).await {
Err(err) => {
server_handle.emit_logs_info()?;
Err(err)
}
Ok(_) => {
let server_handle: Box<dyn ServerHandle> = Box::new(server_handle);
Ok(server_handle)
}
}
}
/// Checks the specified server repeatedly to see if it is ready, up to a hardcoded timeout.
///
/// Parameters:
/// * `app_state`: the application's [AppState]
/// * `server_handle`: the [DockerComposeServerHandle] to test
///
/// Returns an empty [Result], where an error indicates that the server was not ready.
#[tracing::instrument(level = "debug", skip(app_state, server_handle))]
async fn wait_for_ready(
app_state: &AppState,
server_handle: &DockerComposeServerHandle,
) -> Result<()> {
let probe_result = tokio::time::timeout(std::time::Duration::from_secs(60 * 5), async {
let mut ready = false;
let mut probe = None;
while !ready {
probe = Some(
crate::test_framework::metadata::check_metadata_operation(app_state, server_handle)
.await,
);
ready = probe.as_ref().expect("probe result missing").is_ok();
if !ready {
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
}
}
probe.expect("probe results missing")
})
.await
.with_context(|| {
format!(
"Timed out while waiting for server '{}' to launch.",
server_handle.plugin().server_name()
)
})?;
match probe_result {
Err(err) => {
server_handle.emit_logs_info()?;
Err(err)
}
Ok(_) => Ok(()),
}
}
/// Represents a running instance of a [DockerComposeServerPlugin] instance.
struct DockerComposeServerHandle {
server_plugin: ServerPluginWrapper,
http_client: reqwest::Client,
}
#[async_trait]
impl ServerHandle for DockerComposeServerHandle {
fn plugin(&self) -> &ServerPluginWrapper {
&self.server_plugin
}
fn base_url(&self) -> url::Url {
let server_plugin = server_plugin_downcast(self);
server_plugin.base_url().clone()
}
fn client(&self) -> Result<reqwest::Client> {
Ok(self.http_client.clone())
}
fn request_builder(
&self,
client: reqwest::Client,
method: http::Method,
url: Url,
) -> reqwest::RequestBuilder {
let server_plugin = server_plugin_downcast(self);
(server_plugin.request_builder_factory)(client, method, url)
}
fn emit_logs(&self) -> Result<String> {
let server_plugin = server_plugin_downcast(self);
match run_docker_compose(server_plugin, &["logs", "--no-color"]).with_context(|| {
format!(
"Running '{} up --detach' failed.",
server_plugin
.server_script()
.file_name()
.expect("Unable to get control script name.")
.to_string_lossy()
)
}) {
Ok(output) => Ok(String::from_utf8_lossy(&output.stdout).to_string()),
Err(err) => Err(err),
}
}
#[tracing::instrument(level = "debug", skip(self, app_state))]
async fn expunge_all_content(&self, app_state: &AppState) -> Result<()> {
self.shutdown()?;
let server_plugin = server_plugin_downcast(self);
launch_server(app_state, server_plugin).await?;
Ok(())
}
#[tracing::instrument(level = "debug", skip(self))]
fn shutdown(&self) -> Result<()> {
let server_plugin = server_plugin_downcast(self);
let docker_down_output =
run_docker_compose(server_plugin, &["down"]).with_context(|| {
format!(
"Running '{} down' failed.",
server_plugin
.server_script()
.file_name()
.expect("Unable to get control script name.")
.to_string_lossy()
)
})?;
if !docker_down_output.status.success() {
return Err(eyre!(crate::errors::AppError::ChildProcessFailure(
docker_down_output.status,
format!(
"Failed to shutdown '{}' via Docker Compose.",
server_plugin.server_name()
),
String::from_utf8_lossy(&docker_down_output.stdout).into(),
String::from_utf8_lossy(&docker_down_output.stderr).into()
)));
}
Ok(())
}
}
/// Extract the downcast [DockerComposeServerPlugin] from the specified [DockerComposeServerHandle].
fn server_plugin_downcast(server_handle: &DockerComposeServerHandle) -> &DockerComposeServerPlugin {
match &server_handle.server_plugin {
ServerPluginWrapper::DockerComposeServerPlugin(server_plugin) => server_plugin,
#[allow(unreachable_patterns)]
_ => panic!("Unsupported downcast attempt."),
}
} | let server_handle = DockerComposeServerHandle {
server_plugin: server_plugin.clone(),
http_client,
}; | random_line_split |
docker_compose.rs | //! Provides utility functions for [ServerPlugin] and [ServerHandle] implementations that use Docker Compose.
//!
//! These functions all assume that the server has a dedicated directory, which contains a custom shell
//! script that wraps `docker-compose` with any setup, environment variables, etc. needed to run things
//! correctly for that FHIR server.
use super::ServerPluginWrapper;
use crate::servers::{ServerHandle, ServerName, ServerPlugin};
use crate::AppState;
use async_trait::async_trait;
use eyre::{eyre, Context, Result};
use std::ffi::OsStr;
use std::fmt::Debug;
use std::path::PathBuf;
use std::process::Command;
use std::process::Output;
use url::Url;
/// Each instance of this struct represents a particular FHIR Server implementation, where the implementation
/// is launched and managed via Docker Compose.
#[derive(Clone, Debug)]
pub struct DockerComposeServerPlugin {
server_name: ServerName,
server_script: PathBuf,
base_url: Url,
request_builder_factory:
fn(client: reqwest::Client, method: http::Method, url: Url) -> reqwest::RequestBuilder,
}
impl DockerComposeServerPlugin {
/// Returns the [PathBuf] to the `docker compose` wrapper script for this server.
fn server_script(&self) -> PathBuf {
self.server_script.clone()
}
/// Returns the base [Url] that the server will use, once launched.
fn base_url(&self) -> &Url {
&self.base_url
}
}
impl DockerComposeServerPlugin {
/// Constructs a new `DockerComposeServerPlugin` instance that will represent a particular FHIR Server
/// implementation.
///
/// Parameters:
/// * `server_name`: the [ServerName] that will uniquely identify the FHIR Server implementation
/// * `server_script`: a [PathBuf] to the shell script that wraps the `docker compose` command for this
/// particular FHIR Server implementation
/// * `base_url`: the base [Url] that should be used for all requests to the FHIR Server, once launched
/// * `request_builder_factory`: a function that can produce the [reqwest::RequestBuilder] to use when
/// querying the FHIR Server, once launched
pub fn new(
server_name: ServerName,
server_script: PathBuf,
base_url: Url,
request_builder_factory: fn(
client: reqwest::Client,
method: http::Method,
url: Url,
) -> reqwest::RequestBuilder,
) -> DockerComposeServerPlugin {
DockerComposeServerPlugin {
server_name,
server_script,
base_url,
request_builder_factory,
}
}
}
#[async_trait]
impl ServerPlugin for DockerComposeServerPlugin {
fn server_name(&self) -> &ServerName {
&self.server_name
}
async fn launch(&self, app_state: &AppState) -> Result<Box<dyn ServerHandle>> |
}
/// Runs the specified Docker Compose subcommand with the specified arguments, for the specified FHIR Server
/// implementation.
///
/// Parameters:
/// * `server_plugin`: the [DockerComposeServerPlugin] that represents the FHIR Server implementation to run
/// the command for/against
/// * `args`: the Docker Compose subcommand and options to run, e.g. `["up", "--detach"]`
#[tracing::instrument(level = "info", skip(server_plugin))]
fn run_docker_compose<I, S>(server_plugin: &DockerComposeServerPlugin, args: I) -> Result<Output>
where
I: IntoIterator<Item = S> + Debug,
S: AsRef<OsStr>,
{
/*
* Run the wrapper script with the specified Docker Compose subcommand and arguments.
*/
let docker_compose_output = Command::new(server_plugin.server_script())
.args(args)
.output()
.with_context(|| {
format!(
"Error returned by control command for the '{}' FHIR server.",
server_plugin.server_name()
)
})?;
if !docker_compose_output.status.success() {
return Err(eyre!(crate::errors::AppError::ChildProcessFailure(
docker_compose_output.status,
format!(
"Error returned by control command for the '{}' FHIR server.",
server_plugin.server_name()
),
String::from_utf8_lossy(&docker_compose_output.stdout).into(),
String::from_utf8_lossy(&docker_compose_output.stderr).into()
)));
}
Ok(docker_compose_output)
}
/// Launches the server, producing a boxed [DockerComposeServerHandle].
///
/// Parameters:
/// * `app_state`: the application's [AppState]
/// * `server_plugin`: the [DockerComposeServerPlugin] for the server to launch
async fn launch_server(
app_state: &AppState,
server_plugin: &DockerComposeServerPlugin,
) -> Result<Box<dyn ServerHandle>> {
/*
* Build and launch the server.
*/
run_docker_compose(server_plugin, &["up", "--detach"]).with_context(|| {
format!(
"Running '{} up --detach' failed.",
server_plugin
.server_script()
.file_name()
.expect("Unable to get control script name.")
.to_string_lossy()
)
})?;
/*
* The server containers have now been started, though they're not necessarily ready yet. Build a
* handle for it, copying any fields from the plugin that will be needed (as we can't safely downcast
* the plugin, so this is the only way to have access to those fields from the handle).
*/
let server_plugin = app_state
.find_server_plugin(server_plugin.server_name().as_str())
.expect("Unable to find server plugin");
let http_client = super::client_default()?;
let server_handle = DockerComposeServerHandle {
server_plugin: server_plugin.clone(),
http_client,
};
// Wait (up to a timeout) for the server to be ready.
match wait_for_ready(app_state, &server_handle).await {
Err(err) => {
server_handle.emit_logs_info()?;
Err(err)
}
Ok(_) => {
let server_handle: Box<dyn ServerHandle> = Box::new(server_handle);
Ok(server_handle)
}
}
}
/// Checks the specified server repeatedly to see if it is ready, up to a hardcoded timeout.
///
/// Parameters:
/// * `app_state`: the application's [AppState]
/// * `server_handle`: the [DockerComposeServerHandle] to test
///
/// Returns an empty [Result], where an error indicates that the server was not ready.
#[tracing::instrument(level = "debug", skip(app_state, server_handle))]
async fn wait_for_ready(
app_state: &AppState,
server_handle: &DockerComposeServerHandle,
) -> Result<()> {
let probe_result = tokio::time::timeout(std::time::Duration::from_secs(60 * 5), async {
let mut ready = false;
let mut probe = None;
while !ready {
probe = Some(
crate::test_framework::metadata::check_metadata_operation(app_state, server_handle)
.await,
);
ready = probe.as_ref().expect("probe result missing").is_ok();
if !ready {
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
}
}
probe.expect("probe results missing")
})
.await
.with_context(|| {
format!(
"Timed out while waiting for server '{}' to launch.",
server_handle.plugin().server_name()
)
})?;
match probe_result {
Err(err) => {
server_handle.emit_logs_info()?;
Err(err)
}
Ok(_) => Ok(()),
}
}
/// Represents a running instance of a [DockerComposeServerPlugin] instance.
struct DockerComposeServerHandle {
server_plugin: ServerPluginWrapper,
http_client: reqwest::Client,
}
#[async_trait]
impl ServerHandle for DockerComposeServerHandle {
fn plugin(&self) -> &ServerPluginWrapper {
&self.server_plugin
}
fn base_url(&self) -> url::Url {
let server_plugin = server_plugin_downcast(self);
server_plugin.base_url().clone()
}
fn client(&self) -> Result<reqwest::Client> {
Ok(self.http_client.clone())
}
fn request_builder(
&self,
client: reqwest::Client,
method: http::Method,
url: Url,
) -> reqwest::RequestBuilder {
let server_plugin = server_plugin_downcast(self);
(server_plugin.request_builder_factory)(client, method, url)
}
fn emit_logs(&self) -> Result<String> {
let server_plugin = server_plugin_downcast(self);
match run_docker_compose(server_plugin, &["logs", "--no-color"]).with_context(|| {
format!(
"Running '{} up --detach' failed.",
server_plugin
.server_script()
.file_name()
.expect("Unable to get control script name.")
.to_string_lossy()
)
}) {
Ok(output) => Ok(String::from_utf8_lossy(&output.stdout).to_string()),
Err(err) => Err(err),
}
}
#[tracing::instrument(level = "debug", skip(self, app_state))]
async fn expunge_all_content(&self, app_state: &AppState) -> Result<()> {
self.shutdown()?;
let server_plugin = server_plugin_downcast(self);
launch_server(app_state, server_plugin).await?;
Ok(())
}
#[tracing::instrument(level = "debug", skip(self))]
fn shutdown(&self) -> Result<()> {
let server_plugin = server_plugin_downcast(self);
let docker_down_output =
run_docker_compose(server_plugin, &["down"]).with_context(|| {
format!(
"Running '{} down' failed.",
server_plugin
.server_script()
.file_name()
.expect("Unable to get control script name.")
.to_string_lossy()
)
})?;
if !docker_down_output.status.success() {
return Err(eyre!(crate::errors::AppError::ChildProcessFailure(
docker_down_output.status,
format!(
"Failed to shutdown '{}' via Docker Compose.",
server_plugin.server_name()
),
String::from_utf8_lossy(&docker_down_output.stdout).into(),
String::from_utf8_lossy(&docker_down_output.stderr).into()
)));
}
Ok(())
}
}
/// Extract the downcast [DockerComposeServerPlugin] from the specified [DockerComposeServerHandle].
fn server_plugin_downcast(server_handle: &DockerComposeServerHandle) -> &DockerComposeServerPlugin {
match &server_handle.server_plugin {
ServerPluginWrapper::DockerComposeServerPlugin(server_plugin) => server_plugin,
#[allow(unreachable_patterns)]
_ => panic!("Unsupported downcast attempt."),
}
}
| {
launch_server(app_state, self).await
} | identifier_body |
lookup.rs | use crate::utils::{f64_compare, TValue, TValueType};
use super::*;
/// Functionality relating to looking up properties of the `Bezier` or points along the `Bezier`.
impl Bezier {
/// Convert a euclidean distance ratio along the `Bezier` curve to a parametric `t`-value.
pub fn euclidean_to_parametric(&self, ratio: f64, error: f64) -> f64 | } else {
high = mid;
}
}
mid
}
/// Convert a [TValue] to a parametric `t`-value.
pub(crate) fn t_value_to_parametric(&self, t: TValue) -> f64 {
match t {
TValue::Parametric(t) => {
assert!((0.0..=1.).contains(&t));
t
}
TValue::Euclidean(t) => {
assert!((0.0..=1.).contains(&t));
self.euclidean_to_parametric(t, DEFAULT_EUCLIDEAN_ERROR_BOUND)
}
TValue::EuclideanWithinError { t, error } => {
assert!((0.0..=1.).contains(&t));
self.euclidean_to_parametric(t, error)
}
}
}
/// Calculate the point on the curve based on the `t`-value provided.
pub(crate) fn unrestricted_parametric_evaluate(&self, t: f64) -> DVec2 {
// Basis code based off of pseudocode found here: <https://pomax.github.io/bezierinfo/#explanation>.
let t_squared = t * t;
let one_minus_t = 1. - t;
let squared_one_minus_t = one_minus_t * one_minus_t;
match self.handles {
BezierHandles::Linear => self.start.lerp(self.end, t),
BezierHandles::Quadratic { handle } => squared_one_minus_t * self.start + 2. * one_minus_t * t * handle + t_squared * self.end,
BezierHandles::Cubic { handle_start, handle_end } => {
let t_cubed = t_squared * t;
let cubed_one_minus_t = squared_one_minus_t * one_minus_t;
cubed_one_minus_t * self.start + 3. * squared_one_minus_t * t * handle_start + 3. * one_minus_t * t_squared * handle_end + t_cubed * self.end
}
}
}
/// Calculate the coordinates of the point `t` along the curve.
/// Expects `t` to be within the inclusive range `[0, 1]`.
/// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/evaluate/solo" title="Evaluate Demo"></iframe>
pub fn evaluate(&self, t: TValue) -> DVec2 {
let t = self.t_value_to_parametric(t);
self.unrestricted_parametric_evaluate(t)
}
/// Return a selection of equidistant points on the bezier curve.
/// If no value is provided for `steps`, then the function will default `steps` to be 10.
/// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/lookup-table/solo" title="Lookup-Table Demo"></iframe>
pub fn compute_lookup_table(&self, steps: Option<usize>, tvalue_type: Option<TValueType>) -> Vec<DVec2> {
let steps = steps.unwrap_or(DEFAULT_LUT_STEP_SIZE);
let tvalue_type = tvalue_type.unwrap_or(TValueType::Parametric);
(0..=steps)
.map(|t| {
let tvalue = match tvalue_type {
TValueType::Parametric => TValue::Parametric(t as f64 / steps as f64),
TValueType::Euclidean => TValue::Euclidean(t as f64 / steps as f64),
};
self.evaluate(tvalue)
})
.collect()
}
/// Return an approximation of the length of the bezier curve.
/// - `num_subdivisions` - Number of subdivisions used to approximate the curve. The default value is 1000.
/// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/length/solo" title="Length Demo"></iframe>
pub fn length(&self, num_subdivisions: Option<usize>) -> f64 {
match self.handles {
BezierHandles::Linear => self.start.distance(self.end),
_ => {
// Code example from <https://gamedev.stackexchange.com/questions/5373/moving-ships-between-two-planets-along-a-bezier-missing-some-equations-for-acce/5427#5427>.
// We will use an approximate approach where we split the curve into many subdivisions
// and calculate the euclidean distance between the two endpoints of the subdivision
let lookup_table = self.compute_lookup_table(Some(num_subdivisions.unwrap_or(DEFAULT_LENGTH_SUBDIVISIONS)), Some(TValueType::Parametric));
let mut approx_curve_length = 0.;
let mut previous_point = lookup_table[0];
// Calculate approximate distance between subdivision
for current_point in lookup_table.iter().skip(1) {
// Calculate distance of subdivision
approx_curve_length += (*current_point - previous_point).length();
// Update the previous point
previous_point = *current_point;
}
approx_curve_length
}
}
}
/// Returns the parametric `t`-value that corresponds to the closest point on the curve to the provided point.
/// Uses a searching algorithm akin to binary search that can be customized using the optional [ProjectionOptions] struct.
/// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/project/solo" title="Project Demo"></iframe>
pub fn project(&self, point: DVec2, options: Option<ProjectionOptions>) -> f64 {
let options = options.unwrap_or_default();
let ProjectionOptions {
lut_size,
convergence_epsilon,
convergence_limit,
iteration_limit,
} = options;
// TODO: Consider optimizations from precomputing useful values, or using the GPU
// First find the closest point from the results of a lookup table
let lut = self.compute_lookup_table(Some(lut_size), Some(TValueType::Parametric));
let (minimum_position, minimum_distance) = utils::get_closest_point_in_lut(&lut, point);
// Get the t values to the left and right of the closest result in the lookup table
let lut_size_f64 = lut_size as f64;
let minimum_position_f64 = minimum_position as f64;
let mut left_t = (minimum_position_f64 - 1.).max(0.) / lut_size_f64;
let mut right_t = (minimum_position_f64 + 1.).min(lut_size_f64) / lut_size_f64;
// Perform a finer search by finding closest t from 5 points between [left_t, right_t] inclusive
// Choose new left_t and right_t for a smaller range around the closest t and repeat the process
let mut final_t = left_t;
let mut distance;
// Increment minimum_distance to ensure that the distance < minimum_distance comparison will be true for at least one iteration
let mut new_minimum_distance = minimum_distance + 1.;
// Maintain the previous distance to identify convergence
let mut previous_distance;
// Counter to limit the number of iterations
let mut iteration_count = 0;
// Counter to identify how many iterations have had a similar result. Used for convergence test
let mut convergence_count = 0;
// Store calculated distances to minimize unnecessary recomputations
let mut distances: [f64; NUM_DISTANCES] = [
point.distance(lut[(minimum_position as i64 - 1).max(0) as usize]),
0.,
0.,
0.,
point.distance(lut[lut_size.min(minimum_position + 1)]),
];
while left_t <= right_t && convergence_count < convergence_limit && iteration_count < iteration_limit {
previous_distance = new_minimum_distance;
let step = (right_t - left_t) / (NUM_DISTANCES as f64 - 1.);
let mut iterator_t = left_t;
let mut target_index = 0;
// Iterate through first 4 points and will handle the right most point later
for (step_index, table_distance) in distances.iter_mut().enumerate().take(4) {
// Use previously computed distance for the left most point, and compute new values for the others
if step_index == 0 {
distance = *table_distance;
} else {
distance = point.distance(self.evaluate(TValue::Parametric(iterator_t)));
*table_distance = distance;
}
if distance < new_minimum_distance {
new_minimum_distance = distance;
target_index = step_index;
final_t = iterator_t
}
iterator_t += step;
}
// Check right most edge separately since step may not perfectly add up to it (floating point errors)
if distances[NUM_DISTANCES - 1] < new_minimum_distance {
new_minimum_distance = distances[NUM_DISTANCES - 1];
final_t = right_t;
}
// Update left_t and right_t to be the t values (final_t +/- step), while handling the edges (i.e. if final_t is 0, left_t will be 0 instead of -step)
// Ensure that the t values never exceed the [0, 1] range
left_t = (final_t - step).max(0.);
right_t = (final_t + step).min(1.);
// Re-use the corresponding computed distances (target_index is the index corresponding to final_t)
// Since target_index is a usize, can't subtract one if it is zero
distances[0] = distances[if target_index == 0 { 0 } else { target_index - 1 }];
distances[NUM_DISTANCES - 1] = distances[(target_index + 1).min(NUM_DISTANCES - 1)];
iteration_count += 1;
// update count for consecutive iterations of similar minimum distances
if previous_distance - new_minimum_distance < convergence_epsilon {
convergence_count += 1;
} else {
convergence_count = 0;
}
}
final_t
}
}
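// --- Added illustrative sketch (not part of the original module) ---
// Contrasts parametric and euclidean `t`-values on a curve whose speed is uneven.
// The coordinates are arbitrary; `TValue::Euclidean(0.5)` lands near the arc-length
// midpoint, which generally differs from the point at `TValue::Parametric(0.5)`.
#[allow(dead_code)]
fn demo_euclidean_vs_parametric() -> (DVec2, DVec2) {
    let bezier = Bezier::from_cubic_coordinates(0., 0., 0., 100., 100., 100., 100., 0.);
    let halfway_by_parameter = bezier.evaluate(TValue::Parametric(0.5));
    // Internally this goes through `euclidean_to_parametric`, the bisection over
    // trimmed arc length defined above.
    let halfway_by_arc_length = bezier.evaluate(TValue::Euclidean(0.5));
    (halfway_by_parameter, halfway_by_arc_length)
}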
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_evaluate() {
let p1 = DVec2::new(3., 5.);
let p2 = DVec2::new(14., 3.);
let p3 = DVec2::new(19., 14.);
let p4 = DVec2::new(30., 21.);
let bezier1 = Bezier::from_quadratic_dvec2(p1, p2, p3);
assert_eq!(bezier1.evaluate(TValue::Parametric(0.5)), DVec2::new(12.5, 6.25));
let bezier2 = Bezier::from_cubic_dvec2(p1, p2, p3, p4);
assert_eq!(bezier2.evaluate(TValue::Parametric(0.5)), DVec2::new(16.5, 9.625));
}
#[test]
fn test_compute_lookup_table() {
let bezier1 = Bezier::from_quadratic_coordinates(10., 10., 30., 30., 50., 10.);
let lookup_table1 = bezier1.compute_lookup_table(Some(2), Some(TValueType::Parametric));
assert_eq!(lookup_table1, vec![bezier1.start(), bezier1.evaluate(TValue::Parametric(0.5)), bezier1.end()]);
let bezier2 = Bezier::from_cubic_coordinates(10., 10., 30., 30., 70., 70., 90., 10.);
let lookup_table2 = bezier2.compute_lookup_table(Some(4), Some(TValueType::Parametric));
assert_eq!(
lookup_table2,
vec![
bezier2.start(),
bezier2.evaluate(TValue::Parametric(0.25)),
bezier2.evaluate(TValue::Parametric(0.50)),
bezier2.evaluate(TValue::Parametric(0.75)),
bezier2.end()
]
);
}
#[test]
fn test_length() {
let p1 = DVec2::new(30., 50.);
let p2 = DVec2::new(140., 30.);
let p3 = DVec2::new(160., 170.);
let p4 = DVec2::new(77., 129.);
let bezier_linear = Bezier::from_linear_dvec2(p1, p2);
assert!(utils::f64_compare(bezier_linear.length(None), p1.distance(p2), MAX_ABSOLUTE_DIFFERENCE));
let bezier_quadratic = Bezier::from_quadratic_dvec2(p1, p2, p3);
assert!(utils::f64_compare(bezier_quadratic.length(None), 204., 1e-2));
let bezier_cubic = Bezier::from_cubic_dvec2(p1, p2, p3, p4);
assert!(utils::f64_compare(bezier_cubic.length(None), 199., 1e-2));
}
#[test]
fn test_project() {
let bezier1 = Bezier::from_cubic_coordinates(4., 4., 23., 45., 10., 30., 56., 90.);
assert_eq!(bezier1.project(DVec2::ZERO, None), 0.);
assert_eq!(bezier1.project(DVec2::new(100., 100.), None), 1.);
let bezier2 = Bezier::from_quadratic_coordinates(0., 0., 0., 100., 100., 100.);
assert_eq!(bezier2.project(DVec2::new(100., 0.), None), 0.);
}
}
| {
if ratio < error {
return 0.;
}
if 1. - ratio < error {
return 1.;
}
let mut low = 0.;
let mut mid = 0.;
let mut high = 1.;
let total_length = self.length(None);
while low < high {
mid = (low + high) / 2.;
let test_ratio = self.trim(TValue::Parametric(0.), TValue::Parametric(mid)).length(None) / total_length;
if f64_compare(test_ratio, ratio, error) {
break;
} else if test_ratio < ratio {
low = mid; | identifier_body |
lookup.rs | use crate::utils::{f64_compare, TValue, TValueType};
use super::*;
/// Functionality relating to looking up properties of the `Bezier` or points along the `Bezier`.
impl Bezier {
/// Convert a euclidean distance ratio along the `Bezier` curve to a parametric `t`-value.
pub fn | (&self, ratio: f64, error: f64) -> f64 {
if ratio < error {
return 0.;
}
if 1. - ratio < error {
return 1.;
}
let mut low = 0.;
let mut mid = 0.;
let mut high = 1.;
let total_length = self.length(None);
while low < high {
mid = (low + high) / 2.;
let test_ratio = self.trim(TValue::Parametric(0.), TValue::Parametric(mid)).length(None) / total_length;
if f64_compare(test_ratio, ratio, error) {
break;
} else if test_ratio < ratio {
low = mid;
} else {
high = mid;
}
}
mid
}
/// Convert a [TValue] to a parametric `t`-value.
pub(crate) fn t_value_to_parametric(&self, t: TValue) -> f64 {
match t {
TValue::Parametric(t) => {
assert!((0.0..=1.).contains(&t));
t
}
TValue::Euclidean(t) => {
assert!((0.0..=1.).contains(&t));
self.euclidean_to_parametric(t, DEFAULT_EUCLIDEAN_ERROR_BOUND)
}
TValue::EuclideanWithinError { t, error } => {
assert!((0.0..=1.).contains(&t));
self.euclidean_to_parametric(t, error)
}
}
}
/// Calculate the point on the curve based on the `t`-value provided.
pub(crate) fn unrestricted_parametric_evaluate(&self, t: f64) -> DVec2 {
// Basis code based off of pseudocode found here: <https://pomax.github.io/bezierinfo/#explanation>.
let t_squared = t * t;
let one_minus_t = 1. - t;
let squared_one_minus_t = one_minus_t * one_minus_t;
match self.handles {
BezierHandles::Linear => self.start.lerp(self.end, t),
BezierHandles::Quadratic { handle } => squared_one_minus_t * self.start + 2. * one_minus_t * t * handle + t_squared * self.end,
BezierHandles::Cubic { handle_start, handle_end } => {
let t_cubed = t_squared * t;
let cubed_one_minus_t = squared_one_minus_t * one_minus_t;
cubed_one_minus_t * self.start + 3. * squared_one_minus_t * t * handle_start + 3. * one_minus_t * t_squared * handle_end + t_cubed * self.end
}
}
}
/// Calculate the coordinates of the point `t` along the curve.
/// Expects `t` to be within the inclusive range `[0, 1]`.
/// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/evaluate/solo" title="Evaluate Demo"></iframe>
pub fn evaluate(&self, t: TValue) -> DVec2 {
let t = self.t_value_to_parametric(t);
self.unrestricted_parametric_evaluate(t)
}
/// Return a selection of equidistant points on the bezier curve.
/// If no value is provided for `steps`, then the function will default `steps` to be 10.
/// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/lookup-table/solo" title="Lookup-Table Demo"></iframe>
pub fn compute_lookup_table(&self, steps: Option<usize>, tvalue_type: Option<TValueType>) -> Vec<DVec2> {
let steps = steps.unwrap_or(DEFAULT_LUT_STEP_SIZE);
let tvalue_type = tvalue_type.unwrap_or(TValueType::Parametric);
(0..=steps)
.map(|t| {
let tvalue = match tvalue_type {
TValueType::Parametric => TValue::Parametric(t as f64 / steps as f64),
TValueType::Euclidean => TValue::Euclidean(t as f64 / steps as f64),
};
self.evaluate(tvalue)
})
.collect()
}
/// Return an approximation of the length of the bezier curve.
/// - `num_subdivisions` - Number of subdivisions used to approximate the curve. The default value is 1000.
/// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/length/solo" title="Length Demo"></iframe>
pub fn length(&self, num_subdivisions: Option<usize>) -> f64 {
match self.handles {
BezierHandles::Linear => self.start.distance(self.end),
_ => {
// Code example from <https://gamedev.stackexchange.com/questions/5373/moving-ships-between-two-planets-along-a-bezier-missing-some-equations-for-acce/5427#5427>.
// We will use an approximate approach where we split the curve into many subdivisions
// and calculate the euclidean distance between the two endpoints of the subdivision
let lookup_table = self.compute_lookup_table(Some(num_subdivisions.unwrap_or(DEFAULT_LENGTH_SUBDIVISIONS)), Some(TValueType::Parametric));
let mut approx_curve_length = 0.;
let mut previous_point = lookup_table[0];
// Calculate approximate distance between subdivision
for current_point in lookup_table.iter().skip(1) {
// Calculate distance of subdivision
approx_curve_length += (*current_point - previous_point).length();
// Update the previous point
previous_point = *current_point;
}
approx_curve_length
}
}
}
/// Returns the parametric `t`-value that corresponds to the closest point on the curve to the provided point.
/// Uses a searching algorithm akin to binary search that can be customized using the optional [ProjectionOptions] struct.
/// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/project/solo" title="Project Demo"></iframe>
pub fn project(&self, point: DVec2, options: Option<ProjectionOptions>) -> f64 {
let options = options.unwrap_or_default();
let ProjectionOptions {
lut_size,
convergence_epsilon,
convergence_limit,
iteration_limit,
} = options;
// TODO: Consider optimizations from precomputing useful values, or using the GPU
// First find the closest point from the results of a lookup table
let lut = self.compute_lookup_table(Some(lut_size), Some(TValueType::Parametric));
let (minimum_position, minimum_distance) = utils::get_closest_point_in_lut(&lut, point);
// Get the t values to the left and right of the closest result in the lookup table
let lut_size_f64 = lut_size as f64;
let minimum_position_f64 = minimum_position as f64;
let mut left_t = (minimum_position_f64 - 1.).max(0.) / lut_size_f64;
let mut right_t = (minimum_position_f64 + 1.).min(lut_size_f64) / lut_size_f64;
// Perform a finer search by finding closest t from 5 points between [left_t, right_t] inclusive
// Choose new left_t and right_t for a smaller range around the closest t and repeat the process
let mut final_t = left_t;
let mut distance;
// Increment minimum_distance to ensure that the distance < minimum_distance comparison will be true for at least one iteration
let mut new_minimum_distance = minimum_distance + 1.;
// Maintain the previous distance to identify convergence
let mut previous_distance;
// Counter to limit the number of iterations
let mut iteration_count = 0;
// Counter to identify how many iterations have had a similar result. Used for convergence test
let mut convergence_count = 0;
// Store calculated distances to minimize unnecessary recomputations
let mut distances: [f64; NUM_DISTANCES] = [
point.distance(lut[(minimum_position as i64 - 1).max(0) as usize]),
0.,
0.,
0.,
point.distance(lut[lut_size.min(minimum_position + 1)]),
];
while left_t <= right_t && convergence_count < convergence_limit && iteration_count < iteration_limit {
previous_distance = new_minimum_distance;
let step = (right_t - left_t) / (NUM_DISTANCES as f64 - 1.);
let mut iterator_t = left_t;
let mut target_index = 0;
// Iterate through first 4 points and will handle the right most point later
for (step_index, table_distance) in distances.iter_mut().enumerate().take(4) {
// Use previously computed distance for the left most point, and compute new values for the others
if step_index == 0 {
distance = *table_distance;
} else {
distance = point.distance(self.evaluate(TValue::Parametric(iterator_t)));
*table_distance = distance;
}
if distance < new_minimum_distance {
new_minimum_distance = distance;
target_index = step_index;
final_t = iterator_t
}
iterator_t += step;
}
// Check right most edge separately since step may not perfectly add up to it (floating point errors)
if distances[NUM_DISTANCES - 1] < new_minimum_distance {
new_minimum_distance = distances[NUM_DISTANCES - 1];
final_t = right_t;
}
// Update left_t and right_t to be the t values (final_t +/- step), while handling the edges (i.e. if final_t is 0, left_t will be 0 instead of -step)
// Ensure that the t values never exceed the [0, 1] range
left_t = (final_t - step).max(0.);
right_t = (final_t + step).min(1.);
// Re-use the corresponding computed distances (target_index is the index corresponding to final_t)
// Since target_index is a u_size, can't subtract one if it is zero
distances[0] = distances[if target_index == 0 { 0 } else { target_index - 1 }];
distances[NUM_DISTANCES - 1] = distances[(target_index + 1).min(NUM_DISTANCES - 1)];
iteration_count += 1;
// update count for consecutive iterations of similar minimum distances
if previous_distance - new_minimum_distance < convergence_epsilon {
convergence_count += 1;
} else {
convergence_count = 0;
}
}
final_t
}
}
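// --- Added illustrative sketch (not part of the original module) ---
// The chord-sum length approximation improves as the subdivision count grows, so a
// cheap low-resolution estimate can be sanity-checked against a finer one. The
// subdivision counts chosen here are arbitrary.
#[allow(dead_code)]
fn demo_length_refinement() -> (f64, f64) {
    let bezier = Bezier::from_quadratic_coordinates(10., 10., 50., 90., 90., 10.);
    let coarse = bezier.length(Some(10));
    let fine = bezier.length(Some(10_000));
    // `coarse` slightly underestimates `fine`, since each chord undercuts its arc.
    (coarse, fine)
}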
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_evaluate() {
let p1 = DVec2::new(3., 5.);
let p2 = DVec2::new(14., 3.);
let p3 = DVec2::new(19., 14.);
let p4 = DVec2::new(30., 21.);
let bezier1 = Bezier::from_quadratic_dvec2(p1, p2, p3);
assert_eq!(bezier1.evaluate(TValue::Parametric(0.5)), DVec2::new(12.5, 6.25));
let bezier2 = Bezier::from_cubic_dvec2(p1, p2, p3, p4);
assert_eq!(bezier2.evaluate(TValue::Parametric(0.5)), DVec2::new(16.5, 9.625));
}
#[test]
fn test_compute_lookup_table() {
let bezier1 = Bezier::from_quadratic_coordinates(10., 10., 30., 30., 50., 10.);
let lookup_table1 = bezier1.compute_lookup_table(Some(2), Some(TValueType::Parametric));
assert_eq!(lookup_table1, vec![bezier1.start(), bezier1.evaluate(TValue::Parametric(0.5)), bezier1.end()]);
let bezier2 = Bezier::from_cubic_coordinates(10., 10., 30., 30., 70., 70., 90., 10.);
let lookup_table2 = bezier2.compute_lookup_table(Some(4), Some(TValueType::Parametric));
assert_eq!(
lookup_table2,
vec![
bezier2.start(),
bezier2.evaluate(TValue::Parametric(0.25)),
bezier2.evaluate(TValue::Parametric(0.50)),
bezier2.evaluate(TValue::Parametric(0.75)),
bezier2.end()
]
);
}
#[test]
fn test_length() {
let p1 = DVec2::new(30., 50.);
let p2 = DVec2::new(140., 30.);
let p3 = DVec2::new(160., 170.);
let p4 = DVec2::new(77., 129.);
let bezier_linear = Bezier::from_linear_dvec2(p1, p2);
assert!(utils::f64_compare(bezier_linear.length(None), p1.distance(p2), MAX_ABSOLUTE_DIFFERENCE));
let bezier_quadratic = Bezier::from_quadratic_dvec2(p1, p2, p3);
assert!(utils::f64_compare(bezier_quadratic.length(None), 204., 1e-2));
let bezier_cubic = Bezier::from_cubic_dvec2(p1, p2, p3, p4);
assert!(utils::f64_compare(bezier_cubic.length(None), 199., 1e-2));
}
#[test]
fn test_project() {
let bezier1 = Bezier::from_cubic_coordinates(4., 4., 23., 45., 10., 30., 56., 90.);
assert_eq!(bezier1.project(DVec2::ZERO, None), 0.);
assert_eq!(bezier1.project(DVec2::new(100., 100.), None), 1.);
let bezier2 = Bezier::from_quadratic_coordinates(0., 0., 0., 100., 100., 100.);
assert_eq!(bezier2.project(DVec2::new(100., 0.), None), 0.);
}
}
| euclidean_to_parametric | identifier_name |
lookup.rs | use crate::utils::{f64_compare, TValue, TValueType};
use super::*;
/// Functionality relating to looking up properties of the `Bezier` or points along the `Bezier`.
impl Bezier {
/// Convert a euclidean distance ratio along the `Bezier` curve to a parametric `t`-value.
pub fn euclidean_to_parametric(&self, ratio: f64, error: f64) -> f64 {
if ratio < error {
return 0.;
}
if 1. - ratio < error {
return 1.;
}
let mut low = 0.;
let mut mid = 0.;
let mut high = 1.;
let total_length = self.length(None);
while low < high {
mid = (low + high) / 2.;
let test_ratio = self.trim(TValue::Parametric(0.), TValue::Parametric(mid)).length(None) / total_length;
if f64_compare(test_ratio, ratio, error) {
break;
} else if test_ratio < ratio {
low = mid;
} else |
}
mid
}
/// Convert a [TValue] to a parametric `t`-value.
pub(crate) fn t_value_to_parametric(&self, t: TValue) -> f64 {
match t {
TValue::Parametric(t) => {
assert!((0.0..=1.).contains(&t));
t
}
TValue::Euclidean(t) => {
assert!((0.0..=1.).contains(&t));
self.euclidean_to_parametric(t, DEFAULT_EUCLIDEAN_ERROR_BOUND)
}
TValue::EuclideanWithinError { t, error } => {
assert!((0.0..=1.).contains(&t));
self.euclidean_to_parametric(t, error)
}
}
}
/// Calculate the point on the curve based on the `t`-value provided.
pub(crate) fn unrestricted_parametric_evaluate(&self, t: f64) -> DVec2 {
// Basis code based off of pseudocode found here: <https://pomax.github.io/bezierinfo/#explanation>.
let t_squared = t * t;
let one_minus_t = 1. - t;
let squared_one_minus_t = one_minus_t * one_minus_t;
match self.handles {
BezierHandles::Linear => self.start.lerp(self.end, t),
BezierHandles::Quadratic { handle } => squared_one_minus_t * self.start + 2. * one_minus_t * t * handle + t_squared * self.end,
BezierHandles::Cubic { handle_start, handle_end } => {
let t_cubed = t_squared * t;
let cubed_one_minus_t = squared_one_minus_t * one_minus_t;
cubed_one_minus_t * self.start + 3. * squared_one_minus_t * t * handle_start + 3. * one_minus_t * t_squared * handle_end + t_cubed * self.end
}
}
}
/// Calculate the coordinates of the point `t` along the curve.
/// Expects `t` to be within the inclusive range `[0, 1]`.
/// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/evaluate/solo" title="Evaluate Demo"></iframe>
pub fn evaluate(&self, t: TValue) -> DVec2 {
let t = self.t_value_to_parametric(t);
self.unrestricted_parametric_evaluate(t)
}
/// Return a selection of equidistant points on the bezier curve.
/// If no value is provided for `steps`, then the function will default `steps` to be 10.
/// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/lookup-table/solo" title="Lookup-Table Demo"></iframe>
pub fn compute_lookup_table(&self, steps: Option<usize>, tvalue_type: Option<TValueType>) -> Vec<DVec2> {
let steps = steps.unwrap_or(DEFAULT_LUT_STEP_SIZE);
let tvalue_type = tvalue_type.unwrap_or(TValueType::Parametric);
(0..=steps)
.map(|t| {
let tvalue = match tvalue_type {
TValueType::Parametric => TValue::Parametric(t as f64 / steps as f64),
TValueType::Euclidean => TValue::Euclidean(t as f64 / steps as f64),
};
self.evaluate(tvalue)
})
.collect()
}
/// Return an approximation of the length of the bezier curve.
/// - `num_subdivisions` - Number of subdivisions used to approximate the curve. The default value is 1000.
/// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/length/solo" title="Length Demo"></iframe>
pub fn length(&self, num_subdivisions: Option<usize>) -> f64 {
match self.handles {
BezierHandles::Linear => self.start.distance(self.end),
_ => {
// Code example from <https://gamedev.stackexchange.com/questions/5373/moving-ships-between-two-planets-along-a-bezier-missing-some-equations-for-acce/5427#5427>.
// We will use an approximate approach where we split the curve into many subdivisions
// and calculate the euclidean distance between the two endpoints of the subdivision
let lookup_table = self.compute_lookup_table(Some(num_subdivisions.unwrap_or(DEFAULT_LENGTH_SUBDIVISIONS)), Some(TValueType::Parametric));
let mut approx_curve_length = 0.;
let mut previous_point = lookup_table[0];
// Calculate approximate distance between subdivision
for current_point in lookup_table.iter().skip(1) {
// Calculate distance of subdivision
approx_curve_length += (*current_point - previous_point).length();
// Update the previous point
previous_point = *current_point;
}
approx_curve_length
}
}
}
/// Returns the parametric `t`-value that corresponds to the closest point on the curve to the provided point.
/// Uses a searching algorithm akin to binary search that can be customized using the optional [ProjectionOptions] struct.
/// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/project/solo" title="Project Demo"></iframe>
pub fn project(&self, point: DVec2, options: Option<ProjectionOptions>) -> f64 {
let options = options.unwrap_or_default();
let ProjectionOptions {
lut_size,
convergence_epsilon,
convergence_limit,
iteration_limit,
} = options;
// TODO: Consider optimizations from precomputing useful values, or using the GPU
// First find the closest point from the results of a lookup table
let lut = self.compute_lookup_table(Some(lut_size), Some(TValueType::Parametric));
let (minimum_position, minimum_distance) = utils::get_closest_point_in_lut(&lut, point);
// Get the t values to the left and right of the closest result in the lookup table
let lut_size_f64 = lut_size as f64;
let minimum_position_f64 = minimum_position as f64;
let mut left_t = (minimum_position_f64 - 1.).max(0.) / lut_size_f64;
let mut right_t = (minimum_position_f64 + 1.).min(lut_size_f64) / lut_size_f64;
// Perform a finer search by finding closest t from 5 points between [left_t, right_t] inclusive
// Choose new left_t and right_t for a smaller range around the closest t and repeat the process
let mut final_t = left_t;
let mut distance;
// Increment minimum_distance to ensure that the distance < minimum_distance comparison will be true for at least one iteration
let mut new_minimum_distance = minimum_distance + 1.;
// Maintain the previous distance to identify convergence
let mut previous_distance;
// Counter to limit the number of iterations
let mut iteration_count = 0;
// Counter to identify how many iterations have had a similar result. Used for convergence test
let mut convergence_count = 0;
// Store calculated distances to minimize unnecessary recomputations
let mut distances: [f64; NUM_DISTANCES] = [
point.distance(lut[(minimum_position as i64 - 1).max(0) as usize]),
0.,
0.,
0.,
point.distance(lut[lut_size.min(minimum_position + 1)]),
];
while left_t <= right_t && convergence_count < convergence_limit && iteration_count < iteration_limit {
previous_distance = new_minimum_distance;
let step = (right_t - left_t) / (NUM_DISTANCES as f64 - 1.);
let mut iterator_t = left_t;
let mut target_index = 0;
// Iterate through the first 4 points; the right-most point is handled separately below
for (step_index, table_distance) in distances.iter_mut().enumerate().take(4) {
// Use previously computed distance for the left most point, and compute new values for the others
if step_index == 0 {
distance = *table_distance;
} else {
distance = point.distance(self.evaluate(TValue::Parametric(iterator_t)));
*table_distance = distance;
}
if distance < new_minimum_distance {
new_minimum_distance = distance;
target_index = step_index;
final_t = iterator_t
}
iterator_t += step;
}
// Check right most edge separately since step may not perfectly add up to it (floating point errors)
if distances[NUM_DISTANCES - 1] < new_minimum_distance {
new_minimum_distance = distances[NUM_DISTANCES - 1];
final_t = right_t;
}
// Update left_t and right_t to be the t values (final_t +/- step), while handling the edges (i.e. if final_t is 0, left_t will be 0 instead of -step)
// Ensure that the t values never exceed the [0, 1] range
left_t = (final_t - step).max(0.);
right_t = (final_t + step).min(1.);
// Re-use the corresponding computed distances (target_index is the index corresponding to final_t)
// Since target_index is a usize, we can't subtract one when it is zero
distances[0] = distances[if target_index == 0 { 0 } else { target_index - 1 }];
distances[NUM_DISTANCES - 1] = distances[(target_index + 1).min(NUM_DISTANCES - 1)];
iteration_count += 1;
// update count for consecutive iterations of similar minimum distances
if previous_distance - new_minimum_distance < convergence_epsilon {
convergence_count += 1;
} else {
convergence_count = 0;
}
}
final_t
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_evaluate() {
let p1 = DVec2::new(3., 5.);
let p2 = DVec2::new(14., 3.);
let p3 = DVec2::new(19., 14.);
let p4 = DVec2::new(30., 21.);
let bezier1 = Bezier::from_quadratic_dvec2(p1, p2, p3);
assert_eq!(bezier1.evaluate(TValue::Parametric(0.5)), DVec2::new(12.5, 6.25));
let bezier2 = Bezier::from_cubic_dvec2(p1, p2, p3, p4);
assert_eq!(bezier2.evaluate(TValue::Parametric(0.5)), DVec2::new(16.5, 9.625));
}
#[test]
fn test_compute_lookup_table() {
let bezier1 = Bezier::from_quadratic_coordinates(10., 10., 30., 30., 50., 10.);
let lookup_table1 = bezier1.compute_lookup_table(Some(2), Some(TValueType::Parametric));
assert_eq!(lookup_table1, vec![bezier1.start(), bezier1.evaluate(TValue::Parametric(0.5)), bezier1.end()]);
let bezier2 = Bezier::from_cubic_coordinates(10., 10., 30., 30., 70., 70., 90., 10.);
let lookup_table2 = bezier2.compute_lookup_table(Some(4), Some(TValueType::Parametric));
assert_eq!(
lookup_table2,
vec![
bezier2.start(),
bezier2.evaluate(TValue::Parametric(0.25)),
bezier2.evaluate(TValue::Parametric(0.50)),
bezier2.evaluate(TValue::Parametric(0.75)),
bezier2.end()
]
);
}
#[test]
fn test_length() {
let p1 = DVec2::new(30., 50.);
let p2 = DVec2::new(140., 30.);
let p3 = DVec2::new(160., 170.);
let p4 = DVec2::new(77., 129.);
let bezier_linear = Bezier::from_linear_dvec2(p1, p2);
assert!(utils::f64_compare(bezier_linear.length(None), p1.distance(p2), MAX_ABSOLUTE_DIFFERENCE));
let bezier_quadratic = Bezier::from_quadratic_dvec2(p1, p2, p3);
assert!(utils::f64_compare(bezier_quadratic.length(None), 204., 1e-2));
let bezier_cubic = Bezier::from_cubic_dvec2(p1, p2, p3, p4);
assert!(utils::f64_compare(bezier_cubic.length(None), 199., 1e-2));
}
#[test]
fn test_project() {
let bezier1 = Bezier::from_cubic_coordinates(4., 4., 23., 45., 10., 30., 56., 90.);
assert_eq!(bezier1.project(DVec2::ZERO, None), 0.);
assert_eq!(bezier1.project(DVec2::new(100., 100.), None), 1.);
let bezier2 = Bezier::from_quadratic_coordinates(0., 0., 0., 100., 100., 100.);
assert_eq!(bezier2.project(DVec2::new(100., 0.), None), 0.);
}
}
| {
high = mid;
} | conditional_block |
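// --- Editor's illustrative sketch (not part of the source rows above or below) ---
// The `{ high = mid; }` arm above belongs to the bisection loop of
// `euclidean_to_parametric` (shown in full in the next copy of lookup.rs): the
// interval [low, high] is halved until the trimmed-length ratio at `mid` is
// within `error` of the requested ratio. The standalone function below sketches
// the same loop for any monotonically increasing ratio function; the names
// `bisect_ratio` and `ratio_at` are assumptions of this sketch, not crate items.
#[allow(dead_code)]
fn bisect_ratio(ratio_at: impl Fn(f64) -> f64, target: f64, error: f64) -> f64 {
    let (mut low, mut high) = (0., 1.);
    let mut mid = 0.;
    while low < high {
        mid = (low + high) / 2.;
        let test_ratio = ratio_at(mid);
        if (test_ratio - target).abs() < error {
            break;
        } else if test_ratio < target {
            low = mid;
        } else {
            high = mid;
        }
    }
    mid
}
// For example, `bisect_ratio(|t| t * t, 0.25, 1e-6)` converges towards t = 0.5.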
lookup.rs | use crate::utils::{f64_compare, TValue, TValueType};
use super::*;
/// Functionality relating to looking up properties of the `Bezier` or points along the `Bezier`.
impl Bezier {
/// Convert a euclidean distance ratio along the `Bezier` curve to a parametric `t`-value.
pub fn euclidean_to_parametric(&self, ratio: f64, error: f64) -> f64 {
if ratio < error {
return 0.;
}
if 1. - ratio < error {
return 1.;
}
let mut low = 0.;
let mut mid = 0.;
let mut high = 1.;
let total_length = self.length(None);
while low < high {
mid = (low + high) / 2.;
let test_ratio = self.trim(TValue::Parametric(0.), TValue::Parametric(mid)).length(None) / total_length;
if f64_compare(test_ratio, ratio, error) {
break;
} else if test_ratio < ratio {
low = mid;
} else {
high = mid;
}
}
mid
}
/// Convert a [TValue] to a parametric `t`-value.
pub(crate) fn t_value_to_parametric(&self, t: TValue) -> f64 {
match t {
TValue::Parametric(t) => {
assert!((0.0..=1.).contains(&t));
t
}
TValue::Euclidean(t) => {
assert!((0.0..=1.).contains(&t));
self.euclidean_to_parametric(t, DEFAULT_EUCLIDEAN_ERROR_BOUND)
}
TValue::EuclideanWithinError { t, error } => {
assert!((0.0..=1.).contains(&t));
self.euclidean_to_parametric(t, error)
}
}
}
/// Calculate the point on the curve based on the `t`-value provided.
pub(crate) fn unrestricted_parametric_evaluate(&self, t: f64) -> DVec2 {
// Basis code based off of pseudocode found here: <https://pomax.github.io/bezierinfo/#explanation>.
let t_squared = t * t;
let one_minus_t = 1. - t;
let squared_one_minus_t = one_minus_t * one_minus_t;
match self.handles {
BezierHandles::Linear => self.start.lerp(self.end, t),
BezierHandles::Quadratic { handle } => squared_one_minus_t * self.start + 2. * one_minus_t * t * handle + t_squared * self.end,
BezierHandles::Cubic { handle_start, handle_end } => {
let t_cubed = t_squared * t;
let cubed_one_minus_t = squared_one_minus_t * one_minus_t;
cubed_one_minus_t * self.start + 3. * squared_one_minus_t * t * handle_start + 3. * one_minus_t * t_squared * handle_end + t_cubed * self.end
}
}
}
/// Calculate the coordinates of the point `t` along the curve.
/// Expects `t` to be within the inclusive range `[0, 1]`.
/// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/evaluate/solo" title="Evaluate Demo"></iframe>
pub fn evaluate(&self, t: TValue) -> DVec2 {
let t = self.t_value_to_parametric(t);
self.unrestricted_parametric_evaluate(t)
}
/// Return a selection of equidistant points on the bezier curve.
/// If no value is provided for `steps`, then the function will default `steps` to be 10.
/// <iframe frameBorder="0" width="100%" height="350px" src="https://graphite.rs/libraries/bezier-rs#bezier/lookup-table/solo" title="Lookup-Table Demo"></iframe>
pub fn compute_lookup_table(&self, steps: Option<usize>, tvalue_type: Option<TValueType>) -> Vec<DVec2> {
let steps = steps.unwrap_or(DEFAULT_LUT_STEP_SIZE);
let tvalue_type = tvalue_type.unwrap_or(TValueType::Parametric);
(0..=steps)
.map(|t| {
let tvalue = match tvalue_type {
TValueType::Parametric => TValue::Parametric(t as f64 / steps as f64),
TValueType::Euclidean => TValue::Euclidean(t as f64 / steps as f64),
};
self.evaluate(tvalue)
})
.collect()
}
/// Return an approximation of the length of the bezier curve.
/// - `num_subdivisions` - Number of subdivisions used to approximate the curve. The default value is 1000.
/// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/length/solo" title="Length Demo"></iframe>
pub fn length(&self, num_subdivisions: Option<usize>) -> f64 {
match self.handles {
BezierHandles::Linear => self.start.distance(self.end),
_ => {
// Code example from <https://gamedev.stackexchange.com/questions/5373/moving-ships-between-two-planets-along-a-bezier-missing-some-equations-for-acce/5427#5427>.
// We will use an approximate approach where we split the curve into many subdivisions
// and calculate the euclidean distance between the two endpoints of the subdivision
let lookup_table = self.compute_lookup_table(Some(num_subdivisions.unwrap_or(DEFAULT_LENGTH_SUBDIVISIONS)), Some(TValueType::Parametric));
let mut approx_curve_length = 0.;
let mut previous_point = lookup_table[0];
// Calculate the approximate distance between consecutive subdivision points
for current_point in lookup_table.iter().skip(1) {
// Calculate distance of subdivision
approx_curve_length += (*current_point - previous_point).length();
// Update the previous point
previous_point = *current_point;
}
approx_curve_length
}
}
}
/// Returns the parametric `t`-value that corresponds to the closest point on the curve to the provided point.
/// Uses a searching algorithm akin to binary search that can be customized using the optional [ProjectionOptions] struct.
/// <iframe frameBorder="0" width="100%" height="300px" src="https://graphite.rs/libraries/bezier-rs#bezier/project/solo" title="Project Demo"></iframe>
pub fn project(&self, point: DVec2, options: Option<ProjectionOptions>) -> f64 {
let options = options.unwrap_or_default();
let ProjectionOptions {
lut_size,
convergence_epsilon,
convergence_limit,
iteration_limit,
} = options;
// TODO: Consider optimizations from precomputing useful values, or using the GPU
// First find the closest point from the results of a lookup table
let lut = self.compute_lookup_table(Some(lut_size), Some(TValueType::Parametric));
let (minimum_position, minimum_distance) = utils::get_closest_point_in_lut(&lut, point);
// Get the t values to the left and right of the closest result in the lookup table
let lut_size_f64 = lut_size as f64;
let minimum_position_f64 = minimum_position as f64;
let mut left_t = (minimum_position_f64 - 1.).max(0.) / lut_size_f64;
let mut right_t = (minimum_position_f64 + 1.).min(lut_size_f64) / lut_size_f64;
// Perform a finer search by finding closest t from 5 points between [left_t, right_t] inclusive
// Choose new left_t and right_t for a smaller range around the closest t and repeat the process
let mut final_t = left_t;
let mut distance;
// Increment minimum_distance to ensure that the distance < minimum_distance comparison will be true for at least one iteration
let mut new_minimum_distance = minimum_distance + 1.;
// Maintain the previous distance to identify convergence
let mut previous_distance;
// Counter to limit the number of iterations
let mut iteration_count = 0;
// Counter to identify how many iterations have had a similar result. Used for convergence test
let mut convergence_count = 0;
// Store calculated distances to minimize unnecessary recomputations
let mut distances: [f64; NUM_DISTANCES] = [
point.distance(lut[(minimum_position as i64 - 1).max(0) as usize]),
0.,
0.,
0.,
point.distance(lut[lut_size.min(minimum_position + 1)]),
];
while left_t <= right_t && convergence_count < convergence_limit && iteration_count < iteration_limit {
previous_distance = new_minimum_distance;
let step = (right_t - left_t) / (NUM_DISTANCES as f64 - 1.);
let mut iterator_t = left_t;
let mut target_index = 0;
// Iterate through the first 4 points; the right-most point is handled separately below
for (step_index, table_distance) in distances.iter_mut().enumerate().take(4) {
// Use previously computed distance for the left most point, and compute new values for the others
if step_index == 0 {
distance = *table_distance;
} else {
distance = point.distance(self.evaluate(TValue::Parametric(iterator_t)));
*table_distance = distance;
}
if distance < new_minimum_distance {
new_minimum_distance = distance;
target_index = step_index;
final_t = iterator_t
}
iterator_t += step;
}
// Check right most edge separately since step may not perfectly add up to it (floating point errors)
if distances[NUM_DISTANCES - 1] < new_minimum_distance {
new_minimum_distance = distances[NUM_DISTANCES - 1];
final_t = right_t;
}
// Update left_t and right_t to be the t values (final_t +/- step), while handling the edges (i.e. if final_t is 0, left_t will be 0 instead of -step)
// Ensure that the t values never exceed the [0, 1] range
left_t = (final_t - step).max(0.);
right_t = (final_t + step).min(1.);
// Re-use the corresponding computed distances (target_index is the index corresponding to final_t)
// Since target_index is a usize, we can't subtract one when it is zero
distances[0] = distances[if target_index == 0 { 0 } else { target_index - 1 }];
distances[NUM_DISTANCES - 1] = distances[(target_index + 1).min(NUM_DISTANCES - 1)];
iteration_count += 1;
// update count for consecutive iterations of similar minimum distances
if previous_distance - new_minimum_distance < convergence_epsilon { | } else {
convergence_count = 0;
}
}
final_t
}
}
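// --- Editor's illustrative sketch (not part of the original file) ---
// `length` above approximates arc length by sampling the curve into a lookup
// table and summing the straight-line distances between consecutive samples.
// The helper below shows just that summation step on a plain slice of points;
// the name `polyline_length` and the `[f64; 2]` point representation are
// assumptions made for this sketch rather than types used by the crate.
#[allow(dead_code)]
fn polyline_length(points: &[[f64; 2]]) -> f64 {
    points
        .windows(2)
        .map(|pair| {
            let (dx, dy) = (pair[1][0] - pair[0][0], pair[1][1] - pair[0][1]);
            (dx * dx + dy * dy).sqrt()
        })
        .sum()
}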
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_evaluate() {
let p1 = DVec2::new(3., 5.);
let p2 = DVec2::new(14., 3.);
let p3 = DVec2::new(19., 14.);
let p4 = DVec2::new(30., 21.);
let bezier1 = Bezier::from_quadratic_dvec2(p1, p2, p3);
assert_eq!(bezier1.evaluate(TValue::Parametric(0.5)), DVec2::new(12.5, 6.25));
let bezier2 = Bezier::from_cubic_dvec2(p1, p2, p3, p4);
assert_eq!(bezier2.evaluate(TValue::Parametric(0.5)), DVec2::new(16.5, 9.625));
}
#[test]
fn test_compute_lookup_table() {
let bezier1 = Bezier::from_quadratic_coordinates(10., 10., 30., 30., 50., 10.);
let lookup_table1 = bezier1.compute_lookup_table(Some(2), Some(TValueType::Parametric));
assert_eq!(lookup_table1, vec![bezier1.start(), bezier1.evaluate(TValue::Parametric(0.5)), bezier1.end()]);
let bezier2 = Bezier::from_cubic_coordinates(10., 10., 30., 30., 70., 70., 90., 10.);
let lookup_table2 = bezier2.compute_lookup_table(Some(4), Some(TValueType::Parametric));
assert_eq!(
lookup_table2,
vec![
bezier2.start(),
bezier2.evaluate(TValue::Parametric(0.25)),
bezier2.evaluate(TValue::Parametric(0.50)),
bezier2.evaluate(TValue::Parametric(0.75)),
bezier2.end()
]
);
}
#[test]
fn test_length() {
let p1 = DVec2::new(30., 50.);
let p2 = DVec2::new(140., 30.);
let p3 = DVec2::new(160., 170.);
let p4 = DVec2::new(77., 129.);
let bezier_linear = Bezier::from_linear_dvec2(p1, p2);
assert!(utils::f64_compare(bezier_linear.length(None), p1.distance(p2), MAX_ABSOLUTE_DIFFERENCE));
let bezier_quadratic = Bezier::from_quadratic_dvec2(p1, p2, p3);
assert!(utils::f64_compare(bezier_quadratic.length(None), 204., 1e-2));
let bezier_cubic = Bezier::from_cubic_dvec2(p1, p2, p3, p4);
assert!(utils::f64_compare(bezier_cubic.length(None), 199., 1e-2));
}
#[test]
fn test_project() {
let bezier1 = Bezier::from_cubic_coordinates(4., 4., 23., 45., 10., 30., 56., 90.);
assert_eq!(bezier1.project(DVec2::ZERO, None), 0.);
assert_eq!(bezier1.project(DVec2::new(100., 100.), None), 1.);
let bezier2 = Bezier::from_quadratic_coordinates(0., 0., 0., 100., 100., 100.);
assert_eq!(bezier2.project(DVec2::new(100., 0.), None), 0.);
}
} | convergence_count += 1; | random_line_split |
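// --- Editor's illustrative sketch (not part of the source rows above or below) ---
// `project` above works in two phases: a coarse scan of a lookup table to find
// the closest sample, then repeated re-sampling of a shrinking window around the
// best `t`. The standalone function below sketches that idea for a curve given
// as a closure; `coarse_steps`, the fixed refinement round count, and the
// function name are assumptions of this sketch, not the crate's
// `ProjectionOptions` behaviour.
#[allow(dead_code)]
fn project_to_curve(curve: impl Fn(f64) -> (f64, f64), point: (f64, f64)) -> f64 {
    let dist = |t: f64| {
        let (x, y) = curve(t);
        ((x - point.0).powi(2) + (y - point.1).powi(2)).sqrt()
    };
    // Phase 1: coarse scan over an evenly spaced parameter grid.
    let coarse_steps = 20;
    let mut best_t = 0.;
    let mut best_distance = f64::INFINITY;
    for i in 0..=coarse_steps {
        let t = f64::from(i) / f64::from(coarse_steps);
        let d = dist(t);
        if d < best_distance {
            best_distance = d;
            best_t = t;
        }
    }
    // Phase 2: re-sample a window around the best `t` and shrink it each round.
    let mut half_width = 1. / f64::from(coarse_steps);
    for _ in 0..20 {
        let left = (best_t - half_width).max(0.);
        let right = (best_t + half_width).min(1.);
        for i in 0..=4 {
            let t = left + (right - left) * f64::from(i) / 4.;
            let d = dist(t);
            if d < best_distance {
                best_distance = d;
                best_t = t;
            }
        }
        half_width = (right - left) / 4.;
    }
    best_t
}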
agreement.rs | // Copyright 2015-2017 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//! Key Agreement: ECDH, including X25519.
//!
//! # Example
//!
//! Note that this example uses X25519, but ECDH using NIST P-256/P-384 is done
//! exactly the same way, just substituting
//! `agreement::ECDH_P256`/`agreement::ECDH_P384` for `agreement::X25519`.
//!
//! ```
//! # fn x25519_agreement_example() -> Result<(), ring::error::Unspecified> {
//! use ring::{agreement, rand};
//! use untrusted;
//!
//! let rng = rand::SystemRandom::new();
//!
//! let my_private_key =
//! agreement::PrivateKey::<agreement::Ephemeral>::generate(&agreement::X25519, &rng)?;
//!
//! // Make `my_public_key` a byte slice containing my public key. In a real
//! // application, this would be sent to the peer in an encoded protocol
//! // message.
//! let my_public_key = my_private_key.compute_public_key()?;
//! let my_public_key = my_public_key.as_ref();
//!
//! // In a real application, the peer public key would be parsed out of a
//! // protocol message. Here we just generate one.
//! let mut peer_public_key_buf = [0u8; agreement::PUBLIC_KEY_MAX_LEN];
//! let peer_public_key = {
//! let peer_private_key =
//! agreement::PrivateKey::<agreement::Ephemeral>::generate(&agreement::X25519, &rng)?;
//! peer_private_key.compute_public_key()?
//! };
//! let peer_public_key = untrusted::Input::from(peer_public_key.as_ref());
//!
//! // In a real application, the protocol specifies how to determine what
//! // algorithm was used to generate the peer's private key. Here, we know it
//! // is X25519 since we just generated it.
//! let peer_public_key_alg = &agreement::X25519;
//!
//! let input_keying_material = my_private_key.agree(peer_public_key_alg, peer_public_key)?;
//! input_keying_material.derive(|_key_material| {
//! // In a real application, we'd apply a KDF to the key material and the
//! // public keys (as recommended in RFC 7748) and then derive session
//! // keys from the result. We omit all that here.
//! Ok(())
//! })
//! # }
//! # fn main() { x25519_agreement_example().unwrap() }
//! ```
// The "NSA Guide" steps here are from from section 3.1, "Ephemeral Unified
// Model."
use crate::{ec, error, rand};
use untrusted;
pub use crate::ec::{
curve25519::x25519::X25519,
suite_b::ecdh::{ECDH_P256, ECDH_P384},
PUBLIC_KEY_MAX_LEN,
};
use core::marker::PhantomData;
/// A key agreement algorithm.
pub struct Algorithm {
pub(crate) curve: &'static ec::Curve,
pub(crate) ecdh: fn(
out: &mut [u8],
private_key: &ec::PrivateKey,
peer_public_key: untrusted::Input,
) -> Result<(), error::Unspecified>,
}
derive_debug_via_self!(Algorithm, self.curve);
impl Eq for Algorithm {}
impl PartialEq for Algorithm {
fn eq(&self, other: &Algorithm) -> bool { self.curve.id == other.curve.id }
}
/// How many times the key may be used.
pub trait Lifetime: self::sealed::Sealed {}
/// The key may be used at most once.
pub struct Ephemeral {}
impl Lifetime for Ephemeral {}
impl self::sealed::Sealed for Ephemeral {}
/// The key may be used more than once.
pub struct Static {}
impl Lifetime for Static {}
impl self::sealed::Sealed for Static {}
/// A key pair for key agreement.
pub struct KeyPair<U: Lifetime> {
private_key: PrivateKey<U>,
public_key: PublicKey,
}
impl<U: Lifetime> KeyPair<U> {
/// Generate a new key pair for the given algorithm.
///
/// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`.
pub fn generate(
alg: &'static Algorithm, rng: &rand::SecureRandom,
) -> Result<Self, error::Unspecified> |
/// The private key.
pub fn private_key(&self) -> &PrivateKey<U> { &self.private_key }
/// The public key.
pub fn public_key(&self) -> &PublicKey { &self.public_key }
/// Split the key pair apart.
pub fn split(self) -> (PrivateKey<U>, PublicKey) { (self.private_key, self.public_key) }
}
/// A public key for key agreement.
pub struct PublicKey {
bytes: [u8; PUBLIC_KEY_MAX_LEN],
alg: &'static Algorithm,
}
impl AsRef<[u8]> for PublicKey {
#[inline]
fn as_ref(&self) -> &[u8] { &self.bytes[..self.alg.curve.public_key_len] }
}
/// A private key for key agreement.
pub struct PrivateKey<U: Lifetime> {
private_key: ec::PrivateKey,
alg: &'static Algorithm,
usage: PhantomData<U>,
}
impl<U: Lifetime> PrivateKey<U> {
/// Generate a new private key for the given algorithm.
///
/// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`.
pub fn generate(
alg: &'static Algorithm, rng: &rand::SecureRandom,
) -> Result<Self, error::Unspecified> {
// NSA Guide Step 1.
//
// This only handles the key generation part of step 1. The rest of
// step one is done by `compute_public_key()`.
let private_key = ec::PrivateKey::generate(&alg.curve, rng)?;
Ok(Self {
private_key,
alg,
usage: PhantomData,
})
}
/// The key exchange algorithm.
#[inline]
pub fn algorithm(&self) -> &'static Algorithm { self.alg }
/// Computes the public key from the private key's value and returns it,
/// encoded in the standard form for the algorithm.
#[inline(always)]
pub fn compute_public_key(&self) -> Result<PublicKey, error::Unspecified> {
// NSA Guide Step 1.
//
// Obviously, this only handles the part of Step 1 between the private
// key generation and the sending of the public key to the peer. The
// returned public key is what should be sent to the peer.
let mut public_key = PublicKey {
bytes: [0; PUBLIC_KEY_MAX_LEN],
alg: self.alg,
};
self.private_key
.compute_public_key(&self.alg.curve, &mut public_key.bytes)?;
Ok(public_key)
}
/// Performs a key agreement with this private key and the given public key.
///
/// Since `self` is consumed, it will not be usable after calling `agree`.
///
/// `peer_public_key_alg` is the algorithm/curve for the peer's public key
/// point; `agree` will return `Err(error_value)` if it does not match this
/// private key's algorithm/curve.
///
/// `peer_public_key` is the peer's public key. `agree` verifies that it is
/// encoded in the standard form for the algorithm and that the key is
/// *valid*; see the algorithm's documentation for details on how keys are
/// to be encoded and what constitutes a valid key for that algorithm.
///
/// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`.
pub fn agree(
self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input,
) -> Result<InputKeyMaterial, error::Unspecified> {
agree_(
&self.private_key,
self.alg,
peer_public_key_alg,
peer_public_key,
)
}
#[cfg(test)]
pub(crate) fn bytes(&self, curve: &ec::Curve) -> &[u8] { self.private_key.bytes(curve) }
}
impl PrivateKey<Static> {
/// Performs a key agreement with a static private key and the given
/// public key.
///
/// `peer_public_key_alg` is the algorithm/curve for the peer's public key
/// point; `agree_static` will return `Err(error_value)` if it does not
/// match `my_private_key's` algorithm/curve.
///
/// `peer_public_key` is the peer's public key. `agree_static` verifies
/// that it is encoded in the standard form for the algorithm and that
/// the key is *valid*; see the algorithm's documentation for details on
/// how keys are to be encoded and what constitutes a valid key for that
/// algorithm.
///
/// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`.
pub fn agree_static(
&self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input,
) -> Result<InputKeyMaterial, error::Unspecified> {
agree_(
&self.private_key,
self.alg,
peer_public_key_alg,
peer_public_key,
)
}
pub fn from_bytes(
alg: &'static Algorithm, bytes: untrusted::Input
) -> Result<Self, error::Unspecified> {
let private_key = ec::PrivateKey::from_bytes(&alg.curve, bytes)?;
Ok(Self {
private_key,
alg,
usage: PhantomData,
})
}
pub fn bytes(
&self, alg: &'static Algorithm
) -> &[u8] {
self.private_key.bytes(&alg.curve)
}
}
fn agree_(
my_private_key: &ec::PrivateKey, my_alg: &Algorithm, peer_public_key_alg: &Algorithm,
peer_public_key: untrusted::Input,
) -> Result<InputKeyMaterial, error::Unspecified> {
let alg = &my_alg;
// NSA Guide Prerequisite 1.
//
// The domain parameters are hard-coded. This check verifies that the
// peer's public key's domain parameters match the domain parameters of
// this private key.
if peer_public_key_alg != *alg {
return Err(error::Unspecified);
}
// NSA Guide Prerequisite 2, regarding which KDFs are allowed, is delegated
// to the caller.
// NSA Guide Prerequisite 3, "Prior to or during the key-agreement process,
// each party shall obtain the identifier associated with the other party
// during the key-agreement scheme," is delegated to the caller.
// NSA Guide Step 1 is handled by `Self::generate()` and
// `Self::compute_public_key()`.
// NSA Guide Steps 2, 3, and 4.
//
// We have a pretty liberal interpretation of the NIST's spec's "Destroy"
// that doesn't meet the NSA requirement to "zeroize."
let mut ikm = InputKeyMaterial {
bytes: [0; ec::ELEM_MAX_BYTES],
len: alg.curve.elem_and_scalar_len,
};
(alg.ecdh)(&mut ikm.bytes[..ikm.len], my_private_key, peer_public_key)?;
// NSA Guide Steps 5 and 6 are deferred to `InputKeyMaterial::derive`.
Ok(ikm)
}
/// The result of a key agreement operation, to be fed into a KDF.
///
/// Intentionally not `Clone` or `Copy` since the value should only be
/// used once.
#[must_use]
pub struct InputKeyMaterial {
bytes: [u8; ec::ELEM_MAX_BYTES],
len: usize,
}
mod sealed {
pub trait Sealed {}
}
impl InputKeyMaterial {
/// Calls `kdf` with the raw key material and then returns what `kdf`
/// returns, consuming `Self` so that the key material can only be used
/// once.
pub fn derive<F, R>(self, kdf: F) -> R
where
F: FnOnce(&[u8]) -> R,
{
kdf(&self.bytes[..self.len])
// NSA Guide Steps 5 and 6.
// Again, we have a pretty liberal interpretation of the NIST's spec's
// "Destroy" that doesn't meet the NSA requirement to "zeroize."
}
}
| {
// NSA Guide Step 1.
let private_key = ec::PrivateKey::generate(&alg.curve, rng)?;
let mut public_key = PublicKey {
bytes: [0; PUBLIC_KEY_MAX_LEN],
alg,
};
private_key.compute_public_key(&alg.curve, &mut public_key.bytes)?;
Ok(Self {
private_key: PrivateKey {
private_key,
alg,
usage: PhantomData,
},
public_key,
})
} | identifier_body |
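// --- Editor's illustrative sketch (not part of the original file) ---
// The module example above uses an ephemeral key; the `Static` lifetime exists
// for a private key that must agree with several peers. The sketch below
// strings together `from_bytes`, `compute_public_key`, and `agree_static` from
// the API shown above. The function name, `stored_key_bytes`, and the empty KDF
// closure are assumptions made purely for illustration.
#[allow(dead_code)]
fn static_agreement_sketch(
    stored_key_bytes: &[u8],
    peer_public_key: untrusted::Input,
) -> Result<(), error::Unspecified> {
    let my_private_key =
        PrivateKey::<Static>::from_bytes(&X25519, untrusted::Input::from(stored_key_bytes))?;
    // The public key can be recomputed and re-sent for every new handshake.
    let _my_public_key = my_private_key.compute_public_key()?;
    // Unlike `agree`, `agree_static` borrows the key, so it remains usable afterwards.
    let ikm = my_private_key.agree_static(&X25519, peer_public_key)?;
    ikm.derive(|_key_material| {
        // A real caller would feed `_key_material` (and both public keys) into a KDF here.
        Ok(())
    })
}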
agreement.rs | // Copyright 2015-2017 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//! Key Agreement: ECDH, including X25519.
//!
//! # Example
//!
//! Note that this example uses X25519, but ECDH using NIST P-256/P-384 is done
//! exactly the same way, just substituting
//! `agreement::ECDH_P256`/`agreement::ECDH_P384` for `agreement::X25519`.
//!
//! ```
//! # fn x25519_agreement_example() -> Result<(), ring::error::Unspecified> {
//! use ring::{agreement, rand};
//! use untrusted;
//!
//! let rng = rand::SystemRandom::new();
//!
//! let my_private_key =
//! agreement::PrivateKey::<agreement::Ephemeral>::generate(&agreement::X25519, &rng)?;
//!
//! // Make `my_public_key` a byte slice containing my public key. In a real
//! // application, this would be sent to the peer in an encoded protocol
//! // message.
//! let my_public_key = my_private_key.compute_public_key()?;
//! let my_public_key = my_public_key.as_ref();
//!
//! // In a real application, the peer public key would be parsed out of a
//! // protocol message. Here we just generate one.
//! let mut peer_public_key_buf = [0u8; agreement::PUBLIC_KEY_MAX_LEN];
//! let peer_public_key = {
//! let peer_private_key =
//! agreement::PrivateKey::<agreement::Ephemeral>::generate(&agreement::X25519, &rng)?;
//! peer_private_key.compute_public_key()?
//! };
//! let peer_public_key = untrusted::Input::from(peer_public_key.as_ref());
//!
//! // In a real application, the protocol specifies how to determine what
//! // algorithm was used to generate the peer's private key. Here, we know it
//! // is X25519 since we just generated it.
//! let peer_public_key_alg = &agreement::X25519;
//!
//! let input_keying_material = my_private_key.agree(peer_public_key_alg, peer_public_key)?;
//! input_keying_material.derive(|_key_material| {
//! // In a real application, we'd apply a KDF to the key material and the
//! // public keys (as recommended in RFC 7748) and then derive session
//! // keys from the result. We omit all that here.
//! Ok(())
//! })
//! # }
//! # fn main() { x25519_agreement_example().unwrap() }
//! ```
// The "NSA Guide" steps here are from from section 3.1, "Ephemeral Unified
// Model."
use crate::{ec, error, rand};
use untrusted;
pub use crate::ec::{
curve25519::x25519::X25519,
suite_b::ecdh::{ECDH_P256, ECDH_P384},
PUBLIC_KEY_MAX_LEN,
};
use core::marker::PhantomData;
/// A key agreement algorithm.
pub struct Algorithm {
pub(crate) curve: &'static ec::Curve,
pub(crate) ecdh: fn(
out: &mut [u8],
private_key: &ec::PrivateKey,
peer_public_key: untrusted::Input,
) -> Result<(), error::Unspecified>,
}
derive_debug_via_self!(Algorithm, self.curve);
impl Eq for Algorithm {}
impl PartialEq for Algorithm {
fn eq(&self, other: &Algorithm) -> bool { self.curve.id == other.curve.id }
}
/// How many times the key may be used.
pub trait Lifetime: self::sealed::Sealed {}
/// The key may be used at most once.
pub struct Ephemeral {}
impl Lifetime for Ephemeral {}
impl self::sealed::Sealed for Ephemeral {}
/// The key may be used more than once.
pub struct Static {}
impl Lifetime for Static {}
impl self::sealed::Sealed for Static {}
/// A key pair for key agreement.
pub struct KeyPair<U: Lifetime> {
private_key: PrivateKey<U>,
public_key: PublicKey,
}
impl<U: Lifetime> KeyPair<U> {
/// Generate a new key pair for the given algorithm.
///
/// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`.
pub fn generate(
alg: &'static Algorithm, rng: &rand::SecureRandom,
) -> Result<Self, error::Unspecified> {
// NSA Guide Step 1.
let private_key = ec::PrivateKey::generate(&alg.curve, rng)?;
let mut public_key = PublicKey {
bytes: [0; PUBLIC_KEY_MAX_LEN],
alg,
};
private_key.compute_public_key(&alg.curve, &mut public_key.bytes)?;
Ok(Self {
private_key: PrivateKey {
private_key,
alg,
usage: PhantomData,
},
public_key,
})
}
/// The private key.
pub fn private_key(&self) -> &PrivateKey<U> { &self.private_key }
/// The public key.
pub fn public_key(&self) -> &PublicKey { &self.public_key }
/// Split the key pair apart.
pub fn split(self) -> (PrivateKey<U>, PublicKey) { (self.private_key, self.public_key) }
}
/// A public key for key agreement.
pub struct PublicKey {
bytes: [u8; PUBLIC_KEY_MAX_LEN],
alg: &'static Algorithm,
}
impl AsRef<[u8]> for PublicKey {
#[inline]
fn as_ref(&self) -> &[u8] { &self.bytes[..self.alg.curve.public_key_len] }
}
/// A private key for key agreement.
pub struct PrivateKey<U: Lifetime> {
private_key: ec::PrivateKey,
alg: &'static Algorithm,
usage: PhantomData<U>,
}
impl<U: Lifetime> PrivateKey<U> {
/// Generate a new private key for the given algorithm.
///
/// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`.
pub fn generate(
alg: &'static Algorithm, rng: &rand::SecureRandom,
) -> Result<Self, error::Unspecified> {
// NSA Guide Step 1.
//
// This only handles the key generation part of step 1. The rest of
// step one is done by `compute_public_key()`.
let private_key = ec::PrivateKey::generate(&alg.curve, rng)?;
Ok(Self {
private_key,
alg,
usage: PhantomData,
})
}
/// The key exchange algorithm.
#[inline]
pub fn algorithm(&self) -> &'static Algorithm { self.alg }
/// Computes the public key from the private key's value and returns it,
/// encoded in the standard form for the algorithm.
#[inline(always)]
pub fn compute_public_key(&self) -> Result<PublicKey, error::Unspecified> {
// NSA Guide Step 1.
//
// Obviously, this only handles the part of Step 1 between the private
// key generation and the sending of the public key to the peer. The
// returned public key is what should be sent to the peer.
let mut public_key = PublicKey {
bytes: [0; PUBLIC_KEY_MAX_LEN],
alg: self.alg,
};
self.private_key
.compute_public_key(&self.alg.curve, &mut public_key.bytes)?;
Ok(public_key)
}
/// Performs a key agreement with this private key and the given public key.
///
/// Since `self` is consumed, it will not be usable after calling `agree`.
///
/// `peer_public_key_alg` is the algorithm/curve for the peer's public key
/// point; `agree` will return `Err(error_value)` if it does not match this
/// private key's algorithm/curve.
///
/// `peer_public_key` is the peer's public key. `agree` verifies that it is
/// encoded in the standard form for the algorithm and that the key is
/// *valid*; see the algorithm's documentation for details on how keys are
/// to be encoded and what constitutes a valid key for that algorithm.
///
/// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`.
pub fn agree(
self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input,
) -> Result<InputKeyMaterial, error::Unspecified> {
agree_(
&self.private_key,
self.alg,
peer_public_key_alg,
peer_public_key,
)
}
#[cfg(test)]
pub(crate) fn bytes(&self, curve: &ec::Curve) -> &[u8] { self.private_key.bytes(curve) }
}
impl PrivateKey<Static> {
/// Performs a key agreement with a static private key and the given
/// public key.
///
/// `peer_public_key_alg` is the algorithm/curve for the peer's public key
/// point; `agree_static` will return `Err(error_value)` if it does not
/// match `my_private_key's` algorithm/curve.
///
/// `peer_public_key` is the peer's public key. `agree_static` verifies
/// that it is encoded in the standard form for the algorithm and that
/// the key is *valid*; see the algorithm's documentation for details on
/// how keys are to be encoded and what constitutes a valid key for that
/// algorithm.
///
/// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`.
pub fn agree_static(
&self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input,
) -> Result<InputKeyMaterial, error::Unspecified> {
agree_(
&self.private_key,
self.alg,
peer_public_key_alg,
peer_public_key,
)
}
pub fn from_bytes(
alg: &'static Algorithm, bytes: untrusted::Input
) -> Result<Self, error::Unspecified> {
let private_key = ec::PrivateKey::from_bytes(&alg.curve, bytes)?;
Ok(Self {
private_key,
alg,
usage: PhantomData,
})
}
pub fn bytes(
&self, alg: &'static Algorithm
) -> &[u8] {
self.private_key.bytes(&alg.curve)
}
}
fn agree_(
my_private_key: &ec::PrivateKey, my_alg: &Algorithm, peer_public_key_alg: &Algorithm,
peer_public_key: untrusted::Input,
) -> Result<InputKeyMaterial, error::Unspecified> {
let alg = &my_alg;
// NSA Guide Prerequisite 1.
//
// The domain parameters are hard-coded. This check verifies that the
// peer's public key's domain parameters match the domain parameters of
// this private key.
if peer_public_key_alg != *alg {
return Err(error::Unspecified);
}
// NSA Guide Prerequisite 2, regarding which KDFs are allowed, is delegated
// to the caller.
// NSA Guide Prerequisite 3, "Prior to or during the key-agreement process,
// each party shall obtain the identifier associated with the other party
// during the key-agreement scheme," is delegated to the caller.
// NSA Guide Step 1 is handled by `Self::generate()` and
// `Self::compute_public_key()`.
// NSA Guide Steps 2, 3, and 4.
//
// We have a pretty liberal interpretation of the NIST's spec's "Destroy"
// that doesn't meet the NSA requirement to "zeroize."
let mut ikm = InputKeyMaterial {
bytes: [0; ec::ELEM_MAX_BYTES],
len: alg.curve.elem_and_scalar_len,
};
(alg.ecdh)(&mut ikm.bytes[..ikm.len], my_private_key, peer_public_key)?;
// NSA Guide Steps 5 and 6 are deferred to `InputKeyMaterial::derive`.
Ok(ikm)
}
/// The result of a key agreement operation, to be fed into a KDF.
///
/// Intentionally not `Clone` or `Copy` since the value should only be
/// used once.
#[must_use]
pub struct | {
bytes: [u8; ec::ELEM_MAX_BYTES],
len: usize,
}
mod sealed {
pub trait Sealed {}
}
impl InputKeyMaterial {
/// Calls `kdf` with the raw key material and then returns what `kdf`
/// returns, consuming `Self` so that the key material can only be used
/// once.
pub fn derive<F, R>(self, kdf: F) -> R
where
F: FnOnce(&[u8]) -> R,
{
kdf(&self.bytes[..self.len])
// NSA Guide Steps 5 and 6.
// Again, we have a pretty liberal interpretation of the NIST's spec's
// "Destroy" that doesn't meet the NSA requirement to "zeroize."
}
}
| InputKeyMaterial | identifier_name |
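// --- Editor's illustrative sketch (not part of the original file) ---
// `KeyPair::generate` above produces the private and public halves together,
// which avoids a separate `compute_public_key` call. A minimal use of the API
// shown above could look like the following; `send_to_peer` is a stand-in for
// whatever transport the application uses and is purely an assumption here.
#[allow(dead_code)]
fn key_pair_sketch(rng: &rand::SecureRandom) -> Result<(), error::Unspecified> {
    let pair = KeyPair::<Ephemeral>::generate(&X25519, rng)?;
    send_to_peer(pair.public_key().as_ref());
    // `split` hands out both halves, e.g. to store the private key in a session.
    let (_private_key, _public_key) = pair.split();
    Ok(())
}
#[allow(dead_code)]
fn send_to_peer(_encoded_public_key: &[u8]) { /* transport is out of scope here */ }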
agreement.rs | // Copyright 2015-2017 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//! Key Agreement: ECDH, including X25519.
//!
//! # Example
//!
//! Note that this example uses X25519, but ECDH using NIST P-256/P-384 is done
//! exactly the same way, just substituting
//! `agreement::ECDH_P256`/`agreement::ECDH_P384` for `agreement::X25519`.
//!
//! ```
//! # fn x25519_agreement_example() -> Result<(), ring::error::Unspecified> {
//! use ring::{agreement, rand};
//! use untrusted;
//!
//! let rng = rand::SystemRandom::new();
//!
//! let my_private_key =
//! agreement::PrivateKey::<agreement::Ephemeral>::generate(&agreement::X25519, &rng)?;
//!
//! // Make `my_public_key` a byte slice containing my public key. In a real
//! // application, this would be sent to the peer in an encoded protocol
//! // message.
//! let my_public_key = my_private_key.compute_public_key()?;
//! let my_public_key = my_public_key.as_ref();
//!
//! // In a real application, the peer public key would be parsed out of a
//! // protocol message. Here we just generate one.
//! let mut peer_public_key_buf = [0u8; agreement::PUBLIC_KEY_MAX_LEN];
//! let peer_public_key = {
//! let peer_private_key =
//! agreement::PrivateKey::<agreement::Ephemeral>::generate(&agreement::X25519, &rng)?;
//! peer_private_key.compute_public_key()?
//! };
//! let peer_public_key = untrusted::Input::from(peer_public_key.as_ref());
//!
//! // In a real application, the protocol specifies how to determine what
//! // algorithm was used to generate the peer's private key. Here, we know it
//! // is X25519 since we just generated it.
//! let peer_public_key_alg = &agreement::X25519;
//!
//! let input_keying_material = my_private_key.agree(peer_public_key_alg, peer_public_key)?;
//! input_keying_material.derive(|_key_material| {
//! // In a real application, we'd apply a KDF to the key material and the
//! // public keys (as recommended in RFC 7748) and then derive session
//! // keys from the result. We omit all that here.
//! Ok(())
//! })
//! # }
//! # fn main() { x25519_agreement_example().unwrap() }
//! ```
// The "NSA Guide" steps here are from from section 3.1, "Ephemeral Unified
// Model."
use crate::{ec, error, rand};
use untrusted;
pub use crate::ec::{
curve25519::x25519::X25519,
suite_b::ecdh::{ECDH_P256, ECDH_P384},
PUBLIC_KEY_MAX_LEN,
};
use core::marker::PhantomData;
/// A key agreement algorithm.
pub struct Algorithm {
pub(crate) curve: &'static ec::Curve,
pub(crate) ecdh: fn(
out: &mut [u8],
private_key: &ec::PrivateKey,
peer_public_key: untrusted::Input,
) -> Result<(), error::Unspecified>,
}
derive_debug_via_self!(Algorithm, self.curve);
impl Eq for Algorithm {}
impl PartialEq for Algorithm {
fn eq(&self, other: &Algorithm) -> bool { self.curve.id == other.curve.id }
}
/// How many times the key may be used.
pub trait Lifetime: self::sealed::Sealed {}
/// The key may be used at most once.
pub struct Ephemeral {}
impl Lifetime for Ephemeral {}
impl self::sealed::Sealed for Ephemeral {}
/// The key may be used more than once.
pub struct Static {}
impl Lifetime for Static {}
impl self::sealed::Sealed for Static {}
/// A key pair for key agreement.
pub struct KeyPair<U: Lifetime> {
private_key: PrivateKey<U>,
public_key: PublicKey,
}
impl<U: Lifetime> KeyPair<U> {
/// Generate a new key pair for the given algorithm.
///
/// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`.
pub fn generate(
alg: &'static Algorithm, rng: &rand::SecureRandom,
) -> Result<Self, error::Unspecified> {
// NSA Guide Step 1.
let private_key = ec::PrivateKey::generate(&alg.curve, rng)?;
let mut public_key = PublicKey {
bytes: [0; PUBLIC_KEY_MAX_LEN],
alg,
};
private_key.compute_public_key(&alg.curve, &mut public_key.bytes)?;
Ok(Self {
private_key: PrivateKey {
private_key,
alg,
usage: PhantomData,
},
public_key,
})
}
/// The private key.
pub fn private_key(&self) -> &PrivateKey<U> { &self.private_key }
/// The public key.
pub fn public_key(&self) -> &PublicKey { &self.public_key }
/// Split the key pair apart.
pub fn split(self) -> (PrivateKey<U>, PublicKey) { (self.private_key, self.public_key) }
}
/// A public key for key agreement.
pub struct PublicKey {
bytes: [u8; PUBLIC_KEY_MAX_LEN],
alg: &'static Algorithm,
}
impl AsRef<[u8]> for PublicKey {
#[inline]
fn as_ref(&self) -> &[u8] { &self.bytes[..self.alg.curve.public_key_len] }
}
/// A private key for key agreement.
pub struct PrivateKey<U: Lifetime> {
private_key: ec::PrivateKey,
alg: &'static Algorithm,
usage: PhantomData<U>,
}
impl<U: Lifetime> PrivateKey<U> {
/// Generate a new private key for the given algorithm.
///
/// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`.
pub fn generate(
alg: &'static Algorithm, rng: &rand::SecureRandom,
) -> Result<Self, error::Unspecified> {
// NSA Guide Step 1.
//
// This only handles the key generation part of step 1. The rest of
// step one is done by `compute_public_key()`.
let private_key = ec::PrivateKey::generate(&alg.curve, rng)?;
Ok(Self {
private_key,
alg,
usage: PhantomData,
})
}
/// The key exchange algorithm.
#[inline]
pub fn algorithm(&self) -> &'static Algorithm { self.alg }
/// Computes the public key from the private key's value and returns it,
/// encoded in the standard form for the algorithm.
#[inline(always)]
pub fn compute_public_key(&self) -> Result<PublicKey, error::Unspecified> {
// NSA Guide Step 1.
//
// Obviously, this only handles the part of Step 1 between the private
// key generation and the sending of the public key to the peer. The
// returned public key is what should be sent to the peer.
let mut public_key = PublicKey {
bytes: [0; PUBLIC_KEY_MAX_LEN], | };
self.private_key
.compute_public_key(&self.alg.curve, &mut public_key.bytes)?;
Ok(public_key)
}
/// Performs a key agreement with this private key and the given public key.
///
/// Since `self` is consumed, it will not be usable after calling `agree`.
///
/// `peer_public_key_alg` is the algorithm/curve for the peer's public key
/// point; `agree` will return `Err(error_value)` if it does not match this
/// private key's algorithm/curve.
///
/// `peer_public_key` is the peer's public key. `agree` verifies that it is
/// encoded in the standard form for the algorithm and that the key is
/// *valid*; see the algorithm's documentation for details on how keys are
/// to be encoded and what constitutes a valid key for that algorithm.
///
/// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`.
pub fn agree(
self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input,
) -> Result<InputKeyMaterial, error::Unspecified> {
agree_(
&self.private_key,
self.alg,
peer_public_key_alg,
peer_public_key,
)
}
#[cfg(test)]
pub(crate) fn bytes(&self, curve: &ec::Curve) -> &[u8] { self.private_key.bytes(curve) }
}
impl PrivateKey<Static> {
/// Performs a key agreement with a static private key and the given
/// public key.
///
/// `peer_public_key_alg` is the algorithm/curve for the peer's public key
/// point; `agree_static` will return `Err(error_value)` if it does not
/// match `my_private_key's` algorithm/curve.
///
/// `peer_public_key` is the peer's public key. `agree_static` verifies
/// that it is encoded in the standard form for the algorithm and that
/// the key is *valid*; see the algorithm's documentation for details on
/// how keys are to be encoded and what constitutes a valid key for that
/// algorithm.
///
/// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`.
pub fn agree_static(
&self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input,
) -> Result<InputKeyMaterial, error::Unspecified> {
agree_(
&self.private_key,
self.alg,
peer_public_key_alg,
peer_public_key,
)
}
pub fn from_bytes(
alg: &'static Algorithm, bytes: untrusted::Input
) -> Result<Self, error::Unspecified> {
let private_key = ec::PrivateKey::from_bytes(&alg.curve, bytes)?;
Ok(Self {
private_key,
alg,
usage: PhantomData,
})
}
pub fn bytes(
&self, alg: &'static Algorithm
) -> &[u8] {
self.private_key.bytes(&alg.curve)
}
}
fn agree_(
my_private_key: &ec::PrivateKey, my_alg: &Algorithm, peer_public_key_alg: &Algorithm,
peer_public_key: untrusted::Input,
) -> Result<InputKeyMaterial, error::Unspecified> {
let alg = &my_alg;
// NSA Guide Prerequisite 1.
//
// The domain parameters are hard-coded. This check verifies that the
// peer's public key's domain parameters match the domain parameters of
// this private key.
if peer_public_key_alg != *alg {
return Err(error::Unspecified);
}
// NSA Guide Prerequisite 2, regarding which KDFs are allowed, is delegated
// to the caller.
// NSA Guide Prerequisite 3, "Prior to or during the key-agreement process,
// each party shall obtain the identifier associated with the other party
// during the key-agreement scheme," is delegated to the caller.
// NSA Guide Step 1 is handled by `Self::generate()` and
// `Self::compute_public_key()`.
// NSA Guide Steps 2, 3, and 4.
//
// We have a pretty liberal interpretation of the NIST's spec's "Destroy"
// that doesn't meet the NSA requirement to "zeroize."
let mut ikm = InputKeyMaterial {
bytes: [0; ec::ELEM_MAX_BYTES],
len: alg.curve.elem_and_scalar_len,
};
(alg.ecdh)(&mut ikm.bytes[..ikm.len], my_private_key, peer_public_key)?;
// NSA Guide Steps 5 and 6 are deferred to `InputKeyMaterial::derive`.
Ok(ikm)
}
/// The result of a key agreement operation, to be fed into a KDF.
///
/// Intentionally not `Clone` or `Copy` since the value should only be
/// used once.
#[must_use]
pub struct InputKeyMaterial {
bytes: [u8; ec::ELEM_MAX_BYTES],
len: usize,
}
mod sealed {
pub trait Sealed {}
}
impl InputKeyMaterial {
/// Calls `kdf` with the raw key material and then returns what `kdf`
/// returns, consuming `Self` so that the key material can only be used
/// once.
pub fn derive<F, R>(self, kdf: F) -> R
where
F: FnOnce(&[u8]) -> R,
{
kdf(&self.bytes[..self.len])
// NSA Guide Steps 5 and 6.
// Again, we have a pretty liberal interpretation of the NIST's spec's
// "Destroy" that doesn't meet the NSA requirement to "zeroize."
}
} | alg: self.alg, | random_line_split |
agreement.rs | // Copyright 2015-2017 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//! Key Agreement: ECDH, including X25519.
//!
//! # Example
//!
//! Note that this example uses X25519, but ECDH using NIST P-256/P-384 is done
//! exactly the same way, just substituting
//! `agreement::ECDH_P256`/`agreement::ECDH_P384` for `agreement::X25519`.
//!
//! ```
//! # fn x25519_agreement_example() -> Result<(), ring::error::Unspecified> {
//! use ring::{agreement, rand};
//! use untrusted;
//!
//! let rng = rand::SystemRandom::new();
//!
//! let my_private_key =
//! agreement::PrivateKey::<agreement::Ephemeral>::generate(&agreement::X25519, &rng)?;
//!
//! // Make `my_public_key` a byte slice containing my public key. In a real
//! // application, this would be sent to the peer in an encoded protocol
//! // message.
//! let my_public_key = my_private_key.compute_public_key()?;
//! let my_public_key = my_public_key.as_ref();
//!
//! // In a real application, the peer public key would be parsed out of a
//! // protocol message. Here we just generate one.
//! let mut peer_public_key_buf = [0u8; agreement::PUBLIC_KEY_MAX_LEN];
//! let peer_public_key = {
//! let peer_private_key =
//! agreement::PrivateKey::<agreement::Ephemeral>::generate(&agreement::X25519, &rng)?;
//! peer_private_key.compute_public_key()?
//! };
//! let peer_public_key = untrusted::Input::from(peer_public_key.as_ref());
//!
//! // In a real application, the protocol specifies how to determine what
//! // algorithm was used to generate the peer's private key. Here, we know it
//! // is X25519 since we just generated it.
//! let peer_public_key_alg = &agreement::X25519;
//!
//! let input_keying_material = my_private_key.agree(peer_public_key_alg, peer_public_key)?;
//! input_keying_material.derive(|_key_material| {
//! // In a real application, we'd apply a KDF to the key material and the
//! // public keys (as recommended in RFC 7748) and then derive session
//! // keys from the result. We omit all that here.
//! Ok(())
//! })
//! # }
//! # fn main() { x25519_agreement_example().unwrap() }
//! ```
// The "NSA Guide" steps here are from from section 3.1, "Ephemeral Unified
// Model."
use crate::{ec, error, rand};
use untrusted;
pub use crate::ec::{
curve25519::x25519::X25519,
suite_b::ecdh::{ECDH_P256, ECDH_P384},
PUBLIC_KEY_MAX_LEN,
};
use core::marker::PhantomData;
/// A key agreement algorithm.
pub struct Algorithm {
pub(crate) curve: &'static ec::Curve,
pub(crate) ecdh: fn(
out: &mut [u8],
private_key: &ec::PrivateKey,
peer_public_key: untrusted::Input,
) -> Result<(), error::Unspecified>,
}
derive_debug_via_self!(Algorithm, self.curve);
impl Eq for Algorithm {}
impl PartialEq for Algorithm {
fn eq(&self, other: &Algorithm) -> bool { self.curve.id == other.curve.id }
}
/// How many times the key may be used.
pub trait Lifetime: self::sealed::Sealed {}
/// The key may be used at most once.
pub struct Ephemeral {}
impl Lifetime for Ephemeral {}
impl self::sealed::Sealed for Ephemeral {}
/// The key may be used more than once.
pub struct Static {}
impl Lifetime for Static {}
impl self::sealed::Sealed for Static {}
/// A key pair for key agreement.
pub struct KeyPair<U: Lifetime> {
private_key: PrivateKey<U>,
public_key: PublicKey,
}
impl<U: Lifetime> KeyPair<U> {
/// Generate a new key pair for the given algorithm.
///
/// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`.
pub fn generate(
alg: &'static Algorithm, rng: &rand::SecureRandom,
) -> Result<Self, error::Unspecified> {
// NSA Guide Step 1.
let private_key = ec::PrivateKey::generate(&alg.curve, rng)?;
let mut public_key = PublicKey {
bytes: [0; PUBLIC_KEY_MAX_LEN],
alg,
};
private_key.compute_public_key(&alg.curve, &mut public_key.bytes)?;
Ok(Self {
private_key: PrivateKey {
private_key,
alg,
usage: PhantomData,
},
public_key,
})
}
/// The private key.
pub fn private_key(&self) -> &PrivateKey<U> { &self.private_key }
/// The public key.
pub fn public_key(&self) -> &PublicKey { &self.public_key }
/// Split the key pair apart.
pub fn split(self) -> (PrivateKey<U>, PublicKey) { (self.private_key, self.public_key) }
}
/// A public key for key agreement.
pub struct PublicKey {
bytes: [u8; PUBLIC_KEY_MAX_LEN],
alg: &'static Algorithm,
}
impl AsRef<[u8]> for PublicKey {
#[inline]
fn as_ref(&self) -> &[u8] { &self.bytes[..self.alg.curve.public_key_len] }
}
/// A private key for key agreement.
pub struct PrivateKey<U: Lifetime> {
private_key: ec::PrivateKey,
alg: &'static Algorithm,
usage: PhantomData<U>,
}
impl<U: Lifetime> PrivateKey<U> {
/// Generate a new private key for the given algorithm.
///
/// C analog: `EC_KEY_new_by_curve_name` + `EC_KEY_generate_key`.
pub fn generate(
alg: &'static Algorithm, rng: &rand::SecureRandom,
) -> Result<Self, error::Unspecified> {
// NSA Guide Step 1.
//
// This only handles the key generation part of step 1. The rest of
// step one is done by `compute_public_key()`.
let private_key = ec::PrivateKey::generate(&alg.curve, rng)?;
Ok(Self {
private_key,
alg,
usage: PhantomData,
})
}
/// The key exchange algorithm.
#[inline]
pub fn algorithm(&self) -> &'static Algorithm { self.alg }
/// Computes the public key from the private key's value and returns it,
/// encoded in the standard form for the algorithm.
#[inline(always)]
pub fn compute_public_key(&self) -> Result<PublicKey, error::Unspecified> {
// NSA Guide Step 1.
//
// Obviously, this only handles the part of Step 1 between the private
// key generation and the sending of the public key to the peer. The
// returned public key is what should be sent to the peer.
let mut public_key = PublicKey {
bytes: [0; PUBLIC_KEY_MAX_LEN],
alg: self.alg,
};
self.private_key
.compute_public_key(&self.alg.curve, &mut public_key.bytes)?;
Ok(public_key)
}
/// Performs a key agreement with this private key and the given public key.
///
/// Since `self` is consumed, it will not be usable after calling `agree`.
///
/// `peer_public_key_alg` is the algorithm/curve for the peer's public key
/// point; `agree` will return `Err(error_value)` if it does not match this
/// private key's algorithm/curve.
///
/// `peer_public_key` is the peer's public key. `agree` verifies that it is
/// encoded in the standard form for the algorithm and that the key is
/// *valid*; see the algorithm's documentation for details on how keys are
/// to be encoded and what constitutes a valid key for that algorithm.
///
/// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`.
pub fn agree(
self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input,
) -> Result<InputKeyMaterial, error::Unspecified> {
agree_(
&self.private_key,
self.alg,
peer_public_key_alg,
peer_public_key,
)
}
#[cfg(test)]
pub(crate) fn bytes(&self, curve: &ec::Curve) -> &[u8] { self.private_key.bytes(curve) }
}
impl PrivateKey<Static> {
/// Performs a key agreement with a static private key and the given
/// public key.
///
/// `peer_public_key_alg` is the algorithm/curve for the peer's public key
/// point; `agree_static` will return `Err(error_value)` if it does not
/// match `my_private_key's` algorithm/curve.
///
/// `peer_public_key` is the peer's public key. `agree_static` verifies
/// that it is encoded in the standard form for the algorithm and that
/// the key is *valid*; see the algorithm's documentation for details on
/// how keys are to be encoded and what constitutes a valid key for that
/// algorithm.
///
/// C analogs: `EC_POINT_oct2point` + `ECDH_compute_key`, `X25519`.
pub fn agree_static(
&self, peer_public_key_alg: &Algorithm, peer_public_key: untrusted::Input,
) -> Result<InputKeyMaterial, error::Unspecified> {
agree_(
&self.private_key,
self.alg,
peer_public_key_alg,
peer_public_key,
)
}
pub fn from_bytes(
alg: &'static Algorithm, bytes: untrusted::Input
) -> Result<Self, error::Unspecified> {
let private_key = ec::PrivateKey::from_bytes(&alg.curve, bytes)?;
Ok(Self {
private_key,
alg,
usage: PhantomData,
})
}
pub fn bytes(
&self, alg: &'static Algorithm
) -> &[u8] {
self.private_key.bytes(&alg.curve)
}
}
fn agree_(
my_private_key: &ec::PrivateKey, my_alg: &Algorithm, peer_public_key_alg: &Algorithm,
peer_public_key: untrusted::Input,
) -> Result<InputKeyMaterial, error::Unspecified> {
let alg = &my_alg;
// NSA Guide Prerequisite 1.
//
// The domain parameters are hard-coded. This check verifies that the
// peer's public key's domain parameters match the domain parameters of
// this private key.
if peer_public_key_alg!= *alg |
// NSA Guide Prerequisite 2, regarding which KDFs are allowed, is delegated
// to the caller.
// NSA Guide Prerequisite 3, "Prior to or during the key-agreement process,
// each party shall obtain the identifier associated with the other party
// during the key-agreement scheme," is delegated to the caller.
// NSA Guide Step 1 is handled by `Self::generate()` and
// `Self::compute_public_key()`.
// NSA Guide Steps 2, 3, and 4.
//
// We have a pretty liberal interpretation of the NIST spec's "Destroy"
// that doesn't meet the NSA requirement to "zeroize."
let mut ikm = InputKeyMaterial {
bytes: [0; ec::ELEM_MAX_BYTES],
len: alg.curve.elem_and_scalar_len,
};
(alg.ecdh)(&mut ikm.bytes[..ikm.len], my_private_key, peer_public_key)?;
// NSA Guide Steps 5 and 6 are deferred to `InputKeyMaterial::derive`.
Ok(ikm)
}
/// The result of a key agreement operation, to be fed into a KDF.
///
/// Intentionally not `Clone` or `Copy` since the value should only be
/// used once.
#[must_use]
pub struct InputKeyMaterial {
bytes: [u8; ec::ELEM_MAX_BYTES],
len: usize,
}
mod sealed {
pub trait Sealed {}
}
impl InputKeyMaterial {
/// Calls `kdf` with the raw key material and then returns what `kdf`
/// returns, consuming `Self` so that the key material can only be used
/// once.
pub fn derive<F, R>(self, kdf: F) -> R
where
F: FnOnce(&[u8]) -> R,
{
kdf(&self.bytes[..self.len])
// NSA Guide Steps 5 and 6.
// Again, we have a pretty liberal interpretation of the NIST spec's
// "Destroy" that doesn't meet the NSA requirement to "zeroize."
}
}
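// A hedged usage sketch, not part of the original source: it assumes this
// module exports an `Algorithm` constant such as `X25519` (that name is an
// assumption) and that raw key bytes are available from elsewhere. It only
// uses APIs shown above: `PrivateKey::<Static>::from_bytes`, `agree_static`,
// and `InputKeyMaterial::derive`.
//
// fn example_static_agreement(
//     my_key_bytes: &[u8],
//     peer_public_key_bytes: &[u8],
// ) -> Result<Vec<u8>, error::Unspecified> {
//     let my_private_key = PrivateKey::<Static>::from_bytes(
//         &X25519, // assumed algorithm constant
//         untrusted::Input::from(my_key_bytes),
//     )?;
//     let ikm = my_private_key.agree_static(
//         &X25519,
//         untrusted::Input::from(peer_public_key_bytes),
//     )?;
//     // The caller is responsible for the KDF (NSA Guide Steps 5 and 6).
//     Ok(ikm.derive(|raw| raw.to_vec()))
// }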
| {
return Err(error::Unspecified);
} | conditional_block |
read.rs | : PhantomData<U>,
}
impl Reader<File, UnifyVertices> {
/// Tries to open the file specified by the given path and creates a new
/// `Reader` from that file.
pub fn open(path: impl AsRef<Path>) -> Result<Self, Error> {
// We don't need a `BufReader` here, because we will use our internal
// parse buffer anyway.
Self::new(File::open(path)?)
}
}
impl<R: io::Read> Reader<R, UnifyVertices> {
/// Creates a new `Reader` from the given `io::Read` instance and parses
/// the header of the given input.
///
/// If you want to open a file, rather use [`Reader::open`].
pub fn new(mut reader: R) -> Result<Self, Error> {
// First, we have to find out the encoding of this file. Since STL is a
// pretty bad format, there is no clear indicator for the encoding.
// There are only certain hints in one or the other direction. We
// consider four data points:
//
// - Starts with "solid"? (`starts_with_solid`)
// - No => binary (or corrupted)
// - Yes => Could be both
// - Are there some non-ASCII chars in first 1024 bytes? (`non_ascii_bytes`)
// - No => Could be both (though, binary is very unlikely)
// - Yes => Binary (or corrupted)
// - Does the triangle count match the file length? (`count_match`)
// - `TooShort` => ASCII (or corrupted)
// - `Mismatch` => ASCII (or corrupted)
// - `Match` => Could be both (though, ASCII is very unlikely)
//
// First, we check all of these four things. One problem: the latter
// two points can only be checked if `R` is also `io::Seek`. To get
// that information we use the `SeekHelper` trait.
enum CountMatch {
/// When `R` does not implement `Seek`
NoInfo,
/// File is shorter than 84 bytes.
TooShort,
/// The file length expected from the triangle count does *not*
/// match the actual file length.
Mismatch,
/// The file length expected from the triangle count matches the
/// actual file length.
Match,
}
// ===================================================================
// ===== Helper trait to specialize for `Seek` readers
// ===================================================================
trait SeekHelper {
/// Checks whether the file length is at least 84 bytes and whether
/// the triangle count at offset 80 matches the file length, reporting
/// the result as a `CountMatch`. If the type `R` does not implement
/// `Seek`, `CountMatch::NoInfo` is returned.
fn check_length(&mut self) -> Result<CountMatch, Error>;
}
impl<R: io::Read> SeekHelper for R {
default fn check_length(&mut self) -> Result<CountMatch, Error> {
Ok(CountMatch::NoInfo)
}
}
impl<R: io::Read + io::Seek> SeekHelper for R {
fn check_length(&mut self) -> Result<CountMatch, Error> {
// Determine length of input.
let input_len = self.seek(io::SeekFrom::End(0))?;
if input_len < 84 {
return Ok(CountMatch::TooShort);
}
// Pretend the file is binary and read the number of triangles
// at offset 80.
self.seek(io::SeekFrom::Start(80))?;
let num_triangles = self.read_u32::<LittleEndian>()?;
self.seek(io::SeekFrom::Start(0))?; // Jump back to the start
// In binary format, each triangle is stored with 50 bytes:
// - 3 * 3 = 9 position floats => 36 bytes
// - 3 normal floats => 12 bytes
// - 2 bytes "attribute byte count"
//
// The binary header is 84 bytes long.
let expected_len_if_binary = num_triangles as u64 * 50 + 84;
if expected_len_if_binary == input_len {
Ok(CountMatch::Match)
} else {
Ok(CountMatch::Mismatch)
}
}
}
let count_match = reader.check_length()?;
// Wrap reader into parse buffer.
let mut buf = Buffer::new(reader)?;
// Load the first 1K bytes (or the whole file, if the file is shorter
// than that). We want to inspect those bytes.
buf.saturating_prepare(1024)?;
let starts_with_solid = buf.raw_buf().starts_with(b"solid");
let non_ascii_bytes = buf.raw_buf().iter().take(1024).any(|b| !b.is_ascii());
let is_binary = match (starts_with_solid, non_ascii_bytes, count_match) {
// ----- Binary --------------------------------------------------
// Even if we have no length info, non-ASCII bytes are a strong
// indicator.
(true, true, CountMatch::NoInfo) => true,
(false, true, CountMatch::NoInfo) => true,
// A count/length match is a very strong indicator and we don't
// care about anything else.
(_, _, CountMatch::Match) => true,
// Is binary or corrupted -> we assume binary.
(false, false, CountMatch::NoInfo) => true,
// ----- ASCII ---------------------------------------------------
(true, false, CountMatch::NoInfo) => false,
(true, false, CountMatch::TooShort) => false,
(true, false, CountMatch::Mismatch) => false,
// ----- Assume binary, but error --------------------------------
(_, _, CountMatch::TooShort) => {
return Err(ParseError::Custom(
"corrupted binary STL file: file is shorter than 84 bytes".into(),
Span::new(0, 0),
).into());
}
(_, _, CountMatch::Mismatch) => {
return Err(ParseError::Custom(
"corrupted binary STL file: triangle count at offset 80 disagrees with \
file length".into(),
Span::new(80, 84),
).into());
}
};
// Check if the file starts with `solid`. If yes, a string (the solid
// name) is stored next.
let solid_name = if buf.is_next(b"solid")? {
// Consume `solid`
buf.consume(5);
// Read the solid name (until line break in ASCII case, 80 chars in
// binary case).
let solid_name = if is_binary {
buf.with_bytes(
80 - buf.offset(),
|sd| {
sd.assert_ascii()
.map(|name| name.trim().to_string())
.map_err(|e| e.into())
},
)?
} else {
let name = buf.take_until(b'\n', |sd| {
sd.assert_ascii()
.map(|name| name.trim().to_string())
.map_err(|e| e.into())
})?;
parse::linebreak(&mut buf)?;
name
};
Some(solid_name)
} else {
None
};
// In the binary case, we still need to skip the remaining header.
let triangle_count = if is_binary {
buf.skip(80 - buf.offset())?;
Some(buf.read_u32::<LittleEndian>()?)
} else {
None
};
Ok(Self {
buf,
solid_name,
triangle_count,
_dummy: PhantomData,
})
}
/// Configures the reader to not unify vertices with the exact same
/// position into one.
///
/// An STL file is a simple list of triangles. Each triangle specifies the
/// position of its three vertices. This means that vertices of adjacent
/// triangles are stored once per triangle. When reading the file, we only
/// know the vertex positions and have no idea which vertices are actually
/// the same one and which are two different vertices that have the same
/// position.
///
/// It's common to unify vertices when reading an STL file to get a real
/// mesh and not just a collection of unconnected triangles. You only need
/// to disable unification in very special cases, mainly because:
/// - Your mesh has vertices that have the exact same position but should
/// be treated as separate vertices (this is very rare)
/// - Unifying the vertices is too slow for you (unifying makes the whole
/// read process a couple of times slower)
///
/// But if any of this is a problem for you, you should rather use a better
/// file format instead of STL.
///
/// When vertices are unified, `NaN` values in vertices are not allowed. So
/// in that case, if your file contains `NaN` values, the reading method
/// will panic.
pub fn without_vertex_unification(self) -> Reader<R, VerbatimVertices> {
Reader {
buf: self.buf,
solid_name: self.solid_name,
triangle_count: self.triangle_count,
_dummy: PhantomData,
}
}
}
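// Illustrative usage sketch (not from the original file; the path is made up):
// open an STL file and opt out of vertex unification before reading.
//
// fn example_open() -> Result<(), Error> {
//     let reader = Reader::open("mesh.stl")?.without_vertex_unification();
//     println!("solid name: {:?}, binary: {}", reader.solid_name(), reader.is_binary());
//     Ok(())
// }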
impl<R: io::Read, U: UnifyingMarker> Reader<R, U> {
/// Returns the name of the solid. If no solid name was stored in the file,
/// `None` is returned.
pub fn solid_name(&self) -> Option<&str> {
self.solid_name.as_ref().map(|s| s.as_str())
}
/// Returns whether or not the file is a binary STL file (as opposed to
/// ASCII).
pub fn is_binary(&self) -> bool {
self.triangle_count.is_some()
}
/// Returns the encoding of this STL file.
pub fn encoding(&self) -> Encoding {
if self.is_binary() {
Encoding::Binary
} else {
Encoding::Ascii
}
}
/// Returns the triangle count stored in the file. That number is stored if
/// and only if the file is binary.
pub fn triangle_count(&self) -> Option<u32> {
self.triangle_count
}
/// Reads the whole file into a [`RawStorage`].
///
/// Usually you either want to use a higher level interface (via
/// [`StreamSource`]) or the method [`Reader::read_raw`]. The latter is the
/// streaming version of this method which doesn't require a temporary
/// storage ([`RawStorage`]).
pub fn into_raw_storage(self) -> Result<RawStorage, Error> {
// Prepare the raw result with metadata and memory allocations.
let mut out = RawStorage::empty();
out.solid_name = self.solid_name.clone();
if let Some(tri_count) = self.triangle_count {
out.triangles.reserve(tri_count as usize);
}
// Read all the triangles into the raw result
self.read_raw(|tri| {
out.triangles.push(tri);
Ok(())
})?;
Ok(out)
}
/// Reads the whole file, passing each triangle to the `add_triangle`
/// callback.
///
/// This is a low level building block that you usually don't want to use
/// directly. In particular, **this method itself never performs any vertex
/// unification** (regardless of the type parameter `U`). You usually want
/// to use the [`StreamSource`] interface to actually read meshes from
/// this reader.
#[inline(never)]
pub fn read_raw<F>(self, add_triangle: F) -> Result<(), Error>
where
F: FnMut(RawTriangle) -> Result<(), Error>,
{
if let Some(triangle_count) = self.triangle_count {
self.read_raw_binary(triangle_count, add_triangle)
} else {
self.read_raw_ascii(add_triangle)
}
}
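// Illustrative sketch (an assumption, not part of the original file): driving
// `read_raw` with a closure to count the triangles in a file.
//
// fn example_count(path: &str) -> Result<u64, Error> {
//     let mut count = 0u64;
//     Reader::open(path)?.read_raw(|_tri| {
//         count += 1;
//         Ok(())
//     })?;
//     Ok(count)
// }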
/// The `read_raw` implementation for binary bodies.
#[inline(never)]
fn read_raw_binary<F>(self, triangle_count: u32, mut add_triangle: F) -> Result<(), Error>
where
F: FnMut(RawTriangle) -> Result<(), Error>,
{
const BYTES_PER_TRI: usize = 4 * 3 * 4 + 2;
let mut buf = self.buf;
// We attempt to read as many triangles as specified. If the
// specified number was too high and we reach EOF early, we will
// return an error.
for _ in 0..triangle_count {
// We don't use `with_bytes` here because it isn't inlined and
// we can improve performance significantly by doing it
// manually here.
buf.prepare(BYTES_PER_TRI)?;
let data = &buf.raw_buf()[..BYTES_PER_TRI];
/// Reads three consecutive `f32`s.
#[inline(always)]
fn vec3(data: &[u8]) -> [f32; 3] {
[
LittleEndian::read_f32(&data[0..4]),
LittleEndian::read_f32(&data[4..8]),
LittleEndian::read_f32(&data[8..12]),
]
}
let triangle = RawTriangle {
normal: vec3(&data[0..12]),
vertices: [
vec3(&data[12..24]),
vec3(&data[24..36]),
vec3(&data[36..48]),
],
attribute_byte_count: LittleEndian::read_u16(&data[48..50]),
};
add_triangle(triangle)?;
buf.consume(BYTES_PER_TRI);
}
// If the specified number of triangles was too small and there is
// still data left, we also error.
buf.assert_eof()?;
Ok(())
}
/// The `read_raw` implementation for ASCII bodies.
#[inline(never)]
pub fn read_raw_ascii<F>(self, mut add_triangle: F) -> Result<(), Error>
where
F: FnMut(RawTriangle) -> Result<(), Error>,
{
/// Parses three floats separated by whitespace. No leading or trailing
/// whitespace is handled.
fn vec3(buf: &mut impl ParseBuf) -> Result<[f32; 3], Error> {
let x = parse::ascii_f32(buf)?;
parse::whitespace(buf)?;
let y = parse::ascii_f32(buf)?;
parse::whitespace(buf)?;
let z = parse::ascii_f32(buf)?;
Ok([x, y, z])
}
/// Parses one ASCII line with a vertex (e.g. `vertex 2.0 0.1 1`)
fn vertex(buf: &mut impl ParseBuf) -> Result<[f32; 3], Error> {
parse::line(buf, |buf| {
buf.expect_tag(b"vertex")?;
parse::whitespace(buf)?;
vec3(buf)
})
}
let mut buf = self.buf;
// Parse facets
loop {
// First line (`facet normal 0.0 1.0 0.0`)
let normal = parse::line(&mut buf, |buf| {
buf.expect_tag(b"facet normal")?;
parse::whitespace(buf)?;
vec3(buf)
})?;
// Parse vertices
parse::line(&mut buf, |buf| buf.expect_tag(b"outer loop"))?;
let vertices = [
vertex(&mut buf)?,
vertex(&mut buf)?,
vertex(&mut buf)?,
];
parse::line(&mut buf, |buf| buf.expect_tag(b"endloop"))?;
// Pass parsed triangle to callback
add_triangle(RawTriangle {
normal,
vertices,
attribute_byte_count: 0,
})?;
// Parse last line (`endfacet`)
parse::line(&mut buf, |buf| buf.expect_tag(b"endfacet"))?;
// Check if the next line starts with `endsolid` and break loop
// in that case.
parse::opt_whitespace(&mut buf)?;
if buf.is_next(b"endsolid")? {
// We've seen `endsolid`: we just stop here. There could be
// junk afterwards, but we don't care.
break;
}
}
Ok(())
} | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Reader")
.field("buf", &self.buf)
.field("solid_name", &self.solid_name)
.field("is_binary", &self.triangle_count.is_some())
.field("triangle_count", &self.triangle_count)
.finish()
}
}
impl<R: io::Read + Clone, U: UnifyingMarker> Clone for Reader<R, U> {
fn clone(&self) -> Self {
Self {
buf: self.buf.clone(),
solid_name: self.solid_name.clone(),
triangle_count: self.triangle_count.clone(),
_dummy: PhantomData,
}
}
}
// ===========================================================================
// ===== Definition of unifying dummy types. Not actually public.
// ===========================================================================
pub trait UnifyingMarker {
type Adder: VertexAdder;
const UNIFY: bool;
}
#[derive(Debug)]
pub enum UnifyVertices {}
impl UnifyingMarker for UnifyVertices {
type Adder = UnifyingAdder;
const UNIFY: bool = true;
}
#[derive(Debug)]
pub enum VerbatimVertices {}
impl UnifyingMarker for VerbatimVertices {
type Adder = NonUnifyingAdder;
const UNIFY: bool = false;
}
// ===========================================================================
// ===== VertexAdders: unify vertices or not. Not actually public.
// ===========================================================================
pub trait VertexAdder {
fn new() -> Self;
fn size_hint(&mut self, _vertex_count: hsize) {}
fn add_vertex<S: MemSink>(
&mut self,
sink: &mut S,
pos: [f32; 3],
) -> VertexHandle;
}
/// Adds every incoming vertex as new unique vertex. No unifying is done.
#[derive(Debug)]
pub struct NonUnifyingAdder;
impl VertexAdder for NonUnifyingAdder {
fn new() -> Self {
NonUnifyingAdder
}
fn add_vertex<S: MemSink>(
&mut self,
sink: &mut S,
pos: [f32; 3],
) -> VertexHandle {
let handle = sink.add_vertex();
sink.set_vertex_position(handle, pos.to_point3());
handle
}
}
/// The key of the hashmap: three `f32`. Implementing it this way is
/// faster than using `[OrderedFloat<f32>; 3]`. The values inside must
/// not be NaN, because those values won't be normalized (i.e. there
/// are 2^23 different NaN values) which will confuse the hash map.
#[derive(Debug, PartialEq)]
struct PosKey([f32; 3]);
impl Eq for PosKey {}
impl Hash for PosKey {
fn hash<H: Hasher>(&self, state: &mut H) {
self.0[0].to_bits().hash(state);
self.0[1].to_bits().hash(state);
self.0[2].to_bits().hash(state);
}
}
/// Unifies incoming vertices with the exact same position into a single one.
///
/// We use the `FxHash` here to be as fast as possible. This hash function is
/// very fast but has some problems. When hashing a `u32` it leads to only one
/// multiplication, meaning that upper bits | }
impl<R: io::Read, U: UnifyingMarker> fmt::Debug for Reader<R, U> { | random_line_split |
read.rs | PhantomData<U>,
}
impl Reader<File, UnifyVertices> {
/// Tries to open the file specified by the given path and creates a new
/// `Reader` from that file.
pub fn open(path: impl AsRef<Path>) -> Result<Self, Error> {
// We don't need a `BufReader` here, because we will use our internal
// parse buffer anyway.
Self::new(File::open(path)?)
}
}
impl<R: io::Read> Reader<R, UnifyVertices> {
/// Creates a new `Reader` from the given `io::Read` instance and parses
/// the header of the given input.
///
/// If you want to open a file, rather use [`Reader::open`].
pub fn new(mut reader: R) -> Result<Self, Error> {
// First, we have to find out the encoding of this file. Since STL is a
// pretty bad format, there is no clear indicator for the encoding.
// There are only certain hints in one or the other direction. We
// consider four data points:
//
// - Starts with "solid"? (`starts_with_solid`)
// - No => binary (or corrupted)
// - Yes => Could be both
// - Are there some non-ASCII chars in first 1024 bytes? (`non_ascii_bytes`)
// - No => Could be both (though, binary is very unlikely)
// - Yes => Binary (or corrupted)
// - Does the triangle count match the file length? (`count_match`)
// - `TooShort` => ASCII (or corrupted)
// - `Mismatch` => ASCII (or corrupted)
// - `Match` => Could be both (though, ASCII is very unlikely)
//
// First, we check all of these four things. One problem: the latter
// two points can only be checked if `R` is also `io::Seek`. To get
// that information we use the `SeekHelper` trait.
enum CountMatch {
/// When `R` does not implement `Seek`
NoInfo,
/// File is shorter than 84 bytes.
TooShort,
/// The file length expected from the triangle count does *not*
/// match the actual file length.
Mismatch,
/// The file length expected from the triangle count matches the
/// actual file length.
Match,
}
// ===================================================================
// ===== Helper trait to specialize for `Seek` readers
// ===================================================================
trait SeekHelper {
/// Checks whether the file length is at least 84 bytes and whether
/// the triangle count at offset 80 matches the file length, reporting
/// the result as a `CountMatch`. If the type `R` does not implement
/// `Seek`, `CountMatch::NoInfo` is returned.
fn check_length(&mut self) -> Result<CountMatch, Error>;
}
impl<R: io::Read> SeekHelper for R {
default fn check_length(&mut self) -> Result<CountMatch, Error> {
Ok(CountMatch::NoInfo)
}
}
impl<R: io::Read + io::Seek> SeekHelper for R {
fn check_length(&mut self) -> Result<CountMatch, Error> {
// Determine length of input.
let input_len = self.seek(io::SeekFrom::End(0))?;
if input_len < 84 {
return Ok(CountMatch::TooShort);
}
// Pretend the file is binary and read the number of triangles
// at offset 80.
self.seek(io::SeekFrom::Start(80))?;
let num_triangles = self.read_u32::<LittleEndian>()?;
self.seek(io::SeekFrom::Start(0))?; // Jump back to the start
// In binary format, each triangle is stored with 50 bytes:
// - 3 * 3 = 9 position floats => 36 bytes
// - 3 normal floats => 12 bytes
// - 2 bytes "attribute byte count"
//
// The binary header is 84 bytes long.
let expected_len_if_binary = num_triangles as u64 * 50 + 84;
if expected_len_if_binary == input_len {
Ok(CountMatch::Match)
} else {
Ok(CountMatch::Mismatch)
}
}
}
let count_match = reader.check_length()?;
// Wrap reader into parse buffer.
let mut buf = Buffer::new(reader)?;
// Load the first 1K bytes (or the whole file, if the file is shorter
// than that). We want to inspect those bytes.
buf.saturating_prepare(1024)?;
let starts_with_solid = buf.raw_buf().starts_with(b"solid");
let non_ascii_bytes = buf.raw_buf().iter().take(1024).any(|b| !b.is_ascii());
let is_binary = match (starts_with_solid, non_ascii_bytes, count_match) {
// ----- Binary --------------------------------------------------
// Even if we have no length info, non-ASCII bytes are a strong
// indicator.
(true, true, CountMatch::NoInfo) => true,
(false, true, CountMatch::NoInfo) => true,
// A count/length match is a very strong indicator and we don't
// care about anything else.
(_, _, CountMatch::Match) => true,
// Is binary or corrupted -> we assume binary.
(false, false, CountMatch::NoInfo) => true,
// ----- ASCII ---------------------------------------------------
(true, false, CountMatch::NoInfo) => false,
(true, false, CountMatch::TooShort) => false,
(true, false, CountMatch::Mismatch) => false,
// ----- Assume binary, but error --------------------------------
(_, _, CountMatch::TooShort) => {
return Err(ParseError::Custom(
"corrupted binary STL file: file is shorter than 84 bytes".into(),
Span::new(0, 0),
).into());
}
(_, _, CountMatch::Mismatch) => {
return Err(ParseError::Custom(
"corrupted binary STL file: triangle count at offset 80 disagrees with \
file length".into(),
Span::new(80, 84),
).into());
}
};
// Check if the file starts with `solid`. If yes, a string (the solid
// name) is stored next.
let solid_name = if buf.is_next(b"solid")? {
// Consume `solid`
buf.consume(5);
// Read the solid name (until line break in ASCII case, 80 chars in
// binary case).
let solid_name = if is_binary {
buf.with_bytes(
80 - buf.offset(),
|sd| {
sd.assert_ascii()
.map(|name| name.trim().to_string())
.map_err(|e| e.into())
},
)?
} else {
let name = buf.take_until(b'\n', |sd| {
sd.assert_ascii()
.map(|name| name.trim().to_string())
.map_err(|e| e.into())
})?;
parse::linebreak(&mut buf)?;
name
};
Some(solid_name)
} else {
None
};
// In the binary case, we still need to skip the remaining header.
let triangle_count = if is_binary {
buf.skip(80 - buf.offset())?;
Some(buf.read_u32::<LittleEndian>()?)
} else {
None
};
Ok(Self {
buf,
solid_name,
triangle_count,
_dummy: PhantomData,
})
}
/// Configures the reader to not unify vertices with the exact same
/// position into one.
///
/// An STL file is a simple list of triangles. Each triangle specifies the
/// position of its three vertices. This means that vertices of adjacent
/// triangles are stored once per triangle. When reading the file, we only
/// know the vertex positions and have no idea which vertices are actually
/// the same one and which are two different vertices that have the same
/// position.
///
/// It's common to unify vertices when reading an STL file to get a real
/// mesh and not just a collection of unconnected triangles. You only need
/// to disable unification in very special cases, mainly because:
/// - Your mesh has vertices that have the exact same position but should
/// be treated as separate vertices (this is very rare)
/// - Unifying the vertices is too slow for you (unifying makes the whole
/// read process a couple of times slower)
///
/// But if any of this is a problem for you, you should rather use a better
/// file format instead of STL.
///
/// When vertices are unified, `NaN` values in vertices are not allowed. So
/// in that case, if your file contains `NaN` values, the reading method
/// will panic.
pub fn without_vertex_unification(self) -> Reader<R, VerbatimVertices> {
Reader {
buf: self.buf,
solid_name: self.solid_name,
triangle_count: self.triangle_count,
_dummy: PhantomData,
}
}
}
impl<R: io::Read, U: UnifyingMarker> Reader<R, U> {
/// Returns the name of the solid. If no solid name was stored in the file,
/// `None` is returned.
pub fn solid_name(&self) -> Option<&str> {
self.solid_name.as_ref().map(|s| s.as_str())
}
/// Returns whether or not the file is a binary STL file (as opposed to
/// ASCII).
pub fn is_binary(&self) -> bool {
self.triangle_count.is_some()
}
/// Returns the encoding of this STL file.
pub fn encoding(&self) -> Encoding {
if self.is_binary() {
Encoding::Binary
} else {
Encoding::Ascii
}
}
/// Returns the triangle count stored in the file. That number is stored if
/// and only if the file is binary.
pub fn triangle_count(&self) -> Option<u32> {
self.triangle_count
}
/// Reads the whole file into a [`RawStorage`].
///
/// Usually you either want to use a higher level interface (via
/// [`StreamSource`]) or the method [`Reader::read_raw`]. The latter is the
/// streaming version of this method which doesn't require a temporary
/// storage ([`RawStorage`]).
pub fn into_raw_storage(self) -> Result<RawStorage, Error> {
// Prepare the raw result with metadata and memory allocations.
let mut out = RawStorage::empty();
out.solid_name = self.solid_name.clone();
if let Some(tri_count) = self.triangle_count {
out.triangles.reserve(tri_count as usize);
}
// Read all the triangles into the raw result
self.read_raw(|tri| {
out.triangles.push(tri);
Ok(())
})?;
Ok(out)
}
/// Reads the whole file, passing each triangle to the `add_triangle`
/// callback.
///
/// This is a low level building block that you usually don't want to use
/// directly. In particular, **this method itself never performs any vertex
/// unification** (regardless of the type parameter `U`). You usually want
/// to use the [`StreamSource`] interface to actually read meshes from
/// this reader.
#[inline(never)]
pub fn read_raw<F>(self, add_triangle: F) -> Result<(), Error>
where
F: FnMut(RawTriangle) -> Result<(), Error>,
{
if let Some(triangle_count) = self.triangle_count {
self.read_raw_binary(triangle_count, add_triangle)
} else {
self.read_raw_ascii(add_triangle)
}
}
/// The `read_raw` implementation for binary bodies.
#[inline(never)]
fn | <F>(self, triangle_count: u32, mut add_triangle: F) -> Result<(), Error>
where
F: FnMut(RawTriangle) -> Result<(), Error>,
{
const BYTES_PER_TRI: usize = 4 * 3 * 4 + 2;
let mut buf = self.buf;
// We attempt to read as many triangles as specified. If the
// specified number was too high and we reach EOF early, we will
// return an error.
for _ in 0..triangle_count {
// We don't use `with_bytes` here because it isn't inlined and
// we can improve performance significantly by doing it
// manually here.
buf.prepare(BYTES_PER_TRI)?;
let data = &buf.raw_buf()[..BYTES_PER_TRI];
/// Reads three consecutive `f32`s.
#[inline(always)]
fn vec3(data: &[u8]) -> [f32; 3] {
[
LittleEndian::read_f32(&data[0..4]),
LittleEndian::read_f32(&data[4..8]),
LittleEndian::read_f32(&data[8..12]),
]
}
let triangle = RawTriangle {
normal: vec3(&data[0..12]),
vertices: [
vec3(&data[12..24]),
vec3(&data[24..36]),
vec3(&data[36..48]),
],
attribute_byte_count: LittleEndian::read_u16(&data[48..50]),
};
add_triangle(triangle)?;
buf.consume(BYTES_PER_TRI);
}
// If the specified number of triangles was too small and there is
// still data left, we also error.
buf.assert_eof()?;
Ok(())
}
/// The `read_raw` implementation for ASCII bodies.
#[inline(never)]
pub fn read_raw_ascii<F>(self, mut add_triangle: F) -> Result<(), Error>
where
F: FnMut(RawTriangle) -> Result<(), Error>,
{
/// Parses three floats separated by whitespace. No leading or trailing
/// whitespace is handled.
fn vec3(buf: &mut impl ParseBuf) -> Result<[f32; 3], Error> {
let x = parse::ascii_f32(buf)?;
parse::whitespace(buf)?;
let y = parse::ascii_f32(buf)?;
parse::whitespace(buf)?;
let z = parse::ascii_f32(buf)?;
Ok([x, y, z])
}
/// Parses one ASCII line with a vertex (e.g. `vertex 2.0 0.1 1`)
fn vertex(buf: &mut impl ParseBuf) -> Result<[f32; 3], Error> {
parse::line(buf, |buf| {
buf.expect_tag(b"vertex")?;
parse::whitespace(buf)?;
vec3(buf)
})
}
let mut buf = self.buf;
// Parse facets
loop {
// First line (`facet normal 0.0 1.0 0.0`)
let normal = parse::line(&mut buf, |buf| {
buf.expect_tag(b"facet normal")?;
parse::whitespace(buf)?;
vec3(buf)
})?;
// Parse vertices
parse::line(&mut buf, |buf| buf.expect_tag(b"outer loop"))?;
let vertices = [
vertex(&mut buf)?,
vertex(&mut buf)?,
vertex(&mut buf)?,
];
parse::line(&mut buf, |buf| buf.expect_tag(b"endloop"))?;
// Pass parsed triangle to callback
add_triangle(RawTriangle {
normal,
vertices,
attribute_byte_count: 0,
})?;
// Parse last line (`endfacet`)
parse::line(&mut buf, |buf| buf.expect_tag(b"endfacet"))?;
// Check if the next line starts with `endsolid` and break loop
// in that case.
parse::opt_whitespace(&mut buf)?;
if buf.is_next(b"endsolid")? {
// We've seen `endsolid`: we just stop here. There could be
// junk afterwards, but we don't care.
break;
}
}
Ok(())
}
}
impl<R: io::Read, U: UnifyingMarker> fmt::Debug for Reader<R, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Reader")
.field("buf", &self.buf)
.field("solid_name", &self.solid_name)
.field("is_binary", &self.triangle_count.is_some())
.field("triangle_count", &self.triangle_count)
.finish()
}
}
impl<R: io::Read + Clone, U: UnifyingMarker> Clone for Reader<R, U> {
fn clone(&self) -> Self {
Self {
buf: self.buf.clone(),
solid_name: self.solid_name.clone(),
triangle_count: self.triangle_count.clone(),
_dummy: PhantomData,
}
}
}
// ===========================================================================
// ===== Definition of unifying dummy types. Not actually public.
// ===========================================================================
pub trait UnifyingMarker {
type Adder: VertexAdder;
const UNIFY: bool;
}
#[derive(Debug)]
pub enum UnifyVertices {}
impl UnifyingMarker for UnifyVertices {
type Adder = UnifyingAdder;
const UNIFY: bool = true;
}
#[derive(Debug)]
pub enum VerbatimVertices {}
impl UnifyingMarker for VerbatimVertices {
type Adder = NonUnifyingAdder;
const UNIFY: bool = false;
}
// ===========================================================================
// ===== VertexAdders: unify vertices or not. Not actually public.
// ===========================================================================
pub trait VertexAdder {
fn new() -> Self;
fn size_hint(&mut self, _vertex_count: hsize) {}
fn add_vertex<S: MemSink>(
&mut self,
sink: &mut S,
pos: [f32; 3],
) -> VertexHandle;
}
/// Adds every incoming vertex as new unique vertex. No unifying is done.
#[derive(Debug)]
pub struct NonUnifyingAdder;
impl VertexAdder for NonUnifyingAdder {
fn new() -> Self {
NonUnifyingAdder
}
fn add_vertex<S: MemSink>(
&mut self,
sink: &mut S,
pos: [f32; 3],
) -> VertexHandle {
let handle = sink.add_vertex();
sink.set_vertex_position(handle, pos.to_point3());
handle
}
}
/// The key of the hashmap: three `f32`. Implementing it this way is
/// faster than using `[OrderedFloat<f32>; 3]`. The values inside must
/// not be NaN, because those values won't be normalized (i.e. there
/// are 2^23 different NaN values) which will confuse the hash map.
#[derive(Debug, PartialEq)]
struct PosKey([f32; 3]);
impl Eq for PosKey {}
impl Hash for PosKey {
fn hash<H: Hasher>(&self, state: &mut H) {
self.0[0].to_bits().hash(state);
self.0[1].to_bits().hash(state);
self.0[2].to_bits().hash(state);
}
}
/// Unifies incoming vertices with the exact same position into a single one.
///
/// We use the `FxHash` here to be as fast as possible. This hash function is
/// very fast but has some problems. When hashing a `u32` it leads to only one
/// multiplication, meaning that | read_raw_binary | identifier_name |
read.rs | PhantomData<U>,
}
impl Reader<File, UnifyVertices> {
/// Tries to open the file specified by the given path and creates a new
/// `Reader` from that file.
pub fn open(path: impl AsRef<Path>) -> Result<Self, Error> {
// We don't need a `BufReader` here, because we will use our internal
// parse buffer anyway.
Self::new(File::open(path)?)
}
}
impl<R: io::Read> Reader<R, UnifyVertices> {
/// Creates a new `Reader` from the given `io::Read` instance and parses
/// the header of the given input.
///
/// If you want to open a file, rather use [`Reader::open`].
pub fn new(mut reader: R) -> Result<Self, Error> {
// First, we have to find out the encoding of this file. Since STL is a
// pretty bad format, there is no clear indicator for the encoding.
// There are only certain hints in one or the other direction. We
// consider four data points:
//
// - Starts with "solid"? (`starts_with_solid`)
// - No => binary (or corrupted)
// - Yes => Could be both
// - Are there some non-ASCII chars in first 1024 bytes? (`non_ascii_bytes`)
// - No => Could be both (though, binary is very unlikely)
// - Yes => Binary (or corrupted)
// - Does the triangle count match the file length? (`count_match`)
// - `TooShort` => ASCII (or corrupted)
// - `Mismatch` => ASCII (or corrupted)
// - `Match` => Could be both (though, ASCII is very unlikely)
//
// First, we check all of these four things. One problem: the latter
// two points can only be checked if `R` is also `io::Seek`. To get
// that information we use the `SeekHelper` trait.
enum CountMatch {
/// When `R` does not implement `Seek`
NoInfo,
/// File is shorter than 84 bytes.
TooShort,
/// The file length expected from the triangle count does *not*
/// match the actual file length.
Mismatch,
/// The file length expected from the triangle count matches the
/// actual file length.
Match,
}
// ===================================================================
// ===== Helper trait to specialize for `Seek` readers
// ===================================================================
trait SeekHelper {
/// Checks whether the file length is at least 84 bytes and whether
/// the triangle count at offset 80 matches the file length, reporting
/// the result as a `CountMatch`. If the type `R` does not implement
/// `Seek`, `CountMatch::NoInfo` is returned.
fn check_length(&mut self) -> Result<CountMatch, Error>;
}
impl<R: io::Read> SeekHelper for R {
default fn check_length(&mut self) -> Result<CountMatch, Error> {
Ok(CountMatch::NoInfo)
}
}
impl<R: io::Read + io::Seek> SeekHelper for R {
fn check_length(&mut self) -> Result<CountMatch, Error> {
// Determine length of input.
let input_len = self.seek(io::SeekFrom::End(0))?;
if input_len < 84 {
return Ok(CountMatch::TooShort);
}
// Pretend the file is binary and read the number of triangles
// at offset 80.
self.seek(io::SeekFrom::Start(80))?;
let num_triangles = self.read_u32::<LittleEndian>()?;
self.seek(io::SeekFrom::Start(0))?; // Jump back to the start
// In binary format, each triangle is stored with 50 bytes:
// - 3 * 3 = 9 position floats => 36 bytes
// - 3 normal floats => 12 bytes
// - 2 bytes "attribute byte count"
//
// The binary header is 84 bytes long.
let expected_len_if_binary = num_triangles as u64 * 50 + 84;
if expected_len_if_binary == input_len {
Ok(CountMatch::Match)
} else {
Ok(CountMatch::Mismatch)
}
}
}
let count_match = reader.check_length()?;
// Wrap reader into parse buffer.
let mut buf = Buffer::new(reader)?;
// Load the first 1K bytes (or the whole file, if the file is shorter
// than that). We want to inspect those bytes.
buf.saturating_prepare(1024)?;
let starts_with_solid = buf.raw_buf().starts_with(b"solid");
let non_ascii_bytes = buf.raw_buf().iter().take(1024).any(|b| !b.is_ascii());
let is_binary = match (starts_with_solid, non_ascii_bytes, count_match) {
// ----- Binary --------------------------------------------------
// Even if we have no length info, non-ASCII bytes are a strong
// indicator.
(true, true, CountMatch::NoInfo) => true,
(false, true, CountMatch::NoInfo) => true,
// A count/length match is a very strong indicator and we don't
// care about anything else.
(_, _, CountMatch::Match) => true,
// Is binary or corrupted -> we assume binary.
(false, false, CountMatch::NoInfo) => true,
// ----- ASCII ---------------------------------------------------
(true, false, CountMatch::NoInfo) => false,
(true, false, CountMatch::TooShort) => false,
(true, false, CountMatch::Mismatch) => false,
// ----- Assume binary, but error --------------------------------
(_, _, CountMatch::TooShort) => {
return Err(ParseError::Custom(
"corrupted binary STL file: file is shorter than 84 bytes".into(),
Span::new(0, 0),
).into());
}
(_, _, CountMatch::Mismatch) => {
return Err(ParseError::Custom(
"corrupted binary STL file: triangle count at offset 80 disagrees with \
file length".into(),
Span::new(80, 84),
).into());
}
};
// Check if the file starts with `solid`. If yes, a string (the solid
// name) is stored next.
let solid_name = if buf.is_next(b"solid")? {
// Consume `solid`
buf.consume(5);
// Read the solid name (until line break in ASCII case, 80 chars in
// binary case).
let solid_name = if is_binary {
buf.with_bytes(
80 - buf.offset(),
|sd| {
sd.assert_ascii()
.map(|name| name.trim().to_string())
.map_err(|e| e.into())
},
)?
} else {
let name = buf.take_until(b'\n', |sd| {
sd.assert_ascii()
.map(|name| name.trim().to_string())
.map_err(|e| e.into())
})?;
parse::linebreak(&mut buf)?;
name
};
Some(solid_name)
} else {
None
};
// In the binary case, we still need to skip the remaining header.
let triangle_count = if is_binary {
buf.skip(80 - buf.offset())?;
Some(buf.read_u32::<LittleEndian>()?)
} else {
None
};
Ok(Self {
buf,
solid_name,
triangle_count,
_dummy: PhantomData,
})
}
/// Configures the reader to not unify vertices with the exact same
/// position into one.
///
/// An STL file is a simple list of triangles. Each triangle specifies the
/// position of its three vertices. This means that vertices of adjacent
/// triangles are stored once per triangle. When reading the file, we only
/// know the vertex positions and have no idea which vertices are actually
/// the same one and which are two different vertices that have the same
/// position.
///
/// It's common to unify vertices when reading an STL file to get a real
/// mesh and not just a collection of unconnected triangles. You only need
/// to disable unification in very special cases, mainly because:
/// - Your mesh has vertices that have the exact same position but should
/// be treated as separate vertices (this is very rare)
/// - Unifying the vertices is too slow for you (unifying makes the whole
/// read process a couple of times slower)
///
/// But if any of this is a problem for you, you should rather use a better
/// file format instead of STL.
///
/// When vertices are unified, `NaN` values in vertices are not allowed. So
/// in that case, if your file contains `NaN` values, the reading method
/// will panic.
pub fn without_vertex_unification(self) -> Reader<R, VerbatimVertices> {
Reader {
buf: self.buf,
solid_name: self.solid_name,
triangle_count: self.triangle_count,
_dummy: PhantomData,
}
}
}
impl<R: io::Read, U: UnifyingMarker> Reader<R, U> {
/// Returns the name of the solid. If no solid name was stored in the file,
/// `None` is returned.
pub fn solid_name(&self) -> Option<&str> {
self.solid_name.as_ref().map(|s| s.as_str())
}
/// Returns whether or not the file is a binary STL file (as opposed to
/// ASCII).
pub fn is_binary(&self) -> bool {
self.triangle_count.is_some()
}
/// Returns the encoding of this STL file.
pub fn encoding(&self) -> Encoding {
if self.is_binary() {
Encoding::Binary
} else {
Encoding::Ascii
}
}
/// Returns the triangle count stored in the file. That number is stored if
/// and only if the file is binary.
pub fn triangle_count(&self) -> Option<u32> {
self.triangle_count
}
/// Reads the whole file into a [`RawStorage`].
///
/// Usually you either want to use a higher level interface (via
/// [`StreamSource`]) or the method [`Reader::read_raw`]. The latter is the
/// streaming version of this method which doesn't require a temporary
/// storage ([`RawStorage`]).
pub fn into_raw_storage(self) -> Result<RawStorage, Error> {
// Prepare the raw result with metadata and memory allocations.
let mut out = RawStorage::empty();
out.solid_name = self.solid_name.clone();
if let Some(tri_count) = self.triangle_count |
// Read all the triangles into the raw result
self.read_raw(|tri| {
out.triangles.push(tri);
Ok(())
})?;
Ok(out)
}
/// Reads the whole file, passing each triangle to the `add_triangle`
/// callback.
///
/// This is a low level building block that you usually don't want to use
/// directly. In particular, **this method itself never performs any vertex
/// unification** (regardless of the type parameter `U`). You usually want
/// to use the [`StreamSource`] interface to actually read meshes from
/// this reader.
#[inline(never)]
pub fn read_raw<F>(self, add_triangle: F) -> Result<(), Error>
where
F: FnMut(RawTriangle) -> Result<(), Error>,
{
if let Some(triangle_count) = self.triangle_count {
self.read_raw_binary(triangle_count, add_triangle)
} else {
self.read_raw_ascii(add_triangle)
}
}
/// The `read_raw` implementation for binary bodies.
#[inline(never)]
fn read_raw_binary<F>(self, triangle_count: u32, mut add_triangle: F) -> Result<(), Error>
where
F: FnMut(RawTriangle) -> Result<(), Error>,
{
const BYTES_PER_TRI: usize = 4 * 3 * 4 + 2;
let mut buf = self.buf;
// We attempt to read as many triangles as specified. If the
// specified number was too high and we reach EOF early, we will
// return an error.
for _ in 0..triangle_count {
// We don't use `with_bytes` here because it isn't inlined and
// we can improve performance significantly by doing it
// manually here.
buf.prepare(BYTES_PER_TRI)?;
let data = &buf.raw_buf()[..BYTES_PER_TRI];
/// Reads three consecutive `f32`s.
#[inline(always)]
fn vec3(data: &[u8]) -> [f32; 3] {
[
LittleEndian::read_f32(&data[0..4]),
LittleEndian::read_f32(&data[4..8]),
LittleEndian::read_f32(&data[8..12]),
]
}
let triangle = RawTriangle {
normal: vec3(&data[0..12]),
vertices: [
vec3(&data[12..24]),
vec3(&data[24..36]),
vec3(&data[36..48]),
],
attribute_byte_count: LittleEndian::read_u16(&data[48..50]),
};
add_triangle(triangle)?;
buf.consume(BYTES_PER_TRI);
}
// If the specified number of triangles was too small and there is
// still data left, we also error.
buf.assert_eof()?;
Ok(())
}
/// The `read_raw` implementation for ASCII bodies.
#[inline(never)]
pub fn read_raw_ascii<F>(self, mut add_triangle: F) -> Result<(), Error>
where
F: FnMut(RawTriangle) -> Result<(), Error>,
{
/// Parses three floats separated by whitespace. No leading or trailing
/// whitespace is handled.
fn vec3(buf: &mut impl ParseBuf) -> Result<[f32; 3], Error> {
let x = parse::ascii_f32(buf)?;
parse::whitespace(buf)?;
let y = parse::ascii_f32(buf)?;
parse::whitespace(buf)?;
let z = parse::ascii_f32(buf)?;
Ok([x, y, z])
}
/// Parses one ASCII line with a vertex (e.g. `vertex 2.0 0.1 1`)
fn vertex(buf: &mut impl ParseBuf) -> Result<[f32; 3], Error> {
parse::line(buf, |buf| {
buf.expect_tag(b"vertex")?;
parse::whitespace(buf)?;
vec3(buf)
})
}
let mut buf = self.buf;
// Parse facets
loop {
// First line (`facet normal 0.0 1.0 0.0`)
let normal = parse::line(&mut buf, |buf| {
buf.expect_tag(b"facet normal")?;
parse::whitespace(buf)?;
vec3(buf)
})?;
// Parse vertices
parse::line(&mut buf, |buf| buf.expect_tag(b"outer loop"))?;
let vertices = [
vertex(&mut buf)?,
vertex(&mut buf)?,
vertex(&mut buf)?,
];
parse::line(&mut buf, |buf| buf.expect_tag(b"endloop"))?;
// Pass parsed triangle to callback
add_triangle(RawTriangle {
normal,
vertices,
attribute_byte_count: 0,
})?;
// Parse last line (`endfacet`)
parse::line(&mut buf, |buf| buf.expect_tag(b"endfacet"))?;
// Check if the next line starts with `endsolid` and break loop
// in that case.
parse::opt_whitespace(&mut buf)?;
if buf.is_next(b"endsolid")? {
// We've seen `endsolid`: we just stop here. There could be
// junk afterwards, but we don't care.
break;
}
}
Ok(())
}
}
impl<R: io::Read, U: UnifyingMarker> fmt::Debug for Reader<R, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Reader")
.field("buf", &self.buf)
.field("solid_name", &self.solid_name)
.field("is_binary", &self.triangle_count.is_some())
.field("triangle_count", &self.triangle_count)
.finish()
}
}
impl<R: io::Read + Clone, U: UnifyingMarker> Clone for Reader<R, U> {
fn clone(&self) -> Self {
Self {
buf: self.buf.clone(),
solid_name: self.solid_name.clone(),
triangle_count: self.triangle_count.clone(),
_dummy: PhantomData,
}
}
}
// ===========================================================================
// ===== Definition of unifying dummy types. Not actually public.
// ===========================================================================
pub trait UnifyingMarker {
type Adder: VertexAdder;
const UNIFY: bool;
}
#[derive(Debug)]
pub enum UnifyVertices {}
impl UnifyingMarker for UnifyVertices {
type Adder = UnifyingAdder;
const UNIFY: bool = true;
}
#[derive(Debug)]
pub enum VerbatimVertices {}
impl UnifyingMarker for VerbatimVertices {
type Adder = NonUnifyingAdder;
const UNIFY: bool = false;
}
// ===========================================================================
// ===== VertexAdders: unify vertices or not. Not actually public.
// ===========================================================================
pub trait VertexAdder {
fn new() -> Self;
fn size_hint(&mut self, _vertex_count: hsize) {}
fn add_vertex<S: MemSink>(
&mut self,
sink: &mut S,
pos: [f32; 3],
) -> VertexHandle;
}
/// Adds every incoming vertex as new unique vertex. No unifying is done.
#[derive(Debug)]
pub struct NonUnifyingAdder;
impl VertexAdder for NonUnifyingAdder {
fn new() -> Self {
NonUnifyingAdder
}
fn add_vertex<S: MemSink>(
&mut self,
sink: &mut S,
pos: [f32; 3],
) -> VertexHandle {
let handle = sink.add_vertex();
sink.set_vertex_position(handle, pos.to_point3());
handle
}
}
/// The key of the hashmap: three `f32`. Implementing it this way is
/// faster than using `[OrderedFloat<f32>; 3]`. The values inside must
/// not be NaN, because those values won't be normalized (i.e. there
/// are 2^23 different NaN values) which will confuse the hash map.
#[derive(Debug, PartialEq)]
struct PosKey([f32; 3]);
impl Eq for PosKey {}
impl Hash for PosKey {
fn hash<H: Hasher>(&self, state: &mut H) {
self.0[0].to_bits().hash(state);
self.0[1].to_bits().hash(state);
self.0[2].to_bits().hash(state);
}
}
/// Unifies incoming vertices with the exact same position into a single one.
///
/// We use the `FxHash` here to be as fast as possible. This hash function is
/// very fast but has some problems. When hashing a `u32` it leads to only one
/// multiplication, meaning that | {
out.triangles.reserve(tri_count as usize);
} | conditional_block |
rsync.rs | use std::{
fs::File,
io::{BufReader, Read},
path::{Path, PathBuf},
};
use anyhow::{anyhow, Context, Result};
use filetime::{set_file_mtime, FileTime};
use log::{info, warn};
use rpki::{
repository::{sigobj::SignedObject, Cert, Crl, Manifest, Roa},
rrdp::ProcessSnapshot,
};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::{
config::{self, Config},
file_ops,
rrdp::RrdpState,
util::{self, Time},
};
pub fn | (
rrdp_state: &RrdpState,
changed: bool,
config: &Config,
) -> Result<()> {
// Check that there is a current snapshot, if not, there is no work
if rrdp_state.snapshot_path().is_none() {
return Ok(());
}
// We can assume now that there is a snapshot and unwrap things for it
let snapshot_path = rrdp_state.snapshot_path().unwrap();
let snapshot = rrdp_state.snapshot().unwrap();
let session_id = snapshot.session_id();
let serial = snapshot.serial();
let mut rsync_state = RsyncDirState::recover(config)?;
let new_revision = RsyncRevision { session_id, serial };
if changed {
let mut writer = RsyncFromSnapshotWriter {
out_path: new_revision.path(config),
include_host_and_module: config.rsync_include_host,
};
writer.create_out_path_if_missing()?;
writer.for_snapshot_path(&snapshot_path)?;
if config.rsync_dir_use_symlinks() {
symlink_current_to_new_revision_dir(&new_revision, config)?;
} else {
rename_new_revision_dir_to_current(&new_revision, &rsync_state, config)?;
}
rsync_state.update_current(new_revision);
}
rsync_state.clean_old(config)?;
rsync_state.persist(config)?;
Ok(())
}
/// Create a new symlink then rename it. We need to do this because the std library
/// refuses to overwrite an existing symlink. And if we were to remove it first, then
/// we would introduce a race condition for clients accessing it.
fn symlink_current_to_new_revision_dir(
new_revision: &RsyncRevision,
config: &Config,
) -> Result<()> {
info!(
"Updating symlink 'current' to '{}' under rsync dir '{}'",
new_revision.dir_name(),
config.rsync_dir.display()
);
let current_path = config.rsync_dir_current();
let tmp_name = file_ops::path_with_extension(¤t_path, config::TMP_FILE_EXT);
if tmp_name.exists() {
std::fs::remove_file(&tmp_name).with_context(|| {
format!(
"Could not remove lingering temporary symlink for current rsync dir at '{}'",
tmp_name.display()
)
})?;
}
std::os::unix::fs::symlink(new_revision.dir_name(), &tmp_name).with_context(|| {
format!(
"Could not create temporary symlink for new rsync content at '{}'",
tmp_name.display()
)
})?;
std::fs::rename(&tmp_name, ¤t_path).with_context(|| {
format!(
"Could not rename symlink for current rsync dir from '{}' to '{}'",
tmp_name.display(),
current_path.display()
)
})?;
Ok(())
}
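// Minimal standalone sketch of the symlink-swap trick used above (illustrative,
// not part of the original source): create the new symlink under a temporary
// name, then rename it over the existing one, so readers never observe a
// moment without a `current` entry.
//
// fn atomic_symlink(target: &str, link: &std::path::Path) -> std::io::Result<()> {
//     let tmp = link.with_extension("tmp");
//     if tmp.exists() {
//         std::fs::remove_file(&tmp)?;
//     }
//     std::os::unix::fs::symlink(target, &tmp)?;
//     std::fs::rename(&tmp, link)
// }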
/// Rename the path for the new revision to the current rsync path, *after*
/// renaming any existing current path to the serial and session for that
/// revision.
fn rename_new_revision_dir_to_current(
new_revision: &RsyncRevision,
rsync_state: &RsyncDirState,
config: &Config,
) -> Result<()> {
info!("Renaming rsync folders for close to atomic update of the rsync module dir");
let current_path = config.rsync_dir_current();
if let Some(current) = &rsync_state.current {
let current_preserve_path = current.path(config);
if current_path.exists() {
info!(
"Renaming the rsync directory for previous revision to: {}",
current_preserve_path.display()
);
std::fs::rename(¤t_path, ¤t_preserve_path).with_context(|| {
format!(
"Could not rename current rsync dir from '{}' to '{}'",
current_path.display(),
current_preserve_path.display()
)
})?;
}
}
info!(
"Rename rsync dir for new revision to '{}'",
current_path.display()
);
std::fs::rename(&new_revision.path(config), ¤t_path).with_context(|| {
format!(
"Could not rename new rsync dir from '{}' to '{}'",
new_revision.path(config).display(),
current_path.display()
)
})?;
Ok(())
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
struct RsyncDirState {
current: Option<RsyncRevision>,
old: Vec<DeprecatedRsyncRevision>,
}
impl RsyncDirState {
/// Gets the current state from disk, if a state file exists. Otherwise returns
/// a new blank state.
fn recover(config: &Config) -> Result<Self> {
let state_path = config.rsync_state_path();
if state_path.exists() {
let json_bytes = file_ops::read_file(&state_path).with_context(|| {
format!("Cannot read rsync state file at: {}", state_path.display())
})?;
serde_json::from_slice(json_bytes.as_ref()).with_context(|| {
format!(
"Cannot deserialize json for current state from {}",
state_path.display()
)
})
} else {
Ok(RsyncDirState {
current: None,
old: vec![],
})
}
}
/// Persists the state to disk
fn persist(&self, config: &Config) -> Result<()> {
let state_path = config.rsync_state_path();
let json = serde_json::to_string_pretty(&self)?;
file_ops::write_buf(&state_path, json.as_bytes()).with_context(|| "Could not save state.")
}
/// Updates the current revision for this state, moves a possible
/// existing current state to old.
fn update_current(&mut self, current: RsyncRevision) {
let existing = std::mem::replace(&mut self.current, Some(current));
if let Some(existing) = existing {
self.old.push(existing.deprecate());
}
}
/// Cleans old directories from disk when their time has come, and updates
/// this state (forgets these old versions). Will throw an error if removing
/// an old dir fails, but will simply skip removing old dirs if they had
/// already been removed.
fn clean_old(&mut self, config: &Config) -> Result<()> {
let clean_before = Time::seconds_ago(config.cleanup_after);
for old in self
.old
.iter()
.filter(|deprecated| deprecated.since <= clean_before)
{
let path = old.revision.path(config);
if path.exists() {
info!(
"Removing rsync directory: {}, deprecated since: {}",
path.display(),
old.since
);
// Try to remove the old directory if it still exists
std::fs::remove_dir_all(&path).with_context(|| {
format!(
"Could not remove rsync dir for old revision at: {}",
path.display()
)
})?;
}
}
self.old
.retain(|deprecated| deprecated.since > clean_before);
Ok(())
}
}
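// Hedged lifecycle sketch (not part of the original source): how the state
// functions above are typically chained by `update_from_rrdp_state`.
//
// fn example_cycle(config: &Config, new_revision: RsyncRevision) -> Result<()> {
//     let mut state = RsyncDirState::recover(config)?;
//     state.update_current(new_revision);
//     state.clean_old(config)?;
//     state.persist(config)
// }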
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
struct RsyncRevision {
#[serde(deserialize_with = "util::de_uuid", serialize_with = "util::ser_uuid")]
session_id: Uuid,
serial: u64,
}
impl RsyncRevision {
fn dir_name(&self) -> String {
format!("session_{}_serial_{}", self.session_id, self.serial)
}
fn path(&self, config: &Config) -> PathBuf {
config.rsync_dir.join(&self.dir_name())
}
fn deprecate(self) -> DeprecatedRsyncRevision {
DeprecatedRsyncRevision {
since: Time::now(),
revision: self,
}
}
}
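// For example (illustrative values only), a revision with session id
// `e9be21e7-c537-4564-b742-64700978c6b4` and serial `2658` maps to the
// directory name `session_e9be21e7-c537-4564-b742-64700978c6b4_serial_2658`
// under the configured rsync dir.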
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
struct DeprecatedRsyncRevision {
since: Time,
revision: RsyncRevision,
}
struct RsyncFromSnapshotWriter {
out_path: PathBuf,
include_host_and_module: bool,
}
impl RsyncFromSnapshotWriter {
/// Creates an empty directory for the rsync out_path. Particularly needed if the snapshot
/// is empty since no files (and parent dirs) would be created in that case - and we want to
/// see an empty directory. See issue #62.
fn create_out_path_if_missing(&self) -> Result<()> {
if !self.out_path.exists() {
std::fs::create_dir_all(&self.out_path).with_context(|| {
format!(
"Cannot create output directory for rsync at {}",
&self.out_path.display()
)
})
} else {
Ok(())
}
}
/// Processes the given snapshot and writes any published files under the
/// rsync out_path directory
fn for_snapshot_path(&mut self, snapshot: &Path) -> Result<()> {
let source_file = File::open(snapshot)?;
let buf_reader = BufReader::new(source_file);
self.process(buf_reader)?;
Ok(())
}
}
impl ProcessSnapshot for RsyncFromSnapshotWriter {
type Err = anyhow::Error;
fn meta(&mut self, _session_id: Uuid, _serial: u64) -> Result<()> {
Ok(()) // nothing to do
}
fn publish(
&mut self,
uri: rpki::uri::Rsync,
data: &mut rpki::rrdp::ObjectReader,
) -> Result<()> {
let path = if self.include_host_and_module {
self.out_path.join(format!(
"{}/{}/{}",
uri.authority(),
uri.module_name(),
uri.path()
))
} else {
self.out_path.join(uri.path())
};
// Read the bytes into memory; we will need to parse this in order
// to fix the mtime of the file. In other words, we _could_ copy
// the bytes from the reader into a file on disk, but then we would
// have to re-read them to parse them anyway.
let mut bytes: Vec<u8> = vec![];
data.read_to_end(&mut bytes)?;
file_ops::write_buf(&path, &bytes).with_context(|| {
format!(
"Could not copy element for uri: {}, to path: {}",
uri,
path.to_string_lossy()
)
})?;
if let Err(e) = fix_since(&path, &bytes) {
warn!("{}", e);
}
Ok(())
}
}
// Try to fix the modification time for a repository object.
// This is needed because otherwise some clients will always think
// there is an update.
fn fix_since(path: &Path, data: &[u8]) -> Result<()> {
let path_str = path.to_string_lossy();
let time = if path_str.ends_with(".cer") {
Cert::decode(data).map(|cert| cert.validity().not_before())
} else if path_str.ends_with(".crl") {
Crl::decode(data).map(|crl| crl.this_update())
} else if path_str.ends_with(".mft") {
Manifest::decode(data, false).map(|mft| mft.this_update())
} else if path_str.ends_with(".roa") {
Roa::decode(data, false).map(|roa| roa.cert().validity().not_before())
} else {
// Try to parse this as a generic RPKI signed object
SignedObject::decode(data, false).map(|signed| signed.cert().validity().not_before())
}
.map_err(|_| anyhow!("Cannot parse object at: {} to derive mtime", path_str))?;
let mtime = FileTime::from_unix_time(time.timestamp(), 0);
set_file_mtime(&path, mtime).map_err(|e| {
anyhow!(
"Cannot modify mtime for object at: {}, error: {}",
path_str,
e
)
})?;
Ok(())
}
#[cfg(test)]
mod tests {
use filetime::FileTime;
use std::{
fs,
path::{Path, PathBuf},
};
use crate::util::test_with_dir;
use super::RsyncFromSnapshotWriter;
#[test]
fn write_rsync_from_snapshot() {
test_with_dir("write_rsync_from_snapshot", |dir| {
let snapshot_path = PathBuf::from("./test-resources/rrdp-rev2658/e9be21e7-c537-4564-b742-64700978c6b4/2658/rnd-sn/snapshot.xml");
let out_path = dir.join("rsync");
let include_host_and_module = false;
let mut writer = RsyncFromSnapshotWriter {
out_path,
include_host_and_module,
};
writer.for_snapshot_path(&snapshot_path).unwrap();
fn check_mtime(dir: &Path, path: &str, timestamp: i64) {
let path = dir.join(path);
let metadata = fs::metadata(path).unwrap();
let mtime = FileTime::from_last_modification_time(&metadata);
assert_eq!(timestamp, mtime.unix_seconds())
}
check_mtime(
&dir,
"rsync/ta/0/3490C0DEEA1F2E5605230550130F12D42FDE1FCD.cer",
1600268228,
);
check_mtime(
&dir,
"rsync/Acme-Corp-Intl/3/A4E953A4133AC82A46AE19C2E7CC635B51CD11D3.mft",
1622637098,
);
check_mtime(
&dir,
"rsync/Acme-Corp-Intl/5/D2E73D77B71B22FAAB38F5A62DF488283FE97932.crl",
1622621702,
);
check_mtime(&dir, "rsync/Acme-Corp-Intl/3/AS40224.roa", 1620657233);
});
}
}
| update_from_rrdp_state | identifier_name |
rsync.rs | use std::{
fs::File,
io::{BufReader, Read},
path::{Path, PathBuf},
};
use anyhow::{anyhow, Context, Result};
use filetime::{set_file_mtime, FileTime};
use log::{info, warn};
use rpki::{
repository::{sigobj::SignedObject, Cert, Crl, Manifest, Roa},
rrdp::ProcessSnapshot,
};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::{
config::{self, Config},
file_ops,
rrdp::RrdpState,
util::{self, Time},
};
pub fn update_from_rrdp_state(
rrdp_state: &RrdpState,
changed: bool,
config: &Config,
) -> Result<()> {
// Check that there is a current snapshot, if not, there is no work
if rrdp_state.snapshot_path().is_none() {
return Ok(());
}
// We can assume now that there is a snapshot and unwrap things for it
let snapshot_path = rrdp_state.snapshot_path().unwrap();
let snapshot = rrdp_state.snapshot().unwrap();
let session_id = snapshot.session_id();
let serial = snapshot.serial();
let mut rsync_state = RsyncDirState::recover(config)?;
let new_revision = RsyncRevision { session_id, serial };
if changed {
let mut writer = RsyncFromSnapshotWriter {
out_path: new_revision.path(config),
include_host_and_module: config.rsync_include_host,
};
writer.create_out_path_if_missing()?;
writer.for_snapshot_path(&snapshot_path)?;
if config.rsync_dir_use_symlinks() {
symlink_current_to_new_revision_dir(&new_revision, config)?;
} else {
rename_new_revision_dir_to_current(&new_revision, &rsync_state, config)?;
}
rsync_state.update_current(new_revision);
}
rsync_state.clean_old(config)?;
rsync_state.persist(config)?;
Ok(())
}
/// Create a new symlink then rename it. We need to do this because the std library
/// refuses to overwrite an existing symlink. And if we were to remove it first, then
/// we would introduce a race condition for clients accessing it.
fn symlink_current_to_new_revision_dir(
new_revision: &RsyncRevision,
config: &Config,
) -> Result<()> {
info!(
"Updating symlink 'current' to '{}' under rsync dir '{}'",
new_revision.dir_name(),
config.rsync_dir.display()
);
let current_path = config.rsync_dir_current();
let tmp_name = file_ops::path_with_extension(&current_path, config::TMP_FILE_EXT);
if tmp_name.exists() {
std::fs::remove_file(&tmp_name).with_context(|| {
format!(
"Could not remove lingering temporary symlink for current rsync dir at '{}'",
tmp_name.display()
)
})?;
}
std::os::unix::fs::symlink(new_revision.dir_name(), &tmp_name).with_context(|| {
format!(
"Could not create temporary symlink for new rsync content at '{}'",
tmp_name.display()
)
})?;
std::fs::rename(&tmp_name, &current_path).with_context(|| {
format!(
"Could not rename symlink for current rsync dir from '{}' to '{}'",
tmp_name.display(),
current_path.display()
)
})?;
Ok(())
}
/// Rename the path for the new revision to the current rsync path, *after*
/// renaming any existing current path to the serial and session for that
/// revision.
fn rename_new_revision_dir_to_current(
new_revision: &RsyncRevision,
rsync_state: &RsyncDirState,
config: &Config,
) -> Result<()> {
info!("Renaming rsync folders for close to atomic update of the rsync module dir");
let current_path = config.rsync_dir_current();
if let Some(current) = &rsync_state.current {
let current_preserve_path = current.path(config);
if current_path.exists() {
info!( | "Renaming the rsync directory for previous revision to: {}",
current_preserve_path.display()
);
std::fs::rename(&current_path, &current_preserve_path).with_context(|| {
format!(
"Could not rename current rsync dir from '{}' to '{}'",
current_path.display(),
current_preserve_path.display()
)
})?;
}
}
info!(
"Rename rsync dir for new revision to '{}'",
current_path.display()
);
std::fs::rename(&new_revision.path(config), &current_path).with_context(|| {
format!(
"Could not rename new rsync dir from '{}' to '{}'",
new_revision.path(config).display(),
current_path.display()
)
})?;
Ok(())
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
struct RsyncDirState {
current: Option<RsyncRevision>,
old: Vec<DeprecatedRsyncRevision>,
}
impl RsyncDirState {
/// Gets the current state from disk, if a state file exists. Otherwise returns
/// a new blank state.
fn recover(config: &Config) -> Result<Self> {
let state_path = config.rsync_state_path();
if state_path.exists() {
let json_bytes = file_ops::read_file(&state_path).with_context(|| {
format!("Cannot read rsync state file at: {}", state_path.display())
})?;
serde_json::from_slice(json_bytes.as_ref()).with_context(|| {
format!(
"Cannot deserialize json for current state from {}",
state_path.display()
)
})
} else {
Ok(RsyncDirState {
current: None,
old: vec![],
})
}
}
/// Persists the state to disk
fn persist(&self, config: &Config) -> Result<()> {
let state_path = config.rsync_state_path();
let json = serde_json::to_string_pretty(&self)?;
file_ops::write_buf(&state_path, json.as_bytes()).with_context(|| "Could not save state.")
}
/// Updates the current revision for this state, moves a possible
/// existing current state to old.
fn update_current(&mut self, current: RsyncRevision) {
let existing = std::mem::replace(&mut self.current, Some(current));
if let Some(existing) = existing {
self.old.push(existing.deprecate());
}
}
/// Cleans old directories from disk when their time has come, and updates
/// this state (forgets these old versions). Will throw an error if removing
/// an old dir fails, but will simply skip removing old dirs if they had
/// already been removed.
fn clean_old(&mut self, config: &Config) -> Result<()> {
let clean_before = Time::seconds_ago(config.cleanup_after);
for old in self
.old
.iter()
.filter(|deprecated| deprecated.since <= clean_before)
{
let path = old.revision.path(config);
if path.exists() {
info!(
"Removing rsync directory: {}, deprecated since: {}",
path.display(),
old.since
);
// Try to remove the old directory if it still exists
std::fs::remove_dir_all(&path).with_context(|| {
format!(
"Could not remove rsync dir for old revision at: {}",
path.display()
)
})?;
}
}
self.old
.retain(|deprecated| deprecated.since > clean_before);
Ok(())
}
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
struct RsyncRevision {
#[serde(deserialize_with = "util::de_uuid", serialize_with = "util::ser_uuid")]
session_id: Uuid,
serial: u64,
}
impl RsyncRevision {
fn dir_name(&self) -> String {
format!("session_{}_serial_{}", self.session_id, self.serial)
}
fn path(&self, config: &Config) -> PathBuf {
config.rsync_dir.join(&self.dir_name())
}
fn deprecate(self) -> DeprecatedRsyncRevision {
DeprecatedRsyncRevision {
since: Time::now(),
revision: self,
}
}
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
struct DeprecatedRsyncRevision {
since: Time,
revision: RsyncRevision,
}
struct RsyncFromSnapshotWriter {
out_path: PathBuf,
include_host_and_module: bool,
}
impl RsyncFromSnapshotWriter {
/// Creates an empty directory for the rsync out_path. Particularly needed if the snapshot
/// is empty since no files (and parent dirs) would be created in that case - and we want to
/// see an empty directory. See issue #62.
fn create_out_path_if_missing(&self) -> Result<()> {
if !self.out_path.exists() {
std::fs::create_dir_all(&self.out_path).with_context(|| {
format!(
"Cannot create output directory for rsync at {}",
&self.out_path.display()
)
})
} else {
Ok(())
}
}
/// Processes the given snapshot and writes any published files under the
/// rsync out_path directory
fn for_snapshot_path(&mut self, snapshot: &Path) -> Result<()> {
let source_file = File::open(snapshot)?;
let buf_reader = BufReader::new(source_file);
self.process(buf_reader)?;
Ok(())
}
}
impl ProcessSnapshot for RsyncFromSnapshotWriter {
type Err = anyhow::Error;
fn meta(&mut self, _session_id: Uuid, _serial: u64) -> Result<()> {
Ok(()) // nothing to do
}
fn publish(
&mut self,
uri: rpki::uri::Rsync,
data: &mut rpki::rrdp::ObjectReader,
) -> Result<()> {
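// Example (hypothetical URI): rsync://rpki.example.net/repo/path/file.cer is written to
// <out_path>/rpki.example.net/repo/path/file.cer when include_host_and_module is set, and to
// <out_path>/path/file.cer otherwise.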
let path = if self.include_host_and_module {
self.out_path.join(format!(
"{}/{}/{}",
uri.authority(),
uri.module_name(),
uri.path()
))
} else {
self.out_path.join(uri.path())
};
// Read the bytes into memory, we will need to parse this in order
// to fix the mtime of the file. In other words.. we _could_ copy
// the bytes from the reader into a file on disk, but then we would
// have to re-read them to parse them anyway.
let mut bytes: Vec<u8> = vec![];
data.read_to_end(&mut bytes)?;
file_ops::write_buf(&path, &bytes).with_context(|| {
format!(
"Could not copy element for uri: {}, to path: {}",
uri,
path.to_string_lossy()
)
})?;
if let Err(e) = fix_since(&path, &bytes) {
warn!("{}", e);
}
Ok(())
}
}
// Try to fix the modification time for a repository object.
// This is needed because otherwise some clients will always think
// there is an update.
fn fix_since(path: &Path, data: &[u8]) -> Result<()> {
let path_str = path.to_string_lossy();
let time = if path_str.ends_with(".cer") {
Cert::decode(data).map(|cert| cert.validity().not_before())
} else if path_str.ends_with(".crl") {
Crl::decode(data).map(|crl| crl.this_update())
} else if path_str.ends_with(".mft") {
Manifest::decode(data, false).map(|mft| mft.this_update())
} else if path_str.ends_with(".roa") {
Roa::decode(data, false).map(|roa| roa.cert().validity().not_before())
} else {
// Try to parse this as a generic RPKI signed object
SignedObject::decode(data, false).map(|signed| signed.cert().validity().not_before())
}
.map_err(|_| anyhow!("Cannot parse object at: {} to derive mtime", path_str))?;
let mtime = FileTime::from_unix_time(time.timestamp(), 0);
set_file_mtime(&path, mtime).map_err(|e| {
anyhow!(
"Cannot modify mtime for object at: {}, error: {}",
path_str,
e
)
})?;
Ok(())
}
#[cfg(test)]
mod tests {
use filetime::FileTime;
use std::{
fs,
path::{Path, PathBuf},
};
use crate::util::test_with_dir;
use super::RsyncFromSnapshotWriter;
#[test]
fn write_rsync_from_snapshot() {
test_with_dir("write_rsync_from_snapshot", |dir| {
let snapshot_path = PathBuf::from("./test-resources/rrdp-rev2658/e9be21e7-c537-4564-b742-64700978c6b4/2658/rnd-sn/snapshot.xml");
let out_path = dir.join("rsync");
let include_host_and_module = false;
let mut writer = RsyncFromSnapshotWriter {
out_path,
include_host_and_module,
};
writer.for_snapshot_path(&snapshot_path).unwrap();
fn check_mtime(dir: &Path, path: &str, timestamp: i64) {
let path = dir.join(path);
let metadata = fs::metadata(path).unwrap();
let mtime = FileTime::from_last_modification_time(&metadata);
assert_eq!(timestamp, mtime.unix_seconds())
}
check_mtime(
&dir,
"rsync/ta/0/3490C0DEEA1F2E5605230550130F12D42FDE1FCD.cer",
1600268228,
);
check_mtime(
&dir,
"rsync/Acme-Corp-Intl/3/A4E953A4133AC82A46AE19C2E7CC635B51CD11D3.mft",
1622637098,
);
check_mtime(
&dir,
"rsync/Acme-Corp-Intl/5/D2E73D77B71B22FAAB38F5A62DF488283FE97932.crl",
1622621702,
);
check_mtime(&dir, "rsync/Acme-Corp-Intl/3/AS40224.roa", 1620657233);
});
}
} | random_line_split |
|
rsync.rs | use std::{
fs::File,
io::{BufReader, Read},
path::{Path, PathBuf},
};
use anyhow::{anyhow, Context, Result};
use filetime::{set_file_mtime, FileTime};
use log::{info, warn};
use rpki::{
repository::{sigobj::SignedObject, Cert, Crl, Manifest, Roa},
rrdp::ProcessSnapshot,
};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::{
config::{self, Config},
file_ops,
rrdp::RrdpState,
util::{self, Time},
};
pub fn update_from_rrdp_state(
rrdp_state: &RrdpState,
changed: bool,
config: &Config,
) -> Result<()> {
// Check that there is a current snapshot, if not, there is no work
if rrdp_state.snapshot_path().is_none() {
return Ok(());
}
// We can assume now that there is a snapshot and unwrap things for it
let snapshot_path = rrdp_state.snapshot_path().unwrap();
let snapshot = rrdp_state.snapshot().unwrap();
let session_id = snapshot.session_id();
let serial = snapshot.serial();
let mut rsync_state = RsyncDirState::recover(config)?;
let new_revision = RsyncRevision { session_id, serial };
if changed {
let mut writer = RsyncFromSnapshotWriter {
out_path: new_revision.path(config),
include_host_and_module: config.rsync_include_host,
};
writer.create_out_path_if_missing()?;
writer.for_snapshot_path(&snapshot_path)?;
if config.rsync_dir_use_symlinks() {
symlink_current_to_new_revision_dir(&new_revision, config)?;
} else {
rename_new_revision_dir_to_current(&new_revision, &rsync_state, config)?;
}
rsync_state.update_current(new_revision);
}
rsync_state.clean_old(config)?;
rsync_state.persist(config)?;
Ok(())
}
/// Create a new symlink then rename it. We need to do this because the std library
/// refuses to overwrite an existing symlink. And if we were to remove it first, then
/// we would introduce a race condition for clients accessing it.
fn symlink_current_to_new_revision_dir(
new_revision: &RsyncRevision,
config: &Config,
) -> Result<()> {
info!(
"Updating symlink 'current' to '{}' under rsync dir '{}'",
new_revision.dir_name(),
config.rsync_dir.display()
);
let current_path = config.rsync_dir_current();
let tmp_name = file_ops::path_with_extension(&current_path, config::TMP_FILE_EXT);
if tmp_name.exists() {
std::fs::remove_file(&tmp_name).with_context(|| {
format!(
"Could not remove lingering temporary symlink for current rsync dir at '{}'",
tmp_name.display()
)
})?;
}
std::os::unix::fs::symlink(new_revision.dir_name(), &tmp_name).with_context(|| {
format!(
"Could not create temporary symlink for new rsync content at '{}'",
tmp_name.display()
)
})?;
std::fs::rename(&tmp_name, &current_path).with_context(|| {
format!(
"Could not rename symlink for current rsync dir from '{}' to '{}'",
tmp_name.display(),
current_path.display()
)
})?;
Ok(())
}
/// Rename the path for the new revision to the current rsync path, *after*
/// renaming any existing current path to the serial and session for that
/// revision.
fn rename_new_revision_dir_to_current(
new_revision: &RsyncRevision,
rsync_state: &RsyncDirState,
config: &Config,
) -> Result<()> {
info!("Renaming rsync folders for close to atomic update of the rsync module dir");
let current_path = config.rsync_dir_current();
if let Some(current) = &rsync_state.current {
let current_preserve_path = current.path(config);
if current_path.exists() {
info!(
"Renaming the rsync directory for previous revision to: {}",
current_preserve_path.display()
);
std::fs::rename(&current_path, &current_preserve_path).with_context(|| {
format!(
"Could not rename current rsync dir from '{}' to '{}'",
current_path.display(),
current_preserve_path.display()
)
})?;
}
}
info!(
"Rename rsync dir for new revision to '{}'",
current_path.display()
);
std::fs::rename(&new_revision.path(config), &current_path).with_context(|| {
format!(
"Could not rename new rsync dir from '{}' to '{}'",
new_revision.path(config).display(),
current_path.display()
)
})?;
Ok(())
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
struct RsyncDirState {
current: Option<RsyncRevision>,
old: Vec<DeprecatedRsyncRevision>,
}
impl RsyncDirState {
/// Gets the current state from disk, if a state file exists. Otherwise returns
/// a new blank state.
fn recover(config: &Config) -> Result<Self> {
let state_path = config.rsync_state_path();
if state_path.exists() {
let json_bytes = file_ops::read_file(&state_path).with_context(|| {
format!("Cannot read rsync state file at: {}", state_path.display())
})?;
serde_json::from_slice(json_bytes.as_ref()).with_context(|| {
format!(
"Cannot deserialize json for current state from {}",
state_path.display()
)
})
} else {
Ok(RsyncDirState {
current: None,
old: vec![],
})
}
}
/// Persists the state to disk
fn persist(&self, config: &Config) -> Result<()> {
let state_path = config.rsync_state_path();
let json = serde_json::to_string_pretty(&self)?;
file_ops::write_buf(&state_path, json.as_bytes()).with_context(|| "Could not save state.")
}
/// Updates the current revision for this state, moves a possible
/// existing current state to old.
fn update_current(&mut self, current: RsyncRevision) {
let existing = std::mem::replace(&mut self.current, Some(current));
if let Some(existing) = existing {
self.old.push(existing.deprecate());
}
}
/// Cleans old directories from disk when their time has come, and updates
/// this state (forgets these old versions). Will throw an error if removing
/// an old dir fails, but will simply skip removing old dirs if they had
/// already been removed.
fn clean_old(&mut self, config: &Config) -> Result<()> {
let clean_before = Time::seconds_ago(config.cleanup_after);
for old in self
.old
.iter()
.filter(|deprecated| deprecated.since <= clean_before)
{
let path = old.revision.path(config);
if path.exists() {
info!(
"Removing rsync directory: {}, deprecated since: {}",
path.display(),
old.since
);
// Try to remove the old directory if it still exists
std::fs::remove_dir_all(&path).with_context(|| {
format!(
"Could not remove rsync dir for old revision at: {}",
path.display()
)
})?;
}
}
self.old
.retain(|deprecated| deprecated.since > clean_before);
Ok(())
}
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
struct RsyncRevision {
#[serde(deserialize_with = "util::de_uuid", serialize_with = "util::ser_uuid")]
session_id: Uuid,
serial: u64,
}
impl RsyncRevision {
fn dir_name(&self) -> String {
format!("session_{}_serial_{}", self.session_id, self.serial)
}
fn path(&self, config: &Config) -> PathBuf {
config.rsync_dir.join(&self.dir_name())
}
fn deprecate(self) -> DeprecatedRsyncRevision {
DeprecatedRsyncRevision {
since: Time::now(),
revision: self,
}
}
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
struct DeprecatedRsyncRevision {
since: Time,
revision: RsyncRevision,
}
struct RsyncFromSnapshotWriter {
out_path: PathBuf,
include_host_and_module: bool,
}
impl RsyncFromSnapshotWriter {
/// Creates an empty directory for the rsync out_path. Particularly needed if the snapshot
/// is empty since no files (and parent dirs) would be created in that case - and we want to
/// see an empty directory. See issue #62.
fn create_out_path_if_missing(&self) -> Result<()> {
if !self.out_path.exists() {
std::fs::create_dir_all(&self.out_path).with_context(|| {
format!(
"Cannot create output directory for rsync at {}",
&self.out_path.display()
)
})
} else {
Ok(())
}
}
/// Processes the given snapshot and writes any published files under the
/// rsync out_path directory
fn for_snapshot_path(&mut self, snapshot: &Path) -> Result<()> {
let source_file = File::open(snapshot)?;
let buf_reader = BufReader::new(source_file);
self.process(buf_reader)?;
Ok(())
}
}
impl ProcessSnapshot for RsyncFromSnapshotWriter {
type Err = anyhow::Error;
fn meta(&mut self, _session_id: Uuid, _serial: u64) -> Result<()> {
Ok(()) // nothing to do
}
fn publish(
&mut self,
uri: rpki::uri::Rsync,
data: &mut rpki::rrdp::ObjectReader,
) -> Result<()> {
let path = if self.include_host_and_module {
self.out_path.join(format!(
"{}/{}/{}",
uri.authority(),
uri.module_name(),
uri.path()
))
} else {
self.out_path.join(uri.path())
};
// Read the bytes into memory, we will need to parse this in order
// to fix the mtime of the file. In other words.. we _could_ copy
// the bytes from the reader into a file on disk, but then we would
// have to re-read them to parse them anyway.
let mut bytes: Vec<u8> = vec![];
data.read_to_end(&mut bytes)?;
file_ops::write_buf(&path, &bytes).with_context(|| {
format!(
"Could not copy element for uri: {}, to path: {}",
uri,
path.to_string_lossy()
)
})?;
if let Err(e) = fix_since(&path, &bytes) {
warn!("{}", e);
}
Ok(())
}
}
// Try to fix the modification time for a repository object.
// This is needed because otherwise some clients will always think
// there is an update.
fn fix_since(path: &Path, data: &[u8]) -> Result<()> {
let path_str = path.to_string_lossy();
let time = if path_str.ends_with(".cer") {
Cert::decode(data).map(|cert| cert.validity().not_before())
} else if path_str.ends_with(".crl") {
Crl::decode(data).map(|crl| crl.this_update())
} else if path_str.ends_with(".mft") {
Manifest::decode(data, false).map(|mft| mft.this_update())
} else if path_str.ends_with(".roa") {
Roa::decode(data, false).map(|roa| roa.cert().validity().not_before())
} else |
.map_err(|_| anyhow!("Cannot parse object at: {} to derive mtime", path_str))?;
let mtime = FileTime::from_unix_time(time.timestamp(), 0);
set_file_mtime(&path, mtime).map_err(|e| {
anyhow!(
"Cannot modify mtime for object at: {}, error: {}",
path_str,
e
)
})?;
Ok(())
}
#[cfg(test)]
mod tests {
use filetime::FileTime;
use std::{
fs,
path::{Path, PathBuf},
};
use crate::util::test_with_dir;
use super::RsyncFromSnapshotWriter;
#[test]
fn write_rsync_from_snapshot() {
test_with_dir("write_rsync_from_snapshot", |dir| {
let snapshot_path = PathBuf::from("./test-resources/rrdp-rev2658/e9be21e7-c537-4564-b742-64700978c6b4/2658/rnd-sn/snapshot.xml");
let out_path = dir.join("rsync");
let include_host_and_module = false;
let mut writer = RsyncFromSnapshotWriter {
out_path,
include_host_and_module,
};
writer.for_snapshot_path(&snapshot_path).unwrap();
fn check_mtime(dir: &Path, path: &str, timestamp: i64) {
let path = dir.join(path);
let metadata = fs::metadata(path).unwrap();
let mtime = FileTime::from_last_modification_time(&metadata);
assert_eq!(timestamp, mtime.unix_seconds())
}
check_mtime(
&dir,
"rsync/ta/0/3490C0DEEA1F2E5605230550130F12D42FDE1FCD.cer",
1600268228,
);
check_mtime(
&dir,
"rsync/Acme-Corp-Intl/3/A4E953A4133AC82A46AE19C2E7CC635B51CD11D3.mft",
1622637098,
);
check_mtime(
&dir,
"rsync/Acme-Corp-Intl/5/D2E73D77B71B22FAAB38F5A62DF488283FE97932.crl",
1622621702,
);
check_mtime(&dir, "rsync/Acme-Corp-Intl/3/AS40224.roa", 1620657233);
});
}
}
| {
// Try to parse this as a generic RPKI signed object
SignedObject::decode(data, false).map(|signed| signed.cert().validity().not_before())
} | conditional_block |
tcp.rs | //! # TCP handling
//!
//! This file contains the TCP handling for `clobber`. The loop here is that we connect, write,
//! and then read. If the client is in repeat mode then it will repeatedly write/read while the
//! connection is open.
//!
//! ## Performance Notes
//!
//! ### Perform allocations at startup
//!
//! The pool of connections is created up front, and then connections begin sending requests
//! to match the defined rate. (Or in the case of no defined rate, they start immediately.) In general
//! we try to limit significant allocations to startup rather than doing them on the fly.
//! More specifically, you shouldn't see any of these behaviors inside the tight `while` loop
//! inside the `connection()` method.
//!
//! ### Limit open ports and files
//!
//! Two of the key limiting factors for high TCP client throughput are running out of ports, or
//! opening more files than the underlying OS will allow. `clobber` tries to minimize issues here
//! by giving users control over the max connections. (It's also a good idea to check out your
//! specific `ulimit -n` settings and raise the max number of open files.)
//!
//! #### Avoid cross-thread communication
//! This library uses no cross-thread communication via `std::sync` or `crossbeam`. All futures
//! are executed on a `LocalPool`, and the number of OS threads used is user configurable. This
//! has a number of design impacts. For example, it becomes more difficult to aggregate what each
//! connection is doing. This is simple if you just pass the results to a channel, but this has a
//! non-trivial impact on performance.
//!
//! *Note: This is currently violated by the way we accomplish rate limiting, which relies on a
//! global thread that manages timers. This ends up putting disproportionate load on that thread at
//! some point. But if you're relying on rate limiting you're trying to slow it down, so we're
//! putting this in the 'feature' column. (If anyone would like to contribute a thread-local
//! futures timer it'd be a great contribution to the Rust community!)
//!
use std::net::SocketAddr;
use std::time::Instant;
use async_std::io::{self};
use async_std::net::{TcpStream};
use async_std::prelude::*;
// I'd like to remove this dependency, but async-std doesn't currently have a LocalPool executor
// todo: Revisit
use futures::executor::LocalPool;
use futures::task::SpawnExt;
use futures_timer::Delay;
use log::{debug, error, info, warn};
use crate::{Config};
use byte_mutator::ByteMutator;
use byte_mutator::fuzz_config::FuzzConfig;
/// The overall test runner
///
/// This method contains the main core loop.
///
/// `clobber` will create `connections` number of async futures, distribute them across `threads`
/// threads (defaults to num_cpus), and each future will perform requests in a tight loop. If
/// there is a `rate` specified, there will be an optional delay to stay under the requested rate.
/// The futures are driven by a LocalPool executor, and there is no cross-thread synchronization
/// or communication with the default config. Note: for maximum performance avoid use of the
/// `rate`, `connect_timeout`, and `read_timeout` options.
///
pub fn clobber(config: Config, message: Vec<u8>) -> std::io::Result<()> {
info!("Starting: {:#?}", config);
let mut threads = Vec::with_capacity(config.num_threads() as usize);
// configure fuzzing if a file has been provided in the config
let message = match &config.fuzz_path {
None => ByteMutator::new(&message),
Some(path) => {
match FuzzConfig::from_file(&path) {
Ok(fuzz_config) => ByteMutator::new_from_config(&message, fuzz_config),
Err(e) => {
return Err(e)
},
}
},
};
for _ in 0..config.num_threads() {
// per-thread clones
let message = message.clone();
let config = config.clone();
// start OS thread which will contain a chunk of connections
let thread = std::thread::spawn(move || {
let mut pool = LocalPool::new();
let mut spawner = pool.spawner();
// all connection futures are spawned up front
for i in 0..config.connections_per_thread() {
// per-connection clones
let message = message.clone();
let config = config.clone();
spawner
.spawn(async move {
if config.rate.is_some() {
// Stagger the start of each connection; the delay future must be awaited to take effect.
let _ = Delay::new(i * config.connection_delay()).await;
}
connection(message, config)
.await
.expect("Failed to run connection");
}).unwrap();
}
pool.run();
});
threads.push(thread);
}
for handle in threads {
handle.join().unwrap();
}
Ok(())
}
/// Handles a single connection
///
/// This method infinitely loops, performing a connect/write/read transaction against the
/// configured target. If `repeat` is true in `config`, the loop will keep the connection alive.
/// Otherwise, it will drop the connection after successfully completing a read, and then it will
/// start over and reconnect. If it does not successfully read, it will block until the underlying
/// TCP read fails unless `read-timeout` is configured.
///
/// This is a long-running function that will continue making calls until it hits a time or total
/// loop count limit.
///
/// todo: This ignores both read-timeout and repeat
async fn connection(mut message: ByteMutator, config: Config) -> io::Result<()> {
let start = Instant::now();
let mut count = 0;
let mut loop_complete = |config:&Config| {
count += 1;
if let Some(duration) = config.duration {
if Instant::now() >= start + duration {
return true;
}
}
if let Some(limit) = config.limit_per_connection() {
if count > limit {
return true;
}
}
false
};
let should_delay = |elapsed, config: &Config| {
match config.rate {
Some(_) => {
if elapsed < config.connection_delay() {
true
} else {
warn!("running behind; consider adding more connections");
false
}
}
None => false,
}
};
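// Pacing note (assumption about Config's semantics): `connection_delay()` is taken to be the
// per-connection period needed to hit the requested rate, e.g. 1,000 req/s spread across 100
// connections would give each connection one request per ~100ms window; the loop below then
// sleeps away whatever remains of that window after a transaction completes.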
// This is the guts of the application; the tight loop that executes requests
let mut read_buffer = [0u8; 1024]; // todo variable size? :(
while !loop_complete(&config) {
// todo: add optional timeouts back
let request_start = Instant::now();
if let Ok(mut stream) = connect(&config.target).await {
// one write/read transaction per repeat
for _ in 0..config.repeat {
if write(&mut stream, message.read()).await.is_ok() {
read(&mut stream, &mut read_buffer).await.ok();
}
}
// todo: analysis
// advance mutator state (no-op with no fuzzer config)
message.next();
}
if config.rate.is_some() {
let elapsed = Instant::now() - request_start;
if should_delay(elapsed, &config) {
Delay::new(config.connection_delay() - elapsed)
.await
.unwrap();
}
}
}
Ok(())
}
/// Connects to the provided address, logs, returns Result<TcpStream, io::Error>
async fn connect(addr: &SocketAddr) -> io::Result<TcpStream> {
match TcpStream::connect(addr).await {
Ok(stream) => {
debug!("connected to {}", addr);
Ok(stream)
}
Err(e) => {
if e.kind() != io::ErrorKind::TimedOut {
error!("unknown connect error: '{}'", e);
}
Err(e)
}
}
}
/// Writes provided buffer to the provided address, logs, returns Result<bytes_written, io::Error>
async fn write(stream: &mut TcpStream, buf: &[u8]) -> io::Result<usize> {
match stream.write_all(buf).await {
Ok(_) => {
let n = buf.len();
debug!("{} bytes written", n);
Ok(n)
}
Err(e) => {
error!("write error: '{}'", e);
Err(e)
}
}
}
/// Reads from stream, logs, returns Result<num_bytes_read, io::Error>
async fn read(stream: &mut TcpStream, mut read_buffer: &mut [u8]) -> io::Result<usize> {
match stream.read(&mut read_buffer).await {
Ok(n) => {
debug!("{} bytes read ", n);
Ok(n)
}
Err(e) => {
error!("read error: '{}'", e);
Err(e)
}
}
// todo: Do something with the read_buffer?
// todo: More verbose logging; dump to stdout, do post-run analysis on demand
}
#[cfg(test)]
mod tests {
use super::*;
use crate::server::echo_server;
#[test]
fn test_connect() {
let result = async_std::task::block_on(async {
let addr = echo_server().unwrap();
let result = connect(&addr).await;
result
});
assert!(result.is_ok());
}
#[test]
fn test_write() {
let addr = echo_server().unwrap();
let input = "test".as_bytes();
let want = input.len();
let result = async_std::task::block_on(async move {
let mut stream = connect(&addr).await?;
let bytes_written = write(&mut stream, &input).await?;
Ok::<_, io::Error>(bytes_written)
});
assert!(result.is_ok());
assert_eq!(result.unwrap(), want);
}
#[test]
fn test_read() |
}
| {
let addr = echo_server().unwrap();
let input = "test\n\r\n".as_bytes();
let want = input.len();
let result = async_std::task::block_on(async move {
let mut stream = connect(&addr).await?;
let mut read_buffer = [0u8; 1024];
let _ = write(&mut stream, &input).await?;
let bytes_read = read(&mut stream, &mut read_buffer).await?;
Ok::<_, io::Error>(bytes_read)
});
assert!(result.is_ok());
assert_eq!(want, result.unwrap());
} | identifier_body |
tcp.rs | //! # TCP handling
//!
//! This file contains the TCP handling for `clobber`. The loop here is that we connect, write,
//! and then read. If the client is in repeat mode then it will repeatedly write/read while the
//! connection is open.
//!
//! ## Performance Notes
//!
//! ### Perform allocations at startup
//!
//! The pool of connections is created up front, and then connections begin sending requests
//! to match the defined rate. (Or in the case of no defined rate, they start immediately.) In general
//! we try to limit significant allocations to startup rather than doing them on the fly.
//! More specifically, you shouldn't see any of these behaviors inside the tight `while` loop
//! inside the `connection()` method.
//!
//! ### Limit open ports and files
//!
//! Two of the key limiting factors for high TCP client throughput are running out of ports, or
//! opening more files than the underlying OS will allow. `clobber` tries to minimize issues here
//! by giving users control over the max connections. (It's also a good idea to check out your
//! specific `ulimit -n` settings and raise the max number of open files.)
//!
//! #### Avoid cross-thread communication
//! This library uses no cross-thread communication via `std::sync` or `crossbeam`. All futures
//! are executed on a `LocalPool`, and the number of OS threads used is user configurable. This
//! has a number of design impacts. For example, it becomes more difficult to aggregate what each
//! connection is doing. This is simple if you just pass the results to a channel, but this has a
//! non-trivial impact on performance.
//!
//! *Note: This is currently violated by the way we accomplish rate limiting, which relies on a
//! global thread that manages timers. This ends up putting disproportionate load on that thread at
//! some point. But if you're relying on rate limiting you're trying to slow it down, so we're
//! putting this in the 'feature' column. (If anyone would like to contribute a thread-local
//! futures timer it'd be a great contribution to the Rust community!)
//!
use std::net::SocketAddr;
use std::time::Instant;
use async_std::io::{self};
use async_std::net::{TcpStream};
use async_std::prelude::*;
// I'd like to remove this dependency, but async-std doesn't currently have a LocalPool executor
// todo: Revisit
use futures::executor::LocalPool;
use futures::task::SpawnExt;
use futures_timer::Delay;
use log::{debug, error, info, warn};
use crate::{Config};
use byte_mutator::ByteMutator;
use byte_mutator::fuzz_config::FuzzConfig;
/// The overall test runner
///
/// This method contains the main core loop.
///
/// `clobber` will create `connections` number of async futures, distribute them across `threads`
/// threads (defaults to num_cpus), and each future will perform requests in a tight loop. If
/// there is a `rate` specified, there will be an optional delay to stay under the requested rate.
/// The futures are driven by a LocalPool executor, and there is no cross-thread synchronization
/// or communication with the default config. Note: for maximum performance avoid use of the
/// `rate`, `connect_timeout`, and `read_timeout` options.
///
pub fn clobber(config: Config, message: Vec<u8>) -> std::io::Result<()> {
info!("Starting: {:#?}", config);
let mut threads = Vec::with_capacity(config.num_threads() as usize);
// configure fuzzing if a file has been provided in the config
let message = match &config.fuzz_path {
None => ByteMutator::new(&message),
Some(path) => {
match FuzzConfig::from_file(&path) {
Ok(fuzz_config) => ByteMutator::new_from_config(&message, fuzz_config),
Err(e) => {
return Err(e)
},
}
},
};
for _ in 0..config.num_threads() {
// per-thread clones
let message = message.clone();
let config = config.clone();
// start OS thread which will contain a chunk of connections
let thread = std::thread::spawn(move || {
let mut pool = LocalPool::new();
let mut spawner = pool.spawner();
// all connection futures are spawned up front
for i in 0..config.connections_per_thread() {
// per-connection clones
let message = message.clone();
let config = config.clone();
spawner
.spawn(async move {
if config.rate.is_some() {
// Stagger the start of each connection; the delay future must be awaited to take effect.
let _ = Delay::new(i * config.connection_delay()).await;
}
connection(message, config)
.await
.expect("Failed to run connection");
}).unwrap();
}
pool.run();
});
threads.push(thread);
}
for handle in threads {
handle.join().unwrap();
}
Ok(())
}
/// Handles a single connection
///
/// This method infinitely loops, performing a connect/write/read transaction against the
/// configured target. If `repeat` is true in `config`, the loop will keep the connection alive.
/// Otherwise, it will drop the connection after successfully completing a read, and then it will
/// start over and reconnect. If it does not successfully read, it will block until the underlying
/// TCP read fails unless `read-timeout` is configured.
///
/// This is a long-running function that will continue making calls until it hits a time or total
/// loop count limit.
///
/// todo: This ignores both read-timeout and repeat
async fn connection(mut message: ByteMutator, config: Config) -> io::Result<()> {
let start = Instant::now();
let mut count = 0;
let mut loop_complete = |config:&Config| {
count += 1;
if let Some(duration) = config.duration {
if Instant::now() >= start + duration {
return true;
}
}
if let Some(limit) = config.limit_per_connection() {
if count > limit {
return true;
}
}
false
};
let should_delay = |elapsed, config: &Config| {
match config.rate {
Some(_) => {
if elapsed < config.connection_delay() {
true
} else {
warn!("running behind; consider adding more connections");
false
}
}
None => false,
}
};
// This is the guts of the application; the tight loop that executes requests
let mut read_buffer = [0u8; 1024]; // todo variable size? :(
while !loop_complete(&config) {
// todo: add optional timeouts back
let request_start = Instant::now();
if let Ok(mut stream) = connect(&config.target).await {
// one write/read transaction per repeat
for _ in 0..config.repeat {
if write(&mut stream, message.read()).await.is_ok() {
read(&mut stream, &mut read_buffer).await.ok();
}
}
// todo: analysis
// advance mutator state (no-op with no fuzzer config)
message.next();
}
if config.rate.is_some() {
let elapsed = Instant::now() - request_start;
if should_delay(elapsed, &config) {
Delay::new(config.connection_delay() - elapsed)
.await
.unwrap();
}
}
}
Ok(())
}
/// Connects to the provided address, logs, returns Result<TcpStream, io::Error>
async fn connect(addr: &SocketAddr) -> io::Result<TcpStream> {
match TcpStream::connect(addr).await {
Ok(stream) => {
debug!("connected to {}", addr);
Ok(stream)
}
Err(e) => {
if e.kind() != io::ErrorKind::TimedOut {
error!("unknown connect error: '{}'", e);
}
Err(e)
}
}
}
/// Writes provided buffer to the provided address, logs, returns Result<bytes_written, io::Error>
async fn write(stream: &mut TcpStream, buf: &[u8]) -> io::Result<usize> {
match stream.write_all(buf).await {
Ok(_) => {
let n = buf.len();
debug!("{} bytes written", n);
Ok(n)
}
Err(e) => {
error!("write error: '{}'", e);
Err(e)
}
}
}
/// Reads from stream, logs, returns Result<num_bytes_read, io::Error>
async fn read(stream: &mut TcpStream, mut read_buffer: &mut [u8]) -> io::Result<usize> {
match stream.read(&mut read_buffer).await {
Ok(n) => {
debug!("{} bytes read ", n);
Ok(n)
}
Err(e) => {
error!("read error: '{}'", e);
Err(e)
}
}
// todo: Do something with the read_buffer?
// todo: More verbose logging; dump to stdout, do post-run analysis on demand
}
#[cfg(test)]
mod tests {
use super::*;
use crate::server::echo_server;
#[test]
fn | () {
let result = async_std::task::block_on(async {
let addr = echo_server().unwrap();
let result = connect(&addr).await;
result
});
assert!(result.is_ok());
}
#[test]
fn test_write() {
let addr = echo_server().unwrap();
let input = "test".as_bytes();
let want = input.len();
let result = async_std::task::block_on(async move {
let mut stream = connect(&addr).await?;
let bytes_written = write(&mut stream, &input).await?;
Ok::<_, io::Error>(bytes_written)
});
assert!(result.is_ok());
assert_eq!(result.unwrap(), want);
}
#[test]
fn test_read() {
let addr = echo_server().unwrap();
let input = "test\n\r\n".as_bytes();
let want = input.len();
let result = async_std::task::block_on(async move {
let mut stream = connect(&addr).await?;
let mut read_buffer = [0u8; 1024];
let _ = write(&mut stream, &input).await?;
let bytes_read = read(&mut stream, &mut read_buffer).await?;
Ok::<_, io::Error>(bytes_read)
});
assert!(result.is_ok());
assert_eq!(want, result.unwrap());
}
}
| test_connect | identifier_name |
tcp.rs | //! # TCP handling
//!
//! This file contains the TCP handling for `clobber`. The loop here is that we connect, write,
//! and then read. If the client is in repeat mode then it will repeatedly write/read while the
//! connection is open.
//!
//! ## Performance Notes
//!
//! ### Perform allocations at startup
//!
//! The pool of connections is created up front, and then connections begin sending requests
//! to match the defined rate. (Or in the case of no defined rate, they start immediately.) In general
//! we try to limit significant allocations to startup rather than doing them on the fly.
//! More specifically, you shouldn't see any of these behaviors inside the tight `while` loop
//! inside the `connection()` method.
//!
//! ### Limit open ports and files
//!
//! Two of the key limiting factors for high TCP client throughput are running out of ports, or
//! opening more files than the underlying OS will allow. `clobber` tries to minimize issues here
//! by giving users control over the max connections. (It's also a good idea to check out your
//! specific `ulimit -n` settings and raise the max number of open files.)
//!
//! #### Avoid cross-thread communication
//! This library uses no cross-thread communication via `std::sync` or `crossbeam`. All futures
//! are executed on a `LocalPool`, and the number of OS threads used is user configurable. This
//! has a number of design impacts. For example, it becomes more difficult to aggregate what each
//! connection is doing. This is simple if you just pass the results to a channel, but this has a
//! non-trivial impact on performance.
//!
//! *Note: This is currently violated by the way we accomplish rate limiting, which relies on a
//! global thread that manages timers. This ends up putting disproportionate load on that thread at
//! some point. But if you're relying on rate limiting you're trying to slow it down, so we're
//! putting this in the 'feature' column. (If anyone would like to contribute a thread-local
//! futures timer it'd be a great contribution to the Rust community!)
//!
use std::net::SocketAddr;
use std::time::Instant;
use async_std::io::{self};
use async_std::net::{TcpStream};
use async_std::prelude::*;
// I'd like to remove this dependency, but async-std doesn't currently have a LocalPool executor
// todo: Revisit
use futures::executor::LocalPool;
use futures::task::SpawnExt;
use futures_timer::Delay;
use log::{debug, error, info, warn};
use crate::{Config};
use byte_mutator::ByteMutator;
use byte_mutator::fuzz_config::FuzzConfig;
/// The overall test runner
///
/// This method contains the main core loop.
///
/// `clobber` will create `connections` number of async futures, distribute them across `threads`
/// threads (defaults to num_cpus), and each future will perform requests in a tight loop. If
/// there is a `rate` specified, there will be an optional delay to stay under the requested rate.
/// The futures are driven by a LocalPool executor, and there is no cross-thread synchronization
/// or communication with the default config. Note: for maximum performance avoid use of the
/// `rate`, `connect_timeout`, and `read_timeout` options.
///
pub fn clobber(config: Config, message: Vec<u8>) -> std::io::Result<()> {
info!("Starting: {:#?}", config);
let mut threads = Vec::with_capacity(config.num_threads() as usize);
// configure fuzzing if a file has been provided in the config
let message = match &config.fuzz_path {
None => ByteMutator::new(&message),
Some(path) => {
match FuzzConfig::from_file(&path) {
Ok(fuzz_config) => ByteMutator::new_from_config(&message, fuzz_config),
Err(e) => {
return Err(e)
},
}
},
};
for _ in 0..config.num_threads() {
// per-thread clones
let message = message.clone();
let config = config.clone();
// start OS thread which will contain a chunk of connections
let thread = std::thread::spawn(move || {
let mut pool = LocalPool::new();
let mut spawner = pool.spawner();
// all connection futures are spawned up front
for i in 0..config.connections_per_thread() {
// per-connection clones
let message = message.clone();
let config = config.clone();
spawner
.spawn(async move {
if config.rate.is_some() {
// Stagger the start of each connection; the delay future must be awaited to take effect.
let _ = Delay::new(i * config.connection_delay()).await;
}
connection(message, config)
.await
.expect("Failed to run connection");
}).unwrap();
}
pool.run();
});
threads.push(thread);
}
for handle in threads {
handle.join().unwrap();
}
Ok(())
}
/// Handles a single connection
///
/// This method infinitely loops, performing a connect/write/read transaction against the
/// configured target. If `repeat` is true in `config`, the loop will keep the connection alive.
/// Otherwise, it will drop the connection after successfully completing a read, and then it will
/// start over and reconnect. If it does not successfully read, it will block until the underlying
/// TCP read fails unless `read-timeout` is configured.
///
/// This is a long-running function that will continue making calls until it hits a time or total
/// loop count limit.
///
/// todo: This ignores both read-timeout and repeat
async fn connection(mut message: ByteMutator, config: Config) -> io::Result<()> {
let start = Instant::now();
let mut count = 0;
let mut loop_complete = |config:&Config| {
count += 1;
if let Some(duration) = config.duration {
if Instant::now() >= start + duration {
return true;
}
}
if let Some(limit) = config.limit_per_connection() {
if count > limit {
return true;
}
}
false
};
let should_delay = |elapsed, config: &Config| {
match config.rate {
Some(_) => {
if elapsed < config.connection_delay() {
true
} else {
warn!("running behind; consider adding more connections");
false
}
}
None => false,
}
};
// This is the guts of the application; the tight loop that executes requests
let mut read_buffer = [0u8; 1024]; // todo variable size? :(
while !loop_complete(&config) {
// todo: add optional timeouts back
let request_start = Instant::now();
if let Ok(mut stream) = connect(&config.target).await {
// one write/read transaction per repeat
for _ in 0..config.repeat {
if write(&mut stream, message.read()).await.is_ok() {
read(&mut stream, &mut read_buffer).await.ok();
}
}
// todo: analysis
// advance mutator state (no-op with no fuzzer config)
message.next();
}
if config.rate.is_some() {
let elapsed = Instant::now() - request_start;
if should_delay(elapsed, &config) {
Delay::new(config.connection_delay() - elapsed)
.await
.unwrap();
}
}
}
Ok(())
}
/// Connects to the provided address, logs, returns Result<TcpStream, io::Error>
async fn connect(addr: &SocketAddr) -> io::Result<TcpStream> {
match TcpStream::connect(addr).await {
Ok(stream) => {
debug!("connected to {}", addr);
Ok(stream)
}
Err(e) => {
if e.kind() != io::ErrorKind::TimedOut {
error!("unknown connect error: '{}'", e);
}
Err(e)
}
}
}
/// Writes provided buffer to the provided address, logs, returns Result<bytes_written, io::Error>
async fn write(stream: &mut TcpStream, buf: &[u8]) -> io::Result<usize> {
match stream.write_all(buf).await {
Ok(_) => {
let n = buf.len();
debug!("{} bytes written", n);
Ok(n)
}
Err(e) => {
error!("write error: '{}'", e);
Err(e)
}
}
}
/// Reads from stream, logs, returns Result<num_bytes_read, io::Error>
async fn read(stream: &mut TcpStream, mut read_buffer: &mut [u8]) -> io::Result<usize> {
match stream.read(&mut read_buffer).await {
Ok(n) => {
debug!("{} bytes read ", n);
Ok(n)
}
Err(e) => {
error!("read error: '{}'", e);
Err(e)
}
}
| }
#[cfg(test)]
mod tests {
use super::*;
use crate::server::echo_server;
#[test]
fn test_connect() {
let result = async_std::task::block_on(async {
let addr = echo_server().unwrap();
let result = connect(&addr).await;
result
});
assert!(result.is_ok());
}
#[test]
fn test_write() {
let addr = echo_server().unwrap();
let input = "test".as_bytes();
let want = input.len();
let result = async_std::task::block_on(async move {
let mut stream = connect(&addr).await?;
let bytes_written = write(&mut stream, &input).await?;
Ok::<_, io::Error>(bytes_written)
});
assert!(result.is_ok());
assert_eq!(result.unwrap(), want);
}
#[test]
fn test_read() {
let addr = echo_server().unwrap();
let input = "test\n\r\n".as_bytes();
let want = input.len();
let result = async_std::task::block_on(async move {
let mut stream = connect(&addr).await?;
let mut read_buffer = [0u8; 1024];
let _ = write(&mut stream, &input).await?;
let bytes_read = read(&mut stream, &mut read_buffer).await?;
Ok::<_, io::Error>(bytes_read)
});
assert!(result.is_ok());
assert_eq!(want, result.unwrap());
}
} | // todo: Do something with the read_buffer?
// todo: More verbose logging; dump to stdout, do post-run analysis on demand | random_line_split |
epoll_file.rs | ::epoll_waiter::EpollWaiter;
use super::host_file_epoller::HostFileEpoller;
use super::{EpollCtl, EpollEvent, EpollFlags};
use crate::events::{Observer, Waiter, WaiterQueue};
use crate::fs::{
AtomicIoEvents, File, FileTableEvent, FileTableNotifier, HostFd, IoEvents, IoNotifier,
};
use crate::prelude::*;
// TODO: Prevent two epoll files from monitoring each other, which may cause
// deadlock in the current implementation.
// TODO: Fix unreliable EpollFiles after process spawning. EpollFile is connected
// with the current process's file table by registering itself as an observer
// to the file table. But if an EpollFile is cloned or inherited by a child
// process, then this EpollFile still has connection with the parent process's
// file table, which is problematic.
/// A file that provides epoll API.
///
/// Conceptually, we maintain two lists: one consists of all interesting files,
/// which can be managed by the epoll ctl commands; the other is for ready files,
/// which are files that have some events. An epoll wait only needs to iterate the
/// ready list and poll each file to see if the file is ready for the interesting
/// I/O.
///
/// To maintain the ready list, we need to monitor interesting events that happen
/// on the files. To do so, the `EpollFile` registers itself as an `Observer` to
/// the `IoNotifier`s of the monitored files. Thus, we can add a file to the ready
/// list when an event happens on the file.
///
/// LibOS files are easy to monitor. LibOS files are implemented by us. We know
/// exactly when an event happens and thus can broadcast it using `IoNotifier`.
///
/// Unlike LibOS files, host files are implemented by the host OS. We have no way
/// to let the host OS _push_ events to us. Luckily, we can do the reverse: _poll_
/// host files to check events. And there is a good time to do so; that is, at
/// every epoll wait call. We have made a helper called `HostFileEpoller`, which can
/// poll events on a set of host files and trigger their associated `Notifier`s to
/// broadcast their events, e.g., to `EpollFile`.
///
/// This way, both LibOS files and host files can notify the `EpollFile` about
/// their events.
pub struct EpollFile {
// All interesting entries.
interest: SgxMutex<HashMap<FileDesc, Arc<EpollEntry>>>,
// Entries that are probably ready (having events happened).
ready: SgxMutex<VecDeque<Arc<EpollEntry>>>,
// All threads that are waiting on this epoll file.
waiters: WaiterQueue,
// A notifier to broadcast events on this epoll file.
notifier: IoNotifier,
// A helper to poll the events on the interesting host files.
host_file_epoller: HostFileEpoller,
// Any EpollFile is wrapped with Arc when created.
weak_self: Weak<Self>,
// Host events
host_events: Atomic<IoEvents>,
}
impl EpollFile {
pub fn new() -> Arc<Self> {
let interest = Default::default();
let ready = Default::default();
let waiters = WaiterQueue::new();
let notifier = IoNotifier::new();
let host_file_epoller = HostFileEpoller::new();
let weak_self = Default::default();
let host_events = Atomic::new(IoEvents::empty());
let arc_self = Self {
interest,
ready,
waiters,
notifier,
host_file_epoller,
weak_self,
host_events,
}
.wrap_self();
arc_self.register_to_file_table();
arc_self
}
fn wrap_self(self) -> Arc<Self> {
let mut strong_self = Arc::new(self);
let weak_self = Arc::downgrade(&strong_self);
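// The Arc was created just above and has no other owners yet, so it is sound to turn it
// back into a raw pointer, patch the `weak_self` field in place, and re-wrap it with
// `Arc::from_raw` without any other thread being able to observe the intermediate state.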
unsafe {
let ptr_self = Arc::into_raw(strong_self) as *mut Self;
(*ptr_self).weak_self = weak_self;
strong_self = Arc::from_raw(ptr_self);
}
strong_self
}
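// Register this epoll file as an observer of the current process's file table so that it
// can react to file-table events (see the `Observer<FileTableEvent>` impl below), e.g. to
// drop interest entries for fds that are closed elsewhere (assumed intent).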
fn register_to_file_table(&self) {
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
let thread = current!();
let file_table = thread.files().lock().unwrap();
file_table.notifier().register(weak_observer, None, None);
}
fn unregister_from_file_table(&self) {
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
let thread = current!();
let file_table = thread.files().lock().unwrap();
file_table.notifier().unregister(&weak_observer);
}
pub fn control(&self, cmd: &EpollCtl) -> Result<()> {
debug!("epoll control: cmd = {:?}", cmd);
match cmd {
EpollCtl::Add(fd, event, flags) => {
self.add_interest(*fd, *event, *flags)?;
}
EpollCtl::Del(fd) => {
self.del_interest(*fd)?;
}
EpollCtl::Mod(fd, event, flags) => {
self.mod_interest(*fd, *event, *flags)?;
}
}
Ok(())
}
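// Illustrative mapping (the actual syscall dispatch lives outside this file): an
// epoll_ctl(EPOLL_CTL_ADD/MOD/DEL) request would be translated into the corresponding
// `EpollCtl::Add`/`Mod`/`Del` command passed to `control` above, and epoll_wait into
// `wait` below.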
pub fn wait(
&self,
revents: &mut [MaybeUninit<EpollEvent>],
timeout: Option<&Duration>,
) -> Result<usize> {
debug!("epoll wait: timeout = {:?}", timeout);
let mut timeout = timeout.cloned();
let max_count = revents.len();
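// Level-triggered entries that are still ready after being polled are collected here and
// pushed back onto the ready list before returning, so the next wait call sees them again.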
let mut reinsert = VecDeque::with_capacity(max_count);
let waiter = EpollWaiter::new(&self.host_file_epoller);
loop {
// Poll the latest states of the interested host files. If a host
// file is ready, then it will be pushed into the ready list. Note
// that this is the only way through which a host file can appear in
// the ready list. This ensures that only the host files whose
// events are update-to-date will be returned, reducing the chances
// of false positive results to the minimum.
self.host_file_epoller.poll_events(max_count);
// Prepare for the waiter.wait_mut() at the end of the loop
self.waiters.reset_and_enqueue(waiter.as_ref());
// Pop from the ready list to find as many results as possible
let mut count = 0;
while count < max_count {
// Pop some entries from the ready list
let mut ready_entries = self.pop_ready(max_count - count);
if ready_entries.len() == 0 {
break;
}
// Note that while iterating the ready entries, we do not hold the lock
// of the ready list. This reduces the chances of lock contention.
for ep_entry in ready_entries.into_iter() {
if ep_entry.is_deleted.load(Ordering::Acquire) {
continue;
}
// Poll the file that corresponds to the entry
let mut inner = ep_entry.inner.lock().unwrap();
let mask = inner.event.mask();
let file = &ep_entry.file;
let events = file.poll_new() & mask;
if events.is_empty() {
continue;
}
// We find a ready file!
let mut revent = inner.event;
revent.mask = events;
revents[count].write(revent);
count += 1;
// Behave differently according the epoll flags
if inner.flags.contains(EpollFlags::ONE_SHOT) {
inner.event.mask = IoEvents::empty();
}
if !inner
.flags
.intersects(EpollFlags::EDGE_TRIGGER | EpollFlags::ONE_SHOT)
{
drop(inner);
// Host files should not be reinserted into the ready list
if ep_entry.file.host_fd().is_none() {
reinsert.push_back(ep_entry);
}
}
}
}
// If any results, we can return
if count > 0 {
// Push the entries that are still ready after polling back to the ready list
if reinsert.len() > 0 {
self.push_ready_iter(reinsert.into_iter());
}
return Ok(count);
}
// Wait for a while to try again later.
let ret = waiter.wait_mut(timeout.as_mut());
if let Err(e) = ret {
if e.errno() == ETIMEDOUT {
return Ok(0);
} else {
return Err(e);
}
}
// This means we have been waken up successfully. Let's try again.
}
}
fn add_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> {
let file = current!().file(fd)?;
let arc_self = self.weak_self.upgrade().unwrap();
if Arc::ptr_eq(&(arc_self as Arc<dyn File>), &file) {
return_errno!(EINVAL, "an epoll file cannot epoll itself");
}
self.check_flags(&flags);
self.prepare_event(&mut event);
let ep_entry = Arc::new(EpollEntry::new(fd, file, event, flags));
// A critical section protected by the lock of self.interest
{
let notifier = ep_entry
.file
.notifier()
.ok_or_else(|| errno!(EINVAL, "a file must have an associated notifier"))?;
let mut interest_entries = self.interest.lock().unwrap();
if interest_entries.get(&fd).is_some() {
return_errno!(EEXIST, "fd is already registered");
}
interest_entries.insert(fd, ep_entry.clone());
// Start observing events on the target file.
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
let weak_ep_entry = Arc::downgrade(&ep_entry);
notifier.register(weak_observer, Some(IoEvents::all()), Some(weak_ep_entry));
// Handle host file
if ep_entry.file.host_fd().is_some() {
self.host_file_epoller
.add_file(ep_entry.file.clone(), event, flags);
return Ok(());
}
}
self.push_ready(ep_entry);
Ok(())
}
fn del_interest(&self, fd: FileDesc) -> Result<()> {
// A critical section protected by the lock of self.interest
{
let mut interest_entries = self.interest.lock().unwrap();
let ep_entry = interest_entries
.remove(&fd)
.ok_or_else(|| errno!(ENOENT, "fd is not added"))?;
ep_entry.is_deleted.store(true, Ordering::Release);
let notifier = ep_entry.file.notifier().unwrap();
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
notifier.unregister(&weak_observer);
if ep_entry.file.host_fd().is_some() {
self.host_file_epoller.del_file(&ep_entry.file);
}
}
Ok(())
}
fn mod_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> {
self.check_flags(&flags);
self.prepare_event(&mut event);
// A critical section protected by the lock of self.interest
let ep_entry = {
let mut interest_entries = self.interest.lock().unwrap();
let ep_entry = interest_entries
.get(&fd)
.ok_or_else(|| errno!(ENOENT, "fd is not added"))?
.clone();
let new_ep_inner = EpollEntryInner { event, flags };
let mut old_ep_inner = ep_entry.inner.lock().unwrap();
*old_ep_inner = new_ep_inner;
drop(old_ep_inner);
if ep_entry.file.host_fd().is_some() {
self.host_file_epoller
.mod_file(&ep_entry.file, event, flags);
return Ok(());
}
ep_entry
};
self.push_ready(ep_entry);
Ok(())
}
fn push_ready(&self, ep_entry: Arc<EpollEntry>) {
// Fast path to avoid locking
if ep_entry.is_ready.load(Ordering::Relaxed) {
// Concurrency note:
// What if right after returning a true value of `is_ready`, then the `EpollEntry` is
// popped from the ready list? Does it mean that we miss an interesting event?
//
// The answer is NO. If the `is_ready` field of an `EpollEntry` turns from `true` to
// `false`, then the `EpollEntry` must be popped out of the ready list and its
// corresponding file must be polled in the `wait` method. This means that we have
// taken into account any interesting events that happened on the file so far.
return;
}
self.push_ready_iter(std::iter::once(ep_entry));
}
fn push_ready_iter<I: Iterator<Item = Arc<EpollEntry>>>(&self, ep_entries: I) {
let mut has_pushed_any = false;
// A critical section protected by self.ready.lock()
{
let mut ready_entries = self.ready.lock().unwrap();
for ep_entry in ep_entries {
if ep_entry.is_ready.load(Ordering::Relaxed) {
continue;
}
ep_entry.is_ready.store(true, Ordering::Relaxed);
ready_entries.push_back(ep_entry);
has_pushed_any = true;
}
}
if has_pushed_any {
self.mark_ready();
}
}
fn pop_ready(&self, max_count: usize) -> VecDeque<Arc<EpollEntry>> {
// A critical section protected by self.ready.lock()
{
let mut ready_entries = self.ready.lock().unwrap();
let max_count = max_count.min(ready_entries.len());
ready_entries
.drain(..max_count)
.map(|ep_entry| {
ep_entry.is_ready.store(false, Ordering::Relaxed);
ep_entry
})
.collect::<VecDeque<Arc<EpollEntry>>>()
}
}
fn mark_ready(&self) {
self.notifier.broadcast(&IoEvents::IN);
self.waiters.dequeue_and_wake_all();
}
fn check_flags(&self, flags: &EpollFlags) {
if flags.intersects(EpollFlags::EXCLUSIVE | EpollFlags::WAKE_UP) {
warn!("{:?} contains unsupported flags", flags);
}
}
fn prepare_event(&self, event: &mut EpollEvent) {
// Add two events that are reported by default
event.mask |= (IoEvents::ERR | IoEvents::HUP);
}
}
impl File for EpollFile {
fn poll_new(&self) -> IoEvents {
if self
.host_events
.load(Ordering::Acquire)
.contains(IoEvents::IN)
{
return IoEvents::IN;
}
let ready_entries = self.ready.lock().unwrap();
if !ready_entries.is_empty() {
return IoEvents::IN;
}
IoEvents::empty()
}
fn notifier(&self) -> Option<&IoNotifier> {
Some(&self.notifier)
}
fn host_fd(&self) -> Option<&HostFd> |
fn update_host_events(&self, ready: &IoEvents, mask: &IoEvents, trigger_notifier: bool) {
self.host_events.update(ready, mask, Ordering::Release);
if trigger_notifier {
self.notifier.broadcast(ready);
}
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl Drop for EpollFile {
fn drop(&mut self) {
// Do not try to `self.weak_self.upgrade()`! The Arc object must have been
// dropped at this point.
let self_observer = self.weak_self.clone() as Weak<dyn Observer<IoEvents>>;
// Unregister ourself from all interesting files' notifiers
let mut interest_entries = self.interest.lock().unwrap();
interest_entries.drain().for_each(|(_, ep_entry)| {
if let Some(notifier) = ep_entry.file.notifier() {
notifier.unregister(&self_observer);
}
});
self.unregister_from_file_table();
}
}
impl Observer<IoEvents> for EpollFile {
fn on_event(&self, _events: &IoEvents, metadata: &Option<Weak<dyn Any + Send + Sync>>) {
let ep_entry_opt = metadata
.as_ref()
.and_then(|weak_any| weak_any.upgrade())
.and_then(|strong_any| strong_any.downcast().ok());
let ep_entry: Arc<EpollEntry> = match ep_entry_opt {
None => return,
Some(ep_entry) => ep_entry,
};
self.push_ready(ep_entry);
}
}
impl Observer<FileTableEvent> for EpollFile {
fn on_event(&self, event: &FileTableEvent, _metadata: &Option<Weak<dyn Any + Send + Sync>>) {
let FileTableEvent::Del(fd) = event;
let _ = self.del_interest(*fd);
}
}
impl fmt::Debug for EpollFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("EpollFile")
.field("interest", &self.interest.lock().unwrap())
.field("ready", &self.ready.lock().unwrap())
.finish()
}
}
pub trait AsEpollFile {
fn as_epoll_file(&self) -> Result<&EpollFile>;
}
impl AsEpollFile for FileRef {
fn as_epoll_file(&self) -> Result<&EpollFile> {
self.as_any()
.downcast_ref::<EpollFile>()
.ok_or_else(|| errno!(EBADF, "not an epoll file"))
}
}
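// Call-site sketch (with a hypothetical `file_ref: FileRef` obtained from the
// current file table): an epoll syscall handler would typically run
// `let ep_file = file_ref.as_epoll_file()?;` and then dispatch the request to
// `ep_file.control(..)` or `ep_file.wait(..)`.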
#[derive(Debug)]
struct EpollEntry {
fd: FileDesc,
file: FileRef,
inner: SgxMutex<EpollEntryInner>,
// Whether the entry is in the ready list
is_ready: AtomicBool,
// Whether the entry has been deleted from the interest list
is_deleted: AtomicBool,
}
impl EpollEntry {
pub fn new(fd: FileDesc, file: FileRef, event: EpollEvent, flags: EpollFlags) -> Self {
let is_ready = Default::default();
| {
Some(self.host_file_epoller.host_fd())
} | identifier_body |
epoll_file.rs | ::epoll_waiter::EpollWaiter;
use super::host_file_epoller::HostFileEpoller;
use super::{EpollCtl, EpollEvent, EpollFlags};
use crate::events::{Observer, Waiter, WaiterQueue};
use crate::fs::{
AtomicIoEvents, File, FileTableEvent, FileTableNotifier, HostFd, IoEvents, IoNotifier,
};
use crate::prelude::*;
// TODO: Prevent two epoll files from monitoring each other, which may cause
// deadlock in the current implementation.
// TODO: Fix unreliable EpollFiles after process spawning. EpollFile is connected
// with the current process's file table by registering itself as an observer
// to the file table. But if an EpollFile is cloned or inherited by a child
// process, then this EpollFile still has connection with the parent process's
// file table, which is problematic.
/// A file that provides epoll API.
///
/// Conceptually, we maintain two lists: one consists of all interesting files,
/// which can be managed by the epoll ctl commands; the other consists of ready files,
/// i.e., files that have pending events. An epoll wait only needs to iterate the
/// ready list and poll each file to see if the file is ready for the interesting
/// I/O.
///
/// To maintain the ready list, we need to monitor interesting events that happen
/// on the files. To do so, the `EpollFile` registers itself as an `Observer` to
/// the `IoNotifier`s of the monitored files. Thus, we can add a file to the ready
/// list when an event happens on the file.
///
/// LibOS files are easy to monitor. LibOS files are implemented by us. We know
/// exactly when an event happens and thus can broadcast it using `IoNotifier`.
///
/// Unlike LibOS files, host files are implemented by the host OS. We have no way
/// to let the host OS _push_ events to us. Luckily, we can do the reverse: _poll_
/// host files to check events. And there is a good timing for it; that is, at
/// every epoll wait call. We have made a helper called `HostFileEpoller`, which can
/// poll events on a set of host files and trigger their associated `Notifier`s to
/// broadcast their events, e.g., to `EpollFile`.
///
/// This way, both LibOS files and host files can notify the `EpollFile` about
/// their events.
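///
/// A minimal usage sketch (illustrative only and not compiled; it assumes the
/// caller has prepared an `event: EpollEvent` and a registered `fd: FileDesc`,
/// and that `EpollEvent` is `Copy`, as its use elsewhere in this module suggests):
///
/// ```ignore
/// let ep_file = EpollFile::new();
/// ep_file.control(&EpollCtl::Add(fd, event, EpollFlags::empty()))?;
/// // Collect up to 64 ready events, waiting at most 100 ms.
/// let mut revents = vec![MaybeUninit::<EpollEvent>::uninit(); 64];
/// let count = ep_file.wait(&mut revents, Some(&Duration::from_millis(100)))?;
/// ```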
pub struct EpollFile {
// All interesting entries.
interest: SgxMutex<HashMap<FileDesc, Arc<EpollEntry>>>,
// Entries that are probably ready (having events happened).
ready: SgxMutex<VecDeque<Arc<EpollEntry>>>,
// All threads that are waiting on this epoll file.
waiters: WaiterQueue,
// A notifier to broadcast events on this epoll file.
notifier: IoNotifier,
// A helper to poll the events on the interesting host files.
host_file_epoller: HostFileEpoller,
// Any EpollFile is wrapped with Arc when created.
weak_self: Weak<Self>,
// Host events
host_events: Atomic<IoEvents>,
}
impl EpollFile {
pub fn new() -> Arc<Self> {
let interest = Default::default();
let ready = Default::default();
let waiters = WaiterQueue::new();
let notifier = IoNotifier::new();
let host_file_epoller = HostFileEpoller::new();
let weak_self = Default::default();
let host_events = Atomic::new(IoEvents::empty());
let arc_self = Self {
interest,
ready,
waiters,
notifier,
host_file_epoller,
weak_self,
host_events,
}
.wrap_self();
arc_self.register_to_file_table();
arc_self
}
fn wrap_self(self) -> Arc<Self> {
let mut strong_self = Arc::new(self);
let weak_self = Arc::downgrade(&strong_self);
unsafe {
let ptr_self = Arc::into_raw(strong_self) as *mut Self;
(*ptr_self).weak_self = weak_self;
strong_self = Arc::from_raw(ptr_self);
}
strong_self
}
fn register_to_file_table(&self) {
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
let thread = current!();
let file_table = thread.files().lock().unwrap();
file_table.notifier().register(weak_observer, None, None);
}
fn unregister_from_file_table(&self) {
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
let thread = current!();
let file_table = thread.files().lock().unwrap();
file_table.notifier().unregister(&weak_observer);
}
pub fn control(&self, cmd: &EpollCtl) -> Result<()> {
debug!("epoll control: cmd = {:?}", cmd);
match cmd {
EpollCtl::Add(fd, event, flags) => {
self.add_interest(*fd, *event, *flags)?;
}
EpollCtl::Del(fd) => {
self.del_interest(*fd)?;
}
EpollCtl::Mod(fd, event, flags) => {
self.mod_interest(*fd, *event, *flags)?;
}
}
Ok(())
}
pub fn wait(
&self,
revents: &mut [MaybeUninit<EpollEvent>],
timeout: Option<&Duration>,
) -> Result<usize> {
debug!("epoll wait: timeout = {:?}", timeout);
let mut timeout = timeout.cloned();
let max_count = revents.len();
let mut reinsert = VecDeque::with_capacity(max_count);
let waiter = EpollWaiter::new(&self.host_file_epoller);
loop {
// Poll the latest states of the interested host files. If a host
// file is ready, then it will be pushed into the ready list. Note
// that this is the only way through which a host file can appear in
// the ready list. This ensures that only the host files whose
// events are up-to-date will be returned, reducing the chances
// of false positive results to the minimum.
self.host_file_epoller.poll_events(max_count);
// Prepare for the waiter.wait_mut() at the end of the loop
self.waiters.reset_and_enqueue(waiter.as_ref());
// Pop from the ready list to find as many results as possible
let mut count = 0;
while count < max_count {
// Pop some entries from the ready list
let mut ready_entries = self.pop_ready(max_count - count);
if ready_entries.len() == 0 {
break;
}
// Note that while iterating the ready entries, we do not hold the lock
// of the ready list. This reduces the chances of lock contention.
for ep_entry in ready_entries.into_iter() {
if ep_entry.is_deleted.load(Ordering::Acquire) {
continue;
}
// Poll the file that corresponds to the entry
let mut inner = ep_entry.inner.lock().unwrap();
let mask = inner.event.mask();
let file = &ep_entry.file;
let events = file.poll_new() & mask;
if events.is_empty() {
continue;
}
// We find a ready file!
let mut revent = inner.event;
revent.mask = events;
revents[count].write(revent);
count += 1;
// Behave differently according to the epoll flags
if inner.flags.contains(EpollFlags::ONE_SHOT) {
inner.event.mask = IoEvents::empty();
}
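// Level-triggered case: when neither EDGE_TRIGGER nor ONE_SHOT is set, the
// entry is pushed back to the ready list (see `reinsert` below) so that
// subsequent waits re-poll the file for events that may still be pending.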
if !inner
.flags
.intersects(EpollFlags::EDGE_TRIGGER | EpollFlags::ONE_SHOT)
{
drop(inner);
// Host files should not be reinserted into the ready list
if ep_entry.file.host_fd().is_none() {
reinsert.push_back(ep_entry);
}
}
}
}
// If any results, we can return
if count > 0 {
// Push the entries that are still ready after polling back to the ready list
if reinsert.len() > 0 {
self.push_ready_iter(reinsert.into_iter());
}
return Ok(count);
}
// Wait for a while to try again later.
let ret = waiter.wait_mut(timeout.as_mut());
if let Err(e) = ret {
if e.errno() == ETIMEDOUT {
return Ok(0);
} else {
return Err(e);
}
}
// This means we have been woken up successfully. Let's try again.
}
}
fn add_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> {
let file = current!().file(fd)?;
let arc_self = self.weak_self.upgrade().unwrap();
if Arc::ptr_eq(&(arc_self as Arc<dyn File>), &file) {
return_errno!(EINVAL, "an epoll file cannot epoll itself");
}
self.check_flags(&flags);
self.prepare_event(&mut event);
let ep_entry = Arc::new(EpollEntry::new(fd, file, event, flags));
// A critical section protected by the lock of self.interest
{
let notifier = ep_entry
.file
.notifier()
.ok_or_else(|| errno!(EINVAL, "a file must have an associated notifier"))?;
let mut interest_entries = self.interest.lock().unwrap();
if interest_entries.get(&fd).is_some() {
return_errno!(EEXIST, "fd is already registered");
}
interest_entries.insert(fd, ep_entry.clone());
// Start observing events on the target file.
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
let weak_ep_entry = Arc::downgrade(&ep_entry);
notifier.register(weak_observer, Some(IoEvents::all()), Some(weak_ep_entry));
// Handle host file
if ep_entry.file.host_fd().is_some() {
self.host_file_epoller
.add_file(ep_entry.file.clone(), event, flags);
return Ok(());
}
}
self.push_ready(ep_entry);
Ok(())
}
fn del_interest(&self, fd: FileDesc) -> Result<()> {
// A critical section protected by the lock of self.interest
{
let mut interest_entries = self.interest.lock().unwrap();
let ep_entry = interest_entries
.remove(&fd)
.ok_or_else(|| errno!(ENOENT, "fd is not added"))?;
ep_entry.is_deleted.store(true, Ordering::Release);
let notifier = ep_entry.file.notifier().unwrap();
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
notifier.unregister(&weak_observer);
if ep_entry.file.host_fd().is_some() {
self.host_file_epoller.del_file(&ep_entry.file);
}
}
Ok(())
}
fn mod_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> {
self.check_flags(&flags);
self.prepare_event(&mut event);
// A critical section protected by the lock of self.interest
let ep_entry = {
let mut interest_entries = self.interest.lock().unwrap();
let ep_entry = interest_entries
.get(&fd)
.ok_or_else(|| errno!(ENOENT, "fd is not added"))?
.clone();
let new_ep_inner = EpollEntryInner { event, flags };
let mut old_ep_inner = ep_entry.inner.lock().unwrap();
*old_ep_inner = new_ep_inner;
drop(old_ep_inner);
if ep_entry.file.host_fd().is_some() {
self.host_file_epoller
.mod_file(&ep_entry.file, event, flags);
return Ok(());
}
ep_entry
};
self.push_ready(ep_entry);
Ok(())
}
fn push_ready(&self, ep_entry: Arc<EpollEntry>) {
// Fast path to avoid locking
if ep_entry.is_ready.load(Ordering::Relaxed) {
// Concurrency note:
// What if right after returning a true value of `is_ready`, then the `EpollEntry` is
// popped from the ready list? Does it mean that we miss an interesting event?
//
// The answer is NO. If the `is_ready` field of an `EpollEntry` turns from `true` to
// `false`, then the `EpollEntry` must be popped out of the ready list and its
// corresponding file must be polled in the `wait` method. This means that we have
// taken into account any interesting events that happened on the file so far.
return;
}
self.push_ready_iter(std::iter::once(ep_entry));
}
fn push_ready_iter<I: Iterator<Item = Arc<EpollEntry>>>(&self, ep_entries: I) {
let mut has_pushed_any = false;
// A critical section protected by self.ready.lock()
{
let mut ready_entries = self.ready.lock().unwrap();
for ep_entry in ep_entries {
if ep_entry.is_ready.load(Ordering::Relaxed) {
continue;
}
ep_entry.is_ready.store(true, Ordering::Relaxed);
ready_entries.push_back(ep_entry);
has_pushed_any = true;
}
}
if has_pushed_any {
self.mark_ready();
}
}
fn pop_ready(&self, max_count: usize) -> VecDeque<Arc<EpollEntry>> {
// A critical section protected by self.ready.lock()
{
let mut ready_entries = self.ready.lock().unwrap();
let max_count = max_count.min(ready_entries.len());
ready_entries
.drain(..max_count)
.map(|ep_entry| {
ep_entry.is_ready.store(false, Ordering::Relaxed);
ep_entry
})
.collect::<VecDeque<Arc<EpollEntry>>>()
}
}
fn mark_ready(&self) {
self.notifier.broadcast(&IoEvents::IN);
self.waiters.dequeue_and_wake_all();
}
fn check_flags(&self, flags: &EpollFlags) {
if flags.intersects(EpollFlags::EXCLUSIVE | EpollFlags::WAKE_UP) {
warn!("{:?} contains unsupported flags", flags);
}
}
fn prepare_event(&self, event: &mut EpollEvent) {
// Add two events that are reported by default
event.mask |= (IoEvents::ERR | IoEvents::HUP);
}
}
impl File for EpollFile {
fn poll_new(&self) -> IoEvents {
if self
.host_events
.load(Ordering::Acquire)
.contains(IoEvents::IN)
{
return IoEvents::IN;
}
let ready_entries = self.ready.lock().unwrap();
if !ready_entries.is_empty() {
return IoEvents::IN;
}
IoEvents::empty()
}
fn notifier(&self) -> Option<&IoNotifier> {
Some(&self.notifier)
}
fn host_fd(&self) -> Option<&HostFd> {
Some(self.host_file_epoller.host_fd())
}
fn update_host_events(&self, ready: &IoEvents, mask: &IoEvents, trigger_notifier: bool) {
self.host_events.update(ready, mask, Ordering::Release);
if trigger_notifier {
self.notifier.broadcast(ready);
}
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl Drop for EpollFile {
fn drop(&mut self) {
// Do not try to `self.weak_self.upgrade()`! The Arc object must have been
// dropped at this point.
let self_observer = self.weak_self.clone() as Weak<dyn Observer<IoEvents>>;
// Unregister ourself from all interesting files' notifiers
let mut interest_entries = self.interest.lock().unwrap();
interest_entries.drain().for_each(|(_, ep_entry)| {
if let Some(notifier) = ep_entry.file.notifier() {
notifier.unregister(&self_observer);
}
});
self.unregister_from_file_table();
}
}
impl Observer<IoEvents> for EpollFile {
fn on_event(&self, _events: &IoEvents, metadata: &Option<Weak<dyn Any + Send + Sync>>) {
let ep_entry_opt = metadata
.as_ref()
.and_then(|weak_any| weak_any.upgrade())
.and_then(|strong_any| strong_any.downcast().ok());
let ep_entry: Arc<EpollEntry> = match ep_entry_opt {
None => return,
Some(ep_entry) => ep_entry,
};
self.push_ready(ep_entry);
}
}
impl Observer<FileTableEvent> for EpollFile {
fn on_event(&self, event: &FileTableEvent, _metadata: &Option<Weak<dyn Any + Send + Sync>>) {
let FileTableEvent::Del(fd) = event;
let _ = self.del_interest(*fd);
}
}
impl fmt::Debug for EpollFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("EpollFile")
.field("interest", &self.interest.lock().unwrap())
.field("ready", &self.ready.lock().unwrap())
.finish()
}
}
pub trait AsEpollFile {
fn as_epoll_file(&self) -> Result<&EpollFile>;
}
impl AsEpollFile for FileRef {
fn | (&self) -> Result<&EpollFile> {
self.as_any()
.downcast_ref::<EpollFile>()
.ok_or_else(|| errno!(EBADF, "not an epoll file"))
}
}
#[derive(Debug)]
struct EpollEntry {
fd: FileDesc,
file: FileRef,
inner: SgxMutex<EpollEntryInner>,
// Whether the entry is in the ready list
is_ready: AtomicBool,
// Whether the entry has been deleted from the interest list
is_deleted: AtomicBool,
}
impl EpollEntry {
pub fn new(fd: FileDesc, file: FileRef, event: EpollEvent, flags: EpollFlags) -> Self {
let is_ready = Default::default();
| as_epoll_file | identifier_name |
epoll_file.rs | ::epoll_waiter::EpollWaiter;
use super::host_file_epoller::HostFileEpoller;
use super::{EpollCtl, EpollEvent, EpollFlags};
use crate::events::{Observer, Waiter, WaiterQueue};
use crate::fs::{
AtomicIoEvents, File, FileTableEvent, FileTableNotifier, HostFd, IoEvents, IoNotifier,
};
use crate::prelude::*;
// TODO: Prevent two epoll files from monitoring each other, which may cause
// deadlock in the current implementation.
// TODO: Fix unreliable EpollFiles after process spawning. EpollFile is connected
// with the current process's file table by registering itself as an observer
// to the file table. But if an EpollFile is cloned or inherited by a child
// process, then this EpollFile still has connection with the parent process's
// file table, which is problematic.
/// A file that provides epoll API.
///
/// Conceptually, we maintain two lists: one consists of all interesting files,
/// which can be managed by the epoll ctl commands; the other consists of ready files,
/// i.e., files that have pending events. An epoll wait only needs to iterate the
/// ready list and poll each file to see if the file is ready for the interesting
/// I/O.
///
/// To maintain the ready list, we need to monitor interesting events that happen
/// on the files. To do so, the `EpollFile` registers itself as an `Observer` to
/// the `IoNotifier`s of the monitored files. Thus, we can add a file to the ready
/// list when an event happens on the file.
///
/// LibOS files are easy to monitor. LibOS files are implemented by us. We know
/// exactly when an event happens and thus can broadcast it using `IoNotifier`.
///
/// Unlike LibOS files, host files are implemented by the host OS. We have no way
/// to let the host OS _push_ events to us. Luckily, we can do the reverse: _poll_
/// host files to check events. And there is a good timing for it; that is, at
/// every epoll wait call. We have made a helper called `HostFileEpoller`, which can
/// poll events on a set of host files and trigger their associated `Notifier`s to
/// broadcast their events, e.g., to `EpollFile`.
///
/// This way, both LibOS files and host files can notify the `EpollFile` about
/// their events.
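///
/// A minimal usage sketch (illustrative only and not compiled; it assumes the
/// caller has prepared an `event: EpollEvent` and a registered `fd: FileDesc`,
/// and that `EpollEvent` is `Copy`, as its use elsewhere in this module suggests):
///
/// ```ignore
/// let ep_file = EpollFile::new();
/// ep_file.control(&EpollCtl::Add(fd, event, EpollFlags::empty()))?;
/// // Collect up to 64 ready events, waiting at most 100 ms.
/// let mut revents = vec![MaybeUninit::<EpollEvent>::uninit(); 64];
/// let count = ep_file.wait(&mut revents, Some(&Duration::from_millis(100)))?;
/// ```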
pub struct EpollFile {
// All interesting entries.
interest: SgxMutex<HashMap<FileDesc, Arc<EpollEntry>>>,
// Entries that are probably ready (having events happened).
ready: SgxMutex<VecDeque<Arc<EpollEntry>>>,
// All threads that are waiting on this epoll file.
waiters: WaiterQueue,
// A notifier to broadcast events on this epoll file.
notifier: IoNotifier,
// A helper to poll the events on the interesting host files.
host_file_epoller: HostFileEpoller,
// Any EpollFile is wrapped with Arc when created.
weak_self: Weak<Self>,
// Host events
host_events: Atomic<IoEvents>,
}
impl EpollFile {
pub fn new() -> Arc<Self> {
let interest = Default::default();
let ready = Default::default();
let waiters = WaiterQueue::new();
let notifier = IoNotifier::new();
let host_file_epoller = HostFileEpoller::new();
let weak_self = Default::default();
let host_events = Atomic::new(IoEvents::empty());
let arc_self = Self {
interest,
ready,
waiters,
notifier,
host_file_epoller,
weak_self,
host_events,
}
.wrap_self();
arc_self.register_to_file_table();
arc_self
}
fn wrap_self(self) -> Arc<Self> {
let mut strong_self = Arc::new(self);
let weak_self = Arc::downgrade(&strong_self);
unsafe {
let ptr_self = Arc::into_raw(strong_self) as *mut Self;
(*ptr_self).weak_self = weak_self;
strong_self = Arc::from_raw(ptr_self);
}
strong_self
}
fn register_to_file_table(&self) {
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
let thread = current!();
let file_table = thread.files().lock().unwrap();
file_table.notifier().register(weak_observer, None, None);
}
fn unregister_from_file_table(&self) {
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
let thread = current!();
let file_table = thread.files().lock().unwrap();
file_table.notifier().unregister(&weak_observer);
}
pub fn control(&self, cmd: &EpollCtl) -> Result<()> {
debug!("epoll control: cmd = {:?}", cmd);
match cmd {
EpollCtl::Add(fd, event, flags) => |
EpollCtl::Del(fd) => {
self.del_interest(*fd)?;
}
EpollCtl::Mod(fd, event, flags) => {
self.mod_interest(*fd, *event, *flags)?;
}
}
Ok(())
}
pub fn wait(
&self,
revents: &mut [MaybeUninit<EpollEvent>],
timeout: Option<&Duration>,
) -> Result<usize> {
debug!("epoll wait: timeout = {:?}", timeout);
let mut timeout = timeout.cloned();
let max_count = revents.len();
let mut reinsert = VecDeque::with_capacity(max_count);
let waiter = EpollWaiter::new(&self.host_file_epoller);
loop {
// Poll the latest states of the interested host files. If a host
// file is ready, then it will be pushed into the ready list. Note
// that this is the only way through which a host file can appear in
// the ready list. This ensures that only the host files whose
// events are up-to-date will be returned, reducing the chances
// of false positive results to the minimum.
self.host_file_epoller.poll_events(max_count);
// Prepare for the waiter.wait_mut() at the end of the loop
self.waiters.reset_and_enqueue(waiter.as_ref());
// Pop from the ready list to find as many results as possible
let mut count = 0;
while count < max_count {
// Pop some entries from the ready list
let mut ready_entries = self.pop_ready(max_count - count);
if ready_entries.len() == 0 {
break;
}
// Note that while iterating the ready entries, we do not hold the lock
// of the ready list. This reduces the chances of lock contention.
for ep_entry in ready_entries.into_iter() {
if ep_entry.is_deleted.load(Ordering::Acquire) {
continue;
}
// Poll the file that corresponds to the entry
let mut inner = ep_entry.inner.lock().unwrap();
let mask = inner.event.mask();
let file = &ep_entry.file;
let events = file.poll_new() & mask;
if events.is_empty() {
continue;
}
// We find a ready file!
let mut revent = inner.event;
revent.mask = events;
revents[count].write(revent);
count += 1;
// Behave differently according to the epoll flags
if inner.flags.contains(EpollFlags::ONE_SHOT) {
inner.event.mask = IoEvents::empty();
}
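// Level-triggered case: when neither EDGE_TRIGGER nor ONE_SHOT is set, the
// entry is pushed back to the ready list (see `reinsert` below) so that
// subsequent waits re-poll the file for events that may still be pending.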
if !inner
.flags
.intersects(EpollFlags::EDGE_TRIGGER | EpollFlags::ONE_SHOT)
{
drop(inner);
// Host files should not be reinserted into the ready list
if ep_entry.file.host_fd().is_none() {
reinsert.push_back(ep_entry);
}
}
}
}
// If any results, we can return
if count > 0 {
// Push the entries that are still ready after polling back to the ready list
if reinsert.len() > 0 {
self.push_ready_iter(reinsert.into_iter());
}
return Ok(count);
}
// Wait for a while to try again later.
let ret = waiter.wait_mut(timeout.as_mut());
if let Err(e) = ret {
if e.errno() == ETIMEDOUT {
return Ok(0);
} else {
return Err(e);
}
}
// This means we have been woken up successfully. Let's try again.
}
}
fn add_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> {
let file = current!().file(fd)?;
let arc_self = self.weak_self.upgrade().unwrap();
if Arc::ptr_eq(&(arc_self as Arc<dyn File>), &file) {
return_errno!(EINVAL, "an epoll file cannot epoll itself");
}
self.check_flags(&flags);
self.prepare_event(&mut event);
let ep_entry = Arc::new(EpollEntry::new(fd, file, event, flags));
// A critical section protected by the lock of self.interest
{
let notifier = ep_entry
.file
.notifier()
.ok_or_else(|| errno!(EINVAL, "a file must have an associated notifier"))?;
let mut interest_entries = self.interest.lock().unwrap();
if interest_entries.get(&fd).is_some() {
return_errno!(EEXIST, "fd is already registered");
}
interest_entries.insert(fd, ep_entry.clone());
// Start observing events on the target file.
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
let weak_ep_entry = Arc::downgrade(&ep_entry);
notifier.register(weak_observer, Some(IoEvents::all()), Some(weak_ep_entry));
// Handle host file
if ep_entry.file.host_fd().is_some() {
self.host_file_epoller
.add_file(ep_entry.file.clone(), event, flags);
return Ok(());
}
}
self.push_ready(ep_entry);
Ok(())
}
fn del_interest(&self, fd: FileDesc) -> Result<()> {
// A critical section protected by the lock of self.interest
{
let mut interest_entries = self.interest.lock().unwrap();
let ep_entry = interest_entries
.remove(&fd)
.ok_or_else(|| errno!(ENOENT, "fd is not added"))?;
ep_entry.is_deleted.store(true, Ordering::Release);
let notifier = ep_entry.file.notifier().unwrap();
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
notifier.unregister(&weak_observer);
if ep_entry.file.host_fd().is_some() {
self.host_file_epoller.del_file(&ep_entry.file);
}
}
Ok(())
}
fn mod_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> {
self.check_flags(&flags);
self.prepare_event(&mut event);
// A critical section protected by the lock of self.interest
let ep_entry = {
let mut interest_entries = self.interest.lock().unwrap();
let ep_entry = interest_entries
.get(&fd)
.ok_or_else(|| errno!(ENOENT, "fd is not added"))?
.clone();
let new_ep_inner = EpollEntryInner { event, flags };
let mut old_ep_inner = ep_entry.inner.lock().unwrap();
*old_ep_inner = new_ep_inner;
drop(old_ep_inner);
if ep_entry.file.host_fd().is_some() {
self.host_file_epoller
.mod_file(&ep_entry.file, event, flags);
return Ok(());
}
ep_entry
};
self.push_ready(ep_entry);
Ok(())
}
fn push_ready(&self, ep_entry: Arc<EpollEntry>) {
// Fast path to avoid locking
if ep_entry.is_ready.load(Ordering::Relaxed) {
// Concurrency note:
// What if right after returning a true value of `is_ready`, then the `EpollEntry` is
// popped from the ready list? Does it mean that we miss an interesting event?
//
// The answer is NO. If the `is_ready` field of an `EpollEntry` turns from `true` to
// `false`, then the `EpollEntry` must be popped out of the ready list and its
// corresponding file must be polled in the `wait` method. This means that we have
// taken into account any interesting events that happened on the file so far.
return;
}
self.push_ready_iter(std::iter::once(ep_entry));
}
fn push_ready_iter<I: Iterator<Item = Arc<EpollEntry>>>(&self, ep_entries: I) {
let mut has_pushed_any = false;
// A critical section protected by self.ready.lock()
{
let mut ready_entries = self.ready.lock().unwrap();
for ep_entry in ep_entries {
if ep_entry.is_ready.load(Ordering::Relaxed) {
continue;
}
ep_entry.is_ready.store(true, Ordering::Relaxed);
ready_entries.push_back(ep_entry);
has_pushed_any = true;
}
}
if has_pushed_any {
self.mark_ready();
}
}
fn pop_ready(&self, max_count: usize) -> VecDeque<Arc<EpollEntry>> {
// A critical section protected by self.ready.lock()
{
let mut ready_entries = self.ready.lock().unwrap();
let max_count = max_count.min(ready_entries.len());
ready_entries
.drain(..max_count)
.map(|ep_entry| {
ep_entry.is_ready.store(false, Ordering::Relaxed);
ep_entry
})
.collect::<VecDeque<Arc<EpollEntry>>>()
}
}
fn mark_ready(&self) {
self.notifier.broadcast(&IoEvents::IN);
self.waiters.dequeue_and_wake_all();
}
fn check_flags(&self, flags: &EpollFlags) {
if flags.intersects(EpollFlags::EXCLUSIVE | EpollFlags::WAKE_UP) {
warn!("{:?} contains unsupported flags", flags);
}
}
fn prepare_event(&self, event: &mut EpollEvent) {
// Add two events that are reported by default
event.mask |= (IoEvents::ERR | IoEvents::HUP);
}
}
impl File for EpollFile {
fn poll_new(&self) -> IoEvents {
if self
.host_events
.load(Ordering::Acquire)
.contains(IoEvents::IN)
{
return IoEvents::IN;
}
let ready_entries = self.ready.lock().unwrap();
if !ready_entries.is_empty() {
return IoEvents::IN;
}
IoEvents::empty()
}
fn notifier(&self) -> Option<&IoNotifier> {
Some(&self.notifier)
}
fn host_fd(&self) -> Option<&HostFd> {
Some(self.host_file_epoller.host_fd())
}
fn update_host_events(&self, ready: &IoEvents, mask: &IoEvents, trigger_notifier: bool) {
self.host_events.update(ready, mask, Ordering::Release);
if trigger_notifier {
self.notifier.broadcast(ready);
}
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl Drop for EpollFile {
fn drop(&mut self) {
// Do not try to `self.weak_self.upgrade()`! The Arc object must have been
// dropped at this point.
let self_observer = self.weak_self.clone() as Weak<dyn Observer<IoEvents>>;
// Unregister ourself from all interesting files' notifiers
let mut interest_entries = self.interest.lock().unwrap();
interest_entries.drain().for_each(|(_, ep_entry)| {
if let Some(notifier) = ep_entry.file.notifier() {
notifier.unregister(&self_observer);
}
});
self.unregister_from_file_table();
}
}
impl Observer<IoEvents> for EpollFile {
fn on_event(&self, _events: &IoEvents, metadata: &Option<Weak<dyn Any + Send + Sync>>) {
let ep_entry_opt = metadata
.as_ref()
.and_then(|weak_any| weak_any.upgrade())
.and_then(|strong_any| strong_any.downcast().ok());
let ep_entry: Arc<EpollEntry> = match ep_entry_opt {
None => return,
Some(ep_entry) => ep_entry,
};
self.push_ready(ep_entry);
}
}
impl Observer<FileTableEvent> for EpollFile {
fn on_event(&self, event: &FileTableEvent, _metadata: &Option<Weak<dyn Any + Send + Sync>>) {
let FileTableEvent::Del(fd) = event;
let _ = self.del_interest(*fd);
}
}
impl fmt::Debug for EpollFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("EpollFile")
.field("interest", &self.interest.lock().unwrap())
.field("ready", &self.ready.lock().unwrap())
.finish()
}
}
pub trait AsEpollFile {
fn as_epoll_file(&self) -> Result<&EpollFile>;
}
impl AsEpollFile for FileRef {
fn as_epoll_file(&self) -> Result<&EpollFile> {
self.as_any()
.downcast_ref::<EpollFile>()
.ok_or_else(|| errno!(EBADF, "not an epoll file"))
}
}
#[derive(Debug)]
struct EpollEntry {
fd: FileDesc,
file: FileRef,
inner: SgxMutex<EpollEntryInner>,
// Whether the entry is in the ready list
is_ready: AtomicBool,
// Whether the entry has been deleted from the interest list
is_deleted: AtomicBool,
}
impl EpollEntry {
pub fn new(fd: FileDesc, file: FileRef, event: EpollEvent, flags: EpollFlags) -> Self {
let is_ready = Default::default();
| {
self.add_interest(*fd, *event, *flags)?;
} | conditional_block |
epoll_file.rs | super::epoll_waiter::EpollWaiter;
use super::host_file_epoller::HostFileEpoller;
use super::{EpollCtl, EpollEvent, EpollFlags};
use crate::events::{Observer, Waiter, WaiterQueue};
use crate::fs::{
AtomicIoEvents, File, FileTableEvent, FileTableNotifier, HostFd, IoEvents, IoNotifier,
};
use crate::prelude::*;
// TODO: Prevent two epoll files from monitoring each other, which may cause
// deadlock in the current implementation.
// TODO: Fix unreliable EpollFiles after process spawning. EpollFile is connected | // process, then this EpollFile still has connection with the parent process's
// file table, which is problematic.
/// A file that provides epoll API.
///
/// Conceptually, we maintain two lists: one consists of all interesting files,
/// which can be managed by the epoll ctl commands; the other consists of ready files,
/// i.e., files that have pending events. An epoll wait only needs to iterate the
/// ready list and poll each file to see if the file is ready for the interesting
/// I/O.
///
/// To maintain the ready list, we need to monitor interesting events that happen
/// on the files. To do so, the `EpollFile` registers itself as an `Observer` to
/// the `IoNotifier`s of the monitored files. Thus, we can add a file to the ready
/// list when an event happens on the file.
///
/// LibOS files are easy to monitor. LibOS files are implemented by us. We know
/// exactly when an event happens and thus can broadcast it using `IoNotifier`.
///
/// Unlike LibOS files, host files are implemented by the host OS. We have no way
/// to let the host OS _push_ events to us. Luckily, we can do the reverse: _poll_
/// host files to check events. And there is a good timing for it; that is, at
/// every epoll wait call. We have made a helper called `HostFileEpoller`, which can
/// poll events on a set of host files and trigger their associated `Notifier`s to
/// broadcast their events, e.g., to `EpollFile`.
///
/// This way, both LibOS files and host files can notify the `EpollFile` about
/// their events.
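///
/// A minimal usage sketch (illustrative only and not compiled; it assumes the
/// caller has prepared an `event: EpollEvent` and a registered `fd: FileDesc`,
/// and that `EpollEvent` is `Copy`, as its use elsewhere in this module suggests):
///
/// ```ignore
/// let ep_file = EpollFile::new();
/// ep_file.control(&EpollCtl::Add(fd, event, EpollFlags::empty()))?;
/// // Collect up to 64 ready events, waiting at most 100 ms.
/// let mut revents = vec![MaybeUninit::<EpollEvent>::uninit(); 64];
/// let count = ep_file.wait(&mut revents, Some(&Duration::from_millis(100)))?;
/// ```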
pub struct EpollFile {
// All interesting entries.
interest: SgxMutex<HashMap<FileDesc, Arc<EpollEntry>>>,
// Entries that are probably ready (having events happened).
ready: SgxMutex<VecDeque<Arc<EpollEntry>>>,
// All threads that are waiting on this epoll file.
waiters: WaiterQueue,
// A notifier to broadcast events on this epoll file.
notifier: IoNotifier,
// A helper to poll the events on the interesting host files.
host_file_epoller: HostFileEpoller,
// Any EpollFile is wrapped with Arc when created.
weak_self: Weak<Self>,
// Host events
host_events: Atomic<IoEvents>,
}
impl EpollFile {
pub fn new() -> Arc<Self> {
let interest = Default::default();
let ready = Default::default();
let waiters = WaiterQueue::new();
let notifier = IoNotifier::new();
let host_file_epoller = HostFileEpoller::new();
let weak_self = Default::default();
let host_events = Atomic::new(IoEvents::empty());
let arc_self = Self {
interest,
ready,
waiters,
notifier,
host_file_epoller,
weak_self,
host_events,
}
.wrap_self();
arc_self.register_to_file_table();
arc_self
}
fn wrap_self(self) -> Arc<Self> {
let mut strong_self = Arc::new(self);
let weak_self = Arc::downgrade(&strong_self);
unsafe {
let ptr_self = Arc::into_raw(strong_self) as *mut Self;
(*ptr_self).weak_self = weak_self;
strong_self = Arc::from_raw(ptr_self);
}
strong_self
}
fn register_to_file_table(&self) {
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
let thread = current!();
let file_table = thread.files().lock().unwrap();
file_table.notifier().register(weak_observer, None, None);
}
fn unregister_from_file_table(&self) {
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
let thread = current!();
let file_table = thread.files().lock().unwrap();
file_table.notifier().unregister(&weak_observer);
}
pub fn control(&self, cmd: &EpollCtl) -> Result<()> {
debug!("epoll control: cmd = {:?}", cmd);
match cmd {
EpollCtl::Add(fd, event, flags) => {
self.add_interest(*fd, *event, *flags)?;
}
EpollCtl::Del(fd) => {
self.del_interest(*fd)?;
}
EpollCtl::Mod(fd, event, flags) => {
self.mod_interest(*fd, *event, *flags)?;
}
}
Ok(())
}
pub fn wait(
&self,
revents: &mut [MaybeUninit<EpollEvent>],
timeout: Option<&Duration>,
) -> Result<usize> {
debug!("epoll wait: timeout = {:?}", timeout);
let mut timeout = timeout.cloned();
let max_count = revents.len();
let mut reinsert = VecDeque::with_capacity(max_count);
let waiter = EpollWaiter::new(&self.host_file_epoller);
loop {
// Poll the latest states of the interested host files. If a host
// file is ready, then it will be pushed into the ready list. Note
// that this is the only way through which a host file can appear in
// the ready list. This ensures that only the host files whose
// events are up-to-date will be returned, reducing the chances
// of false positive results to the minimum.
self.host_file_epoller.poll_events(max_count);
// Prepare for the waiter.wait_mut() at the end of the loop
self.waiters.reset_and_enqueue(waiter.as_ref());
// Pop from the ready list to find as many results as possible
let mut count = 0;
while count < max_count {
// Pop some entries from the ready list
let mut ready_entries = self.pop_ready(max_count - count);
if ready_entries.len() == 0 {
break;
}
// Note that while iterating the ready entries, we do not hold the lock
// of the ready list. This reduces the chances of lock contention.
for ep_entry in ready_entries.into_iter() {
if ep_entry.is_deleted.load(Ordering::Acquire) {
continue;
}
// Poll the file that corresponds to the entry
let mut inner = ep_entry.inner.lock().unwrap();
let mask = inner.event.mask();
let file = &ep_entry.file;
let events = file.poll_new() & mask;
if events.is_empty() {
continue;
}
// We find a ready file!
let mut revent = inner.event;
revent.mask = events;
revents[count].write(revent);
count += 1;
// Behave differently according to the epoll flags
if inner.flags.contains(EpollFlags::ONE_SHOT) {
inner.event.mask = IoEvents::empty();
}
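// Level-triggered case: when neither EDGE_TRIGGER nor ONE_SHOT is set, the
// entry is pushed back to the ready list (see `reinsert` below) so that
// subsequent waits re-poll the file for events that may still be pending.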
if !inner
.flags
.intersects(EpollFlags::EDGE_TRIGGER | EpollFlags::ONE_SHOT)
{
drop(inner);
// Host files should not be reinserted into the ready list
if ep_entry.file.host_fd().is_none() {
reinsert.push_back(ep_entry);
}
}
}
}
// If any results, we can return
if count > 0 {
// Push the entries that are still ready after polling back to the ready list
if reinsert.len() > 0 {
self.push_ready_iter(reinsert.into_iter());
}
return Ok(count);
}
// Wait for a while to try again later.
let ret = waiter.wait_mut(timeout.as_mut());
if let Err(e) = ret {
if e.errno() == ETIMEDOUT {
return Ok(0);
} else {
return Err(e);
}
}
// This means we have been woken up successfully. Let's try again.
}
}
fn add_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> {
let file = current!().file(fd)?;
let arc_self = self.weak_self.upgrade().unwrap();
if Arc::ptr_eq(&(arc_self as Arc<dyn File>), &file) {
return_errno!(EINVAL, "an epoll file cannot epoll itself");
}
self.check_flags(&flags);
self.prepare_event(&mut event);
let ep_entry = Arc::new(EpollEntry::new(fd, file, event, flags));
// A critical section protected by the lock of self.interest
{
let notifier = ep_entry
.file
.notifier()
.ok_or_else(|| errno!(EINVAL, "a file must have an associated notifier"))?;
let mut interest_entries = self.interest.lock().unwrap();
if interest_entries.get(&fd).is_some() {
return_errno!(EEXIST, "fd is already registered");
}
interest_entries.insert(fd, ep_entry.clone());
// Start observing events on the target file.
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
let weak_ep_entry = Arc::downgrade(&ep_entry);
notifier.register(weak_observer, Some(IoEvents::all()), Some(weak_ep_entry));
// Handle host file
if ep_entry.file.host_fd().is_some() {
self.host_file_epoller
.add_file(ep_entry.file.clone(), event, flags);
return Ok(());
}
}
self.push_ready(ep_entry);
Ok(())
}
fn del_interest(&self, fd: FileDesc) -> Result<()> {
// A critical section protected by the lock of self.interest
{
let mut interest_entries = self.interest.lock().unwrap();
let ep_entry = interest_entries
.remove(&fd)
.ok_or_else(|| errno!(ENOENT, "fd is not added"))?;
ep_entry.is_deleted.store(true, Ordering::Release);
let notifier = ep_entry.file.notifier().unwrap();
let weak_observer = self.weak_self.clone() as Weak<dyn Observer<_>>;
notifier.unregister(&weak_observer);
if ep_entry.file.host_fd().is_some() {
self.host_file_epoller.del_file(&ep_entry.file);
}
}
Ok(())
}
fn mod_interest(&self, fd: FileDesc, mut event: EpollEvent, flags: EpollFlags) -> Result<()> {
self.check_flags(&flags);
self.prepare_event(&mut event);
// A critical section protected by the lock of self.interest
let ep_entry = {
let mut interest_entries = self.interest.lock().unwrap();
let ep_entry = interest_entries
.get(&fd)
.ok_or_else(|| errno!(ENOENT, "fd is not added"))?
.clone();
let new_ep_inner = EpollEntryInner { event, flags };
let mut old_ep_inner = ep_entry.inner.lock().unwrap();
*old_ep_inner = new_ep_inner;
drop(old_ep_inner);
if ep_entry.file.host_fd().is_some() {
self.host_file_epoller
.mod_file(&ep_entry.file, event, flags);
return Ok(());
}
ep_entry
};
self.push_ready(ep_entry);
Ok(())
}
fn push_ready(&self, ep_entry: Arc<EpollEntry>) {
// Fast path to avoid locking
if ep_entry.is_ready.load(Ordering::Relaxed) {
// Concurrency note:
// What if right after returning a true value of `is_ready`, then the `EpollEntry` is
// popped from the ready list? Does it mean that we miss an interesting event?
//
// The answer is NO. If the `is_ready` field of an `EpollEntry` turns from `true` to
// `false`, then the `EpollEntry` must be popped out of the ready list and its
// corresponding file must be polled in the `wait` method. This means that we have
// taken into account any interesting events that happened on the file so far.
return;
}
self.push_ready_iter(std::iter::once(ep_entry));
}
fn push_ready_iter<I: Iterator<Item = Arc<EpollEntry>>>(&self, ep_entries: I) {
let mut has_pushed_any = false;
// A critical section protected by self.ready.lock()
{
let mut ready_entries = self.ready.lock().unwrap();
for ep_entry in ep_entries {
if ep_entry.is_ready.load(Ordering::Relaxed) {
continue;
}
ep_entry.is_ready.store(true, Ordering::Relaxed);
ready_entries.push_back(ep_entry);
has_pushed_any = true;
}
}
if has_pushed_any {
self.mark_ready();
}
}
fn pop_ready(&self, max_count: usize) -> VecDeque<Arc<EpollEntry>> {
// A critical section protected by self.ready.lock()
{
let mut ready_entries = self.ready.lock().unwrap();
let max_count = max_count.min(ready_entries.len());
ready_entries
.drain(..max_count)
.map(|ep_entry| {
ep_entry.is_ready.store(false, Ordering::Relaxed);
ep_entry
})
.collect::<VecDeque<Arc<EpollEntry>>>()
}
}
fn mark_ready(&self) {
self.notifier.broadcast(&IoEvents::IN);
self.waiters.dequeue_and_wake_all();
}
fn check_flags(&self, flags: &EpollFlags) {
if flags.intersects(EpollFlags::EXCLUSIVE | EpollFlags::WAKE_UP) {
warn!("{:?} contains unsupported flags", flags);
}
}
fn prepare_event(&self, event: &mut EpollEvent) {
// Add two events that are reported by default
event.mask |= (IoEvents::ERR | IoEvents::HUP);
}
}
impl File for EpollFile {
fn poll_new(&self) -> IoEvents {
if self
.host_events
.load(Ordering::Acquire)
.contains(IoEvents::IN)
{
return IoEvents::IN;
}
let ready_entries = self.ready.lock().unwrap();
if !ready_entries.is_empty() {
return IoEvents::IN;
}
IoEvents::empty()
}
fn notifier(&self) -> Option<&IoNotifier> {
Some(&self.notifier)
}
fn host_fd(&self) -> Option<&HostFd> {
Some(self.host_file_epoller.host_fd())
}
fn update_host_events(&self, ready: &IoEvents, mask: &IoEvents, trigger_notifier: bool) {
self.host_events.update(ready, mask, Ordering::Release);
if trigger_notifier {
self.notifier.broadcast(ready);
}
}
fn as_any(&self) -> &dyn Any {
self
}
}
impl Drop for EpollFile {
fn drop(&mut self) {
// Do not try to `self.weak_self.upgrade()`! The Arc object must have been
// dropped at this point.
let self_observer = self.weak_self.clone() as Weak<dyn Observer<IoEvents>>;
// Unregister ourself from all interesting files' notifiers
let mut interest_entries = self.interest.lock().unwrap();
interest_entries.drain().for_each(|(_, ep_entry)| {
if let Some(notifier) = ep_entry.file.notifier() {
notifier.unregister(&self_observer);
}
});
self.unregister_from_file_table();
}
}
impl Observer<IoEvents> for EpollFile {
fn on_event(&self, _events: &IoEvents, metadata: &Option<Weak<dyn Any + Send + Sync>>) {
let ep_entry_opt = metadata
.as_ref()
.and_then(|weak_any| weak_any.upgrade())
.and_then(|strong_any| strong_any.downcast().ok());
let ep_entry: Arc<EpollEntry> = match ep_entry_opt {
None => return,
Some(ep_entry) => ep_entry,
};
self.push_ready(ep_entry);
}
}
impl Observer<FileTableEvent> for EpollFile {
fn on_event(&self, event: &FileTableEvent, _metadata: &Option<Weak<dyn Any + Send + Sync>>) {
let FileTableEvent::Del(fd) = event;
let _ = self.del_interest(*fd);
}
}
impl fmt::Debug for EpollFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("EpollFile")
.field("interest", &self.interest.lock().unwrap())
.field("ready", &self.ready.lock().unwrap())
.finish()
}
}
pub trait AsEpollFile {
fn as_epoll_file(&self) -> Result<&EpollFile>;
}
impl AsEpollFile for FileRef {
fn as_epoll_file(&self) -> Result<&EpollFile> {
self.as_any()
.downcast_ref::<EpollFile>()
.ok_or_else(|| errno!(EBADF, "not an epoll file"))
}
}
#[derive(Debug)]
struct EpollEntry {
fd: FileDesc,
file: FileRef,
inner: SgxMutex<EpollEntryInner>,
// Whether the entry is in the ready list
is_ready: AtomicBool,
// Whether the entry has been deleted from the interest list
is_deleted: AtomicBool,
}
impl EpollEntry {
pub fn new(fd: FileDesc, file: FileRef, event: EpollEvent, flags: EpollFlags) -> Self {
let is_ready = Default::default();
| // with the current process's file table by registering itself as an observer
// to the file table. But if an EpollFile is cloned or inherited by a child | random_line_split |
lib.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
//! This crate contains Winterfell STARK prover and verifier.
//!
//! A STARK is a novel proof-of-computation scheme to create efficiently verifiable proofs of the
//! correct execution of a computation. The scheme was developed by Eli Ben-Sasson, Michael Riabzev
//! et al. at Technion - Israel Institute of Technology. STARKs do not require an initial trusted
//! setup, and rely on very few cryptographic assumptions. See [references](#references) for more
//! info.
//!
//! ## Proof generation
//! To generate a proof that a computation was executed correctly, you'll need to do the
//! following:
//!
//! 1. Define an *algebraic intermediate representation* (AIR) for your computation. This can
//! be done by implementing [Air] trait.
//! 2. Execute your computation and record its execution trace in an [ExecutionTrace] struct.
//! 3. Execute [prove()] function and supply the AIR of your computation together with its
//! execution trace as input parameters. The function will produce an instance of [StarkProof]
//! as an output.
//!
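//! For instance, putting the three steps together looks roughly like the sketch
//! below. It reuses the `build_do_work_trace`, `WorkAir`, and `PublicInputs` items
//! defined in the examples further down, and assumes a `ProofOptions` value has been
//! constructed elsewhere; see the [prove()] docs for the exact signature.
//!
//! ```ignore
//! let trace = build_do_work_trace(start, n);
//! let pub_inputs = PublicInputs { start, result };
//! let proof = winterfell::prove::<WorkAir>(trace, pub_inputs, options)?;
//! ```
//!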
//! This `StarkProof` can be serialized and sent to a STARK verifier for verification. The size
//! of proof depends on the specifics of a given computation, but for most computations it should
//! be in the range between 15 KB (for very small computations) and 300 KB (for very large
//! computations).
//!
//! Proof generation time is also highly dependent on the specifics of a given computation, but
//! also depends on the capabilities of the machine used to generate the proofs (i.e. on number
//! of CPU cores and memory bandwidth).
//!
//! When the crate is compiled with `concurrent` feature enabled, proof generation will be
//! performed in multiple threads (usually, as many threads as there are logical cores on the
//! machine). The number of threads can be configured via `RAYON_NUM_THREADS` environment
//! variable.
//!
//! ## Proof verification
//! To verify a [StarkProof] generated as described in the previous sections, you'll need to
//! do the following:
//!
//! 1. Define an *algebraic intermediate representation* (AIR) for your computation. This AIR
//! must be the same as the one used during proof generation process.
//! 2. Execute [verify()] function and supply the AIR of your computation together with the
//! [StarkProof] and related public inputs as parameters.
//!
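//! A verification call is then a one-liner, sketched below with the same `WorkAir`
//! and `PublicInputs` types from the examples; see the [verify()] docs for the
//! exact signature.
//!
//! ```ignore
//! let pub_inputs = PublicInputs { start, result };
//! match winterfell::verify::<WorkAir>(proof, pub_inputs) {
//!     Ok(_) => println!("proof verified!"),
//!     Err(err) => println!("verification failed: {}", err),
//! }
//! ```
//!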
//! Proof verification is extremely fast and is nearly independent of the complexity of the
//! computation being verified. In the vast majority of cases proofs can be verified in 3 - 5 ms
//! on a modern mid-range laptop CPU (using a single core).
//!
//! There is one exception, however: if a computation requires a lot of `sequence` assertions
//! (see [Assertion] for more info), the verification time will grow linearly in the number of
//! asserted values. But for the impact to be noticeable, the number of asserted values would
//! need to be in tens of thousands. And even for hundreds of thousands of asserted values, the
//! verification time should not exceed 50 ms.
//!
//! # Examples
//! The best way to understand the STARK proof generation and verification process is to go
//! through a trivial example from start to finish. First, we'll need to pick a computation for
//! which we'll be generating and verifying STARK proofs. To keep things simple, we'll use the
//! following:
//!
//! ```no_run
//! use winterfell::math::{fields::f128::BaseElement, FieldElement};
//!
//! fn do_work(start: BaseElement, n: usize) -> BaseElement {
//! let mut result = start;
//! for _ in 1..n {
//! result = result.exp(3) + BaseElement::new(42);
//! }
//! result
//! }
//! ```
//!
//! This computation starts with an element in a finite field and then, for the specified number
//! of steps, cubes the element and adds value `42` to it.
//!
//! Suppose, we run this computation for a million steps and get some result. Using STARKs we can
//! prove that we did the work correctly without requiring any verifying party to re-execute the
//! computation. Here is how to do it:
//!
//! First, we need to define an *execution trace* for our computation. This trace should capture
//! the state of the computation at every step of its execution. In our case, the trace is just a
//! single column of intermediate values after each execution of the loop. For example, if we start
//! with value `3` and run the computation for 1,048,576 (same as 2<sup>20</sup>) steps, the
//! execution trace will look like this:
//!
//! | Step | State |
//! | :-------: | :----- |
//! | 0 | 3 |
//! | 1 | 69 |
//! | 2 | 328551 |
//! | 3 | 35465687262668193 |
//! | 4 | 237280320818395402166933071684267763523 |
//! |... |
//! | 1,048,575 | 247770943907079986105389697876176586605 |
//!
//! To record the trace, we'll use the [ExecutionTrace] struct. The function below is just a
//! modified version of the `do_work()` function which records every intermediate state of the
//! computation in the [ExecutionTrace] struct:
//!
//! ```no_run
//! use winterfell::{
//! math::{fields::f128::BaseElement, FieldElement},
//! ExecutionTrace,
//! };
//!
//! pub fn build_do_work_trace(start: BaseElement, n: usize) -> ExecutionTrace<BaseElement> {
//! // Instantiate the trace with a given width and length; this will allocate all
//! // required memory for the trace
//! let trace_width = 1;
//! let mut trace = ExecutionTrace::new(trace_width, n);
//!
//! // Fill the trace with data; the first closure initializes the first state of the
//! // computation; the second closure computes the next state of the computation based
//! // on its current state.
//! trace.fill(
//! |state| {
//! state[0] = start;
//! },
//! |_, state| {
//! state[0] = state[0].exp(3u32.into()) + BaseElement::new(42);
//! },
//! );
//!
//! trace
//! }
//! ```
//!
//! Next, we need to define *algebraic intermediate representation* (AIR) for our computation.
//! This process is usually called *arithmetization*. We do this by implementing the [Air] trait.
//! At the high level, the code below does three things:
//!
//! 1. Defines what the public inputs for our computation should look like. These inputs are
//! called "public" because they must be known to both the prover and the verifier.
//! 2. Defines a transition function with a single transition constraint. This transition
//! constraint must evaluate to zero for all valid state transitions, and to non-zero for any
//! invalid state transition. The degree of this constraint is 3 (see more about constraint
//! degrees in the "Constraint degrees" section of [Air] trait documentation).
//! 3. Define two assertions against an execution trace of our computation. These assertions tie
//! a specific set of public inputs to a specific execution trace (see more about assertions
//! in the "Trace assertions" section of [Air] trait documentation).
//!
//! Here is the actual code:
//!
//! ```no_run
//! use winterfell::{
//! math::{fields::f128::BaseElement, FieldElement},
//! Air, AirContext, Assertion, ByteWriter, EvaluationFrame, ProofOptions, Serializable,
//! TraceInfo, TransitionConstraintDegree,
//! };
//!
//! // Public inputs for our computation will consist of the starting value and the end result.
//! pub struct PublicInputs {
//! start: BaseElement,
//! result: BaseElement,
//! }
//!
//! // We need to describe how public inputs can be converted to bytes.
//! impl Serializable for PublicInputs {
//! fn write_into<W: ByteWriter>(&self, target: &mut W) {
//! target.write(self.start);
//! target.write(self.result);
//! }
//! }
//!
//! // For a specific instance of our computation, we'll keep track of the public inputs and
//! // the computation's context which we'll build in the constructor. The context is used
//! // internally by the Winterfell prover/verifier when interpreting this AIR.
//! pub struct WorkAir {
//! context: AirContext<BaseElement>,
//! start: BaseElement,
//! result: BaseElement,
//! }
//!
//! impl Air for WorkAir {
//! // First, we'll specify which finite field to use for our computation, and also how
//! // the public inputs must look like.
//! type BaseElement = BaseElement;
//! type PublicInputs = PublicInputs;
//!
//! // Here, we'll construct a new instance of our computation which is defined by 3
//! // parameters: starting value, number of steps, and the end result. Another way to
//! // think about it is that an instance of our computation is a specific invocation of
//! // the do_work() function.
//! fn new(trace_info: TraceInfo, pub_inputs: PublicInputs, options: ProofOptions) -> Self {
//! // our execution trace should have only one column.
//! assert_eq!(1, trace_info.width());
//!
//! // Our computation requires a single transition constraint. The constraint itself
//! // is defined in the evaluate_transition() method below, but here we need to specify
//! // the expected degree of the constraint. If the expected and actual degrees of the
//! // constraints don't match, an error will be thrown in the debug mode, but in release
//! // mode, an invalid proof will be generated which will not be accepted by any
//! // verifier.
//! let degrees = vec![TransitionConstraintDegree::new(3)];
//! WorkAir {
//! context: AirContext::new(trace_info, degrees, options),
//! start: pub_inputs.start,
//! result: pub_inputs.result,
//! }
//! }
//!
//! // In this method we'll define our transition constraints; a computation is considered to
//! // be valid, if for all valid state transitions, transition constraints evaluate to all
//! // zeros, and for any invalid transition, at least one constraint evaluates to a non-zero
//! // value. The `frame` parameter will contain current and next states of the computation.
//! fn evaluate_transition<E: FieldElement + From<Self::BaseElement>>(
//! &self,
//! frame: &EvaluationFrame<E>,
//! _periodic_values: &[E],
//! result: &mut [E],
//! ) {
//! // First, we'll read the current state, and use it to compute the expected next state
//! let current_state = &frame.current()[0];
//! let next_state = current_state.exp(3u32.into()) + E::from(42u32);
//!
//! // Then, we'll subtract the expected next state from the actual next state; this will
//! // evaluate to zero if and only if the expected and actual states are the same.
//! result[0] = frame.next()[0] - next_state;
//! }
//!
//! // Here, we'll define a set of assertions about the execution trace which must be
//! // satisfied for the computation to be valid. Essentially, this ties the computation's
//! // execution trace to the public inputs.
//! fn get_assertions(&self) -> Vec<Assertion<Self::BaseElement>> {
//! // For our computation to be valid, the value in column 0 at step 0 must be equal to the
//! // starting value, and at the last step it must be equal to the result.
//! let last_step = self.trace_length() - 1;
//! vec![
//! Assertion::single(0, 0, self.start),
//! Assertion::single(0, last_step, self.result),
//! ]
//! }
//!
//! // This is just boilerplate which is used by the Winterfell prover/verifier to retrieve
//! // the context of the computation.
//! fn context(&self) -> &AirContext<Self::BaseElement> {
//! &self.context
//! }
//! }
//! ```
//!
//! Now, we are finally ready to generate and verify STARK proofs.
//!
//! In the code below, we will execute our computation and get the result together with the proof
//! that the computation was executed correctly. Then, we will use this proof (together with the
//! public inputs) to verify that we did in fact execute the computation and got the claimed
//! result.
//!
//! ```
//! # use winterfell::{
//! # math::{fields::f128::BaseElement, FieldElement},
//! # Air, AirContext, Assertion, ByteWriter, EvaluationFrame, Serializable,
//! # TraceInfo, TransitionConstraintDegree,
//! # ExecutionTrace, FieldExtension, HashFunction, ProofOptions, StarkProof,
//! # };
//! #
//! # pub fn build_do_work_trace(start: BaseElement, n: usize) -> ExecutionTrace<BaseElement> {
//! # let trace_width = 1;
//! # let mut trace = ExecutionTrace::new(trace_width, n);
//! # trace.fill(
//! # |state| {
//! # state[0] = start;
//! # },
//! # |_, state| {
//! # state[0] = state[0].exp(3u32.into()) + BaseElement::new(42);
//! # },
//! # );
//! # trace
//! # }
//! #
//! #
//! # pub struct PublicInputs {
//! # start: BaseElement,
//! # result: BaseElement,
//! # }
//! #
//! # impl Serializable for PublicInputs {
//! # fn write_into<W: ByteWriter>(&self, target: &mut W) {
//! # target.write(self.start);
//! # target.write(self.result);
//! # }
//! # }
//! #
//! # pub struct WorkAir {
//! # context: AirContext<BaseElement>,
//! # start: BaseElement,
//! # result: BaseElement,
//! # }
//! #
//! # impl Air for WorkAir {
//! # type BaseElement = BaseElement;
//! # type PublicInputs = PublicInputs;
//! #
//! # fn new(trace_info: TraceInfo, pub_inputs: PublicInputs, options: ProofOptions) -> Self {
//! # assert_eq!(1, trace_info.width());
//! # let degrees = vec![TransitionConstraintDegree::new(3)];
//! # WorkAir {
//! # context: AirContext::new(trace_info, degrees, options),
//! # start: pub_inputs.start,
//! # result: pub_inputs.result, | //! #
//! # fn evaluate_transition<E: FieldElement + From<Self::BaseElement>>(
//! # &self,
//! # frame: &EvaluationFrame<E>,
//! # _periodic_values: &[E],
//! # result: &mut [E],
//! # ) {
//! # let current_state = &frame.current()[0];
//! # let next_state = current_state.exp(3u32.into()) + E::from(42u32);
//! # result[0] = frame.next()[0] - next_state;
//! # }
//! #
//! # fn get_assertions(&self) -> Vec<Assertion<Self::BaseElement>> {
//! # let last_step = self.trace_length() - 1;
//! # vec![
//! # Assertion::single(0, 0, self.start),
//! # Assertion::single(0, last_step, self.result),
//! # ]
//! # }
//! #
//! # fn context(&self) -> &AirContext<Self::BaseElement> {
//! # &self.context
//! # }
//! # }
//! #
//! // We'll just hard-code the parameters here for this example. We'll also run the
//! // computation for only 1024 steps to save time during testing.
//! let start = BaseElement::new(3);
//! let n = 1024;
//!
//! // Build the execution trace and get the result from the last step.
//! let trace = build_do_work_trace(start, n);
//! let result = trace.get(0, n - 1);
//!
//! // Define proof options; these will be enough for a ~96-bit security level.
//! let options = ProofOptions::new(
//! 32, // number of queries
//! 8, // blowup factor
//! 0, // grinding factor
//! HashFunction::Blake3_256,
//! FieldExtension::None,
//! 8, // FRI folding factor
//! 128, // FRI max remainder length
//! );
//!
//! // Generate the proof.
//! let pub_inputs = PublicInputs { start, result };
//! let proof = winterfell::prove::<WorkAir>(trace, pub_inputs, options).unwrap();
//!
//! // Verify the proof. The number of steps and options are encoded in the proof itself,
//! // so we don't need to pass them explicitly to the verifier.
//! let pub_inputs = PublicInputs { start, result };
//! assert!(winterfell::verify::<WorkAir>(proof, pub_inputs).is_ok());
//! ```
//!
//! That's all there is to it!
//!
//! # References
//!
//! If you are interested in learning how STARKs work under the hood, here are a few links to get
//! you started. From the standpoint of this library, *arithmetization* is by far the most
//! important concept to understand.
//!
//! * STARKs whitepaper: [Scalable, transparent, and post-quantum secure computational integrity](https://eprint.iacr.org/2018/046)
//! * STARKs vs. SNARKs: [A Cambrian Explosion of Crypto Proofs](https://nakamoto.com/cambrian-explosion-of-crypto-proofs/)
//!
//! Vitalik Buterin's blog series on zk-STARKs:
//! * [STARKs, part 1: Proofs with Polynomials](https://vitalik.ca/general/2017/11/09/starks_part_1.html)
//! * [STARKs, part 2: Thank Goodness it's FRI-day](https://vitalik.ca/general/2017/11/22/starks_part_2.html)
//! * [STARKs, part 3: Into the Weeds](https://vitalik.ca/general/2018/07/21/starks_part_3.html)
//!
//! StarkWare's STARK Math blog series:
//! * [STARK Math: The Journey Begins](https://medium.com/starkware/stark-math-the-journey-begins-51bd2b063c71)
//! * [Arithmetization I](https://medium.com/starkware/arithmetization-i-15c046390862)
//! * [Arithmetization II](https://medium.com/starkware/arithmetization-ii-403c3b3f4355)
//! * [Low Degree Testing](https://medium.com/starkware/low-degree-testing-f7614f5172db)
//! * [A Framework for Efficient STARKs](https://medium.com/starkware/a-framework-for-efficient-starks-19608ba06fbe)
#![no_std]
pub use prover::{
crypto, iterators, math, prove, Air, AirContext, Assertion, BoundaryConstraint,
BoundaryConstraintGroup, ByteReader, ByteWriter, ConstraintCompositionCoefficients,
ConstraintDivisor, DeepCompositionCoefficients, Deserializable, DeserializationError,
EvaluationFrame, ExecutionTrace, ExecutionTraceFragment, FieldExtension, HashFunction,
ProofOptions, ProverError, Serializable, StarkProof, TraceInfo, TransitionConstraintDegree,
TransitionConstraintGroup,
};
pub use verifier::{verify, VerifierError}; | //! # }
//! # } | random_line_split |
fetch.rs | use futures::{
future,
sync::{mpsc, oneshot},
};
use crate::msg;
use flatbuffers::FlatBufferBuilder;
use crate::js::*;
use crate::runtime::{Runtime, EVENT_LOOP};
use crate::utils::*;
use libfly::*;
use crate::errors::{FlyError, FlyResult};
use crate::get_next_stream_id;
use hyper::body::Payload;
use hyper::client::HttpConnector;
use hyper::header::HeaderName;
use hyper::rt::{Future, Stream};
use hyper::HeaderMap;
use hyper::{Body, Client, Method, Request, StatusCode};
use hyper_tls::HttpsConnector;
use std::io;
use std::slice;
use crate::metrics::*;
use floating_duration::TimeAsFloat;
use http::uri::Scheme;
use std::time;
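// Shared hyper client with TLS support, reused for every outbound fetch request.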
lazy_static! {
static ref HTTP_CLIENT: Client<HttpsConnector<HttpConnector>, Body> = {
Client::builder()
.executor(EVENT_LOOP.0.clone())
.build(HttpsConnector::new(4).unwrap())
};
}
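/// Handles a fetch request from the JavaScript runtime: builds an outbound hyper request,
/// forwards the response status, headers, and (possibly streaming) body back, and resolves
/// to a serialized `FetchHttpResponse` message for the originating command.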
pub fn op_fetch(rt: &mut Runtime, base: &msg::Base, raw: fly_buf) -> Box<Op> | let host = if let Some(port) = http_uri.port_part() {
format!("{}:{}", host_str, port.as_str())
} else {
let port = if let Some(scheme) = http_uri.scheme_part() {
if scheme == &Scheme::HTTPS {
"443"
} else {
"80"
}
} else {
"80"
};
format!("{}:{}", host_str, port)
};
FETCH_HTTP_REQUESTS_TOTAL
.with_label_values(&[rt.name.as_str(), rt.version.as_str(), host.as_str()])
.inc();
let method = match msg.method() {
msg::HttpMethod::Get => Method::GET,
msg::HttpMethod::Head => Method::HEAD,
msg::HttpMethod::Post => Method::POST,
msg::HttpMethod::Put => Method::PUT,
msg::HttpMethod::Patch => Method::PATCH,
msg::HttpMethod::Delete => Method::DELETE,
msg::HttpMethod::Connect => Method::CONNECT,
msg::HttpMethod::Options => Method::OPTIONS,
msg::HttpMethod::Trace => Method::TRACE,
};
let msg_headers = msg.headers().unwrap();
let mut headers = HeaderMap::new();
for i in 0..msg_headers.len() {
let h = msg_headers.get(i);
trace!("header: {} => {}", h.key().unwrap(), h.value().unwrap());
headers.insert(
HeaderName::from_bytes(h.key().unwrap().as_bytes()).unwrap(),
h.value().unwrap().parse().unwrap(),
);
}
let has_body = msg.has_body();
trace!("HAS BODY? {}", has_body);
let req_body = if has_body {
if raw.data_len > 0 {
trace!("STATIC BODY!");
Body::from(unsafe { slice::from_raw_parts(raw.data_ptr, raw.data_len) }.to_vec())
} else {
trace!("STREAMING BODY");
let (sender, recver) = mpsc::unbounded::<Vec<u8>>();
{
rt.streams.lock().unwrap().insert(req_id, sender);
}
Body::wrap_stream(recver.map_err(|_| std::sync::mpsc::RecvError {}))
}
} else {
Body::empty()
};
// let req_body = Body::empty();
let mut req = Request::new(req_body);
{
*req.uri_mut() = http_uri.clone();
*req.method_mut() = method;
*req.headers_mut() = headers;
}
let (p, c) = oneshot::channel::<FlyResult<JsHttpResponse>>();
let rt_name = rt.name.clone();
let rt_version = rt.version.clone();
let method = req.method().clone();
rt.spawn(future::lazy(move || {
let timer = time::Instant::now();
HTTP_CLIENT.request(req).then(move |reserr| {
debug!("got http response (or error)");
if let Err(err) = reserr {
if p.send(Err(err.into())).is_err() {
error!("error sending error for http response :/");
}
return Ok(());
}
let res = reserr.unwrap(); // should be safe.
FETCH_HEADERS_DURATION
.with_label_values(&[
rt_name.as_str(),
rt_version.as_str(),
method.as_str(),
host.as_str(),
res.status().as_str(),
])
.observe(timer.elapsed().as_fractional_secs());
let (parts, body) = res.into_parts();
let mut stream_rx: Option<JsBody> = None;
let has_body = !body.is_end_stream();
if has_body {
stream_rx = Some(JsBody::BoxedStream(Box::new(
body.map_err(|e| format!("{}", e).into()).map(move |chunk| {
let bytes = chunk.into_bytes();
DATA_IN_TOTAL
.with_label_values(&[rt_name.as_str(), rt_version.as_str(), "fetch"])
.inc_by(bytes.len() as i64);
bytes.to_vec()
}),
)));
}
if p.send(Ok(JsHttpResponse {
headers: parts.headers,
status: parts.status,
body: stream_rx,
}))
.is_err()
{
error!("error sending fetch http response");
return Ok(());
}
debug!("done with http request");
Ok(())
})
}));
let fut = c
.map_err(|e| {
FlyError::from(io::Error::new(
io::ErrorKind::Other,
format!("err getting response from oneshot: {}", e).as_str(),
))
})
.and_then(move |reserr: FlyResult<JsHttpResponse>| {
if let Err(err) = reserr {
return Err(err);
}
let res = reserr.unwrap();
let builder = &mut FlatBufferBuilder::new();
let headers: Vec<_> = res
.headers
.iter()
.map(|(key, value)| {
let key = builder.create_string(key.as_str());
let value = builder.create_string(value.to_str().unwrap());
msg::HttpHeader::create(
builder,
&msg::HttpHeaderArgs {
key: Some(key),
value: Some(value),
..Default::default()
},
)
})
.collect();
let res_headers = builder.create_vector(&headers);
let msg = msg::FetchHttpResponse::create(
builder,
&msg::FetchHttpResponseArgs {
id: req_id,
headers: Some(res_headers),
status: res.status.as_u16(),
has_body: res.body.is_some(),
..Default::default()
},
);
if let Some(stream) = res.body {
send_body_stream(ptr, req_id, stream);
}
Ok(serialize_response(
cmd_id,
builder,
msg::BaseArgs {
msg: Some(msg.as_union_value()),
msg_type: msg::Any::FetchHttpResponse,
..Default::default()
},
))
});
Box::new(fut)
}
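/// Handles an HTTP response produced by the JavaScript runtime and forwards it to the
/// Rust-side receiver registered for this request id in `rt.responses`.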
pub fn op_http_response(rt: &mut Runtime, base: &msg::Base, raw: fly_buf) -> Box<Op> {
debug!("handling http response");
let msg = base.msg_as_http_response().unwrap();
let req_id = msg.id();
let status = match StatusCode::from_u16(msg.status()) {
Ok(s) => s,
Err(e) => return odd_future(format!("{}", e).into()),
};
let mut headers = HeaderMap::new();
if let Some(msg_headers) = msg.headers() {
for i in 0..msg_headers.len() {
let h = msg_headers.get(i);
headers.insert(
HeaderName::from_bytes(h.key().unwrap().as_bytes()).unwrap(),
h.value().unwrap().parse().unwrap(),
);
}
}
let mut body: Option<JsBody> = None;
let has_body = msg.has_body();
if has_body {
if raw.data_len == 0 {
debug!("http response will have a streaming body");
let (sender, recver) = mpsc::unbounded::<Vec<u8>>();
{
let mut streams = rt.streams.lock().unwrap();
streams.insert(req_id, sender);
}
body = Some(JsBody::Stream(recver));
} else {
debug!("http response will have a static body");
body = Some(JsBody::Static(
unsafe { slice::from_raw_parts(raw.data_ptr, raw.data_len) }.to_vec(),
));
}
}
let mut responses = rt.responses.lock().unwrap();
match responses.remove(&req_id) {
Some(sender) => {
if sender
.send(JsHttpResponse {
headers: headers,
status: status,
body: body,
})
.is_err()
{
return odd_future("error sending http response".to_string().into());
}
}
None => return odd_future("no response receiver!".to_string().into()),
};
ok_future(None)
}
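/// Serves a `file://` URL by reading the path from the runtime's file store and streaming
/// the contents back as an HTTP-style response.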
fn file_request(rt: &mut Runtime, cmd_id: u32, url: &str) -> Box<Op> {
let req_id = get_next_stream_id();
let path: String = url.chars().skip(7).collect();
let ptr = rt.ptr;
Box::new(
rt.fs_store
.read(path)
.map_err(|e| format!("fs error: {:?}", e).into())
.and_then(move |maybe_entry| {
let builder = &mut FlatBufferBuilder::new();
let msg = msg::FetchHttpResponse::create(
builder,
&msg::FetchHttpResponseArgs {
id: req_id,
headers: None,
status: if maybe_entry.is_some() { 200 } else { 404 },
has_body: maybe_entry.is_some(),
..Default::default()
},
);
if let Some(entry) = maybe_entry {
send_body_stream(
ptr,
req_id,
JsBody::BoxedStream(Box::new(
entry.stream.map_err(|e| format!("{:?}", e).into()),
)),
);
}
Ok(serialize_response(
cmd_id,
builder,
msg::BaseArgs {
msg: Some(msg.as_union_value()),
msg_type: msg::Any::FetchHttpResponse,
..Default::default()
},
))
}),
)
}
| {
let cmd_id = base.cmd_id();
let msg = base.msg_as_http_request().unwrap();
let url = msg.url().unwrap();
if url.starts_with("file://") {
return file_request(rt, cmd_id, url);
}
let ptr = rt.ptr;
let req_id = msg.id();
let http_uri: hyper::Uri = match url.parse() {
Ok(u) => u,
Err(e) => return odd_future(format!("{}", e).into()),
};
// for the metrics
let host_str = http_uri.host().unwrap_or("unknown"); | identifier_body |
fetch.rs | use futures::{
future,
sync::{mpsc, oneshot},
};
use crate::msg;
use flatbuffers::FlatBufferBuilder;
use crate::js::*;
use crate::runtime::{Runtime, EVENT_LOOP};
use crate::utils::*;
use libfly::*;
use crate::errors::{FlyError, FlyResult};
use crate::get_next_stream_id;
use hyper::body::Payload;
use hyper::client::HttpConnector;
use hyper::header::HeaderName;
use hyper::rt::{Future, Stream};
use hyper::HeaderMap;
use hyper::{Body, Client, Method, Request, StatusCode};
use hyper_tls::HttpsConnector;
use std::io;
use std::slice;
use crate::metrics::*;
use floating_duration::TimeAsFloat;
use http::uri::Scheme;
use std::time;
lazy_static! {
static ref HTTP_CLIENT: Client<HttpsConnector<HttpConnector>, Body> = {
Client::builder()
.executor(EVENT_LOOP.0.clone())
.build(HttpsConnector::new(4).unwrap())
};
}
pub fn op_fetch(rt: &mut Runtime, base: &msg::Base, raw: fly_buf) -> Box<Op> {
let cmd_id = base.cmd_id();
let msg = base.msg_as_http_request().unwrap();
let url = msg.url().unwrap();
if url.starts_with("file://") {
return file_request(rt, cmd_id, url);
}
let ptr = rt.ptr;
let req_id = msg.id();
let http_uri: hyper::Uri = match url.parse() {
Ok(u) => u,
Err(e) => return odd_future(format!("{}", e).into()),
};
// for the metrics
let host_str = http_uri.host().unwrap_or("unknown");
let host = if let Some(port) = http_uri.port_part() {
format!("{}:{}", host_str, port.as_str())
} else {
let port = if let Some(scheme) = http_uri.scheme_part() {
if scheme == &Scheme::HTTPS {
"443"
} else {
"80"
}
} else {
"80"
};
format!("{}:{}", host_str, port)
};
FETCH_HTTP_REQUESTS_TOTAL
.with_label_values(&[rt.name.as_str(), rt.version.as_str(), host.as_str()])
.inc();
let method = match msg.method() {
msg::HttpMethod::Get => Method::GET,
msg::HttpMethod::Head => Method::HEAD,
msg::HttpMethod::Post => Method::POST,
msg::HttpMethod::Put => Method::PUT,
msg::HttpMethod::Patch => Method::PATCH,
msg::HttpMethod::Delete => Method::DELETE,
msg::HttpMethod::Connect => Method::CONNECT,
msg::HttpMethod::Options => Method::OPTIONS,
msg::HttpMethod::Trace => Method::TRACE,
};
let msg_headers = msg.headers().unwrap();
let mut headers = HeaderMap::new();
for i in 0..msg_headers.len() {
let h = msg_headers.get(i);
trace!("header: {} => {}", h.key().unwrap(), h.value().unwrap());
headers.insert(
HeaderName::from_bytes(h.key().unwrap().as_bytes()).unwrap(),
h.value().unwrap().parse().unwrap(),
);
}
let has_body = msg.has_body();
trace!("HAS BODY? {}", has_body);
let req_body = if has_body {
if raw.data_len > 0 {
trace!("STATIC BODY!");
Body::from(unsafe { slice::from_raw_parts(raw.data_ptr, raw.data_len) }.to_vec())
} else {
trace!("STREAMING BODY");
let (sender, recver) = mpsc::unbounded::<Vec<u8>>();
{
rt.streams.lock().unwrap().insert(req_id, sender);
}
Body::wrap_stream(recver.map_err(|_| std::sync::mpsc::RecvError {}))
}
} else {
Body::empty()
};
// let req_body = Body::empty();
let mut req = Request::new(req_body);
{
*req.uri_mut() = http_uri.clone();
*req.method_mut() = method;
*req.headers_mut() = headers;
}
let (p, c) = oneshot::channel::<FlyResult<JsHttpResponse>>();
let rt_name = rt.name.clone();
let rt_version = rt.version.clone();
let method = req.method().clone();
rt.spawn(future::lazy(move || {
let timer = time::Instant::now();
HTTP_CLIENT.request(req).then(move |reserr| {
debug!("got http response (or error)");
if let Err(err) = reserr {
if p.send(Err(err.into())).is_err() {
error!("error sending error for http response :/");
}
return Ok(());
}
let res = reserr.unwrap(); // should be safe.
FETCH_HEADERS_DURATION
.with_label_values(&[
rt_name.as_str(),
rt_version.as_str(),
method.as_str(),
host.as_str(),
res.status().as_str(),
])
.observe(timer.elapsed().as_fractional_secs());
let (parts, body) = res.into_parts();
let mut stream_rx: Option<JsBody> = None;
let has_body = !body.is_end_stream();
if has_body {
stream_rx = Some(JsBody::BoxedStream(Box::new(
body.map_err(|e| format!("{}", e).into()).map(move |chunk| {
let bytes = chunk.into_bytes();
DATA_IN_TOTAL
.with_label_values(&[rt_name.as_str(), rt_version.as_str(), "fetch"])
.inc_by(bytes.len() as i64);
bytes.to_vec()
}),
)));
}
if p.send(Ok(JsHttpResponse {
headers: parts.headers,
status: parts.status,
body: stream_rx,
}))
.is_err()
{
error!("error sending fetch http response");
return Ok(());
}
debug!("done with http request");
Ok(())
})
}));
let fut = c
.map_err(|e| {
FlyError::from(io::Error::new(
io::ErrorKind::Other,
format!("err getting response from oneshot: {}", e).as_str(),
))
})
.and_then(move |reserr: FlyResult<JsHttpResponse>| {
if let Err(err) = reserr {
return Err(err);
}
let res = reserr.unwrap();
let builder = &mut FlatBufferBuilder::new();
let headers: Vec<_> = res
.headers
.iter()
.map(|(key, value)| {
let key = builder.create_string(key.as_str());
let value = builder.create_string(value.to_str().unwrap());
msg::HttpHeader::create(
builder,
&msg::HttpHeaderArgs {
key: Some(key),
value: Some(value),
..Default::default()
},
)
})
.collect();
let res_headers = builder.create_vector(&headers);
let msg = msg::FetchHttpResponse::create(
builder,
&msg::FetchHttpResponseArgs {
id: req_id, | status: res.status.as_u16(),
has_body: res.body.is_some(),
..Default::default()
},
);
if let Some(stream) = res.body {
send_body_stream(ptr, req_id, stream);
}
Ok(serialize_response(
cmd_id,
builder,
msg::BaseArgs {
msg: Some(msg.as_union_value()),
msg_type: msg::Any::FetchHttpResponse,
..Default::default()
},
))
});
Box::new(fut)
}
pub fn op_http_response(rt: &mut Runtime, base: &msg::Base, raw: fly_buf) -> Box<Op> {
debug!("handling http response");
let msg = base.msg_as_http_response().unwrap();
let req_id = msg.id();
let status = match StatusCode::from_u16(msg.status()) {
Ok(s) => s,
Err(e) => return odd_future(format!("{}", e).into()),
};
let mut headers = HeaderMap::new();
if let Some(msg_headers) = msg.headers() {
for i in 0..msg_headers.len() {
let h = msg_headers.get(i);
headers.insert(
HeaderName::from_bytes(h.key().unwrap().as_bytes()).unwrap(),
h.value().unwrap().parse().unwrap(),
);
}
}
let mut body: Option<JsBody> = None;
let has_body = msg.has_body();
if has_body {
if raw.data_len == 0 {
debug!("http response will have a streaming body");
let (sender, recver) = mpsc::unbounded::<Vec<u8>>();
{
let mut streams = rt.streams.lock().unwrap();
streams.insert(req_id, sender);
}
body = Some(JsBody::Stream(recver));
} else {
debug!("http response will have a static body");
body = Some(JsBody::Static(
unsafe { slice::from_raw_parts(raw.data_ptr, raw.data_len) }.to_vec(),
));
}
}
let mut responses = rt.responses.lock().unwrap();
match responses.remove(&req_id) {
Some(sender) => {
if sender
.send(JsHttpResponse {
headers: headers,
status: status,
body: body,
})
.is_err()
{
return odd_future("error sending http response".to_string().into());
}
}
None => return odd_future("no response receiver!".to_string().into()),
};
ok_future(None)
}
fn file_request(rt: &mut Runtime, cmd_id: u32, url: &str) -> Box<Op> {
let req_id = get_next_stream_id();
let path: String = url.chars().skip(7).collect();
let ptr = rt.ptr;
Box::new(
rt.fs_store
.read(path)
.map_err(|e| format!("fs error: {:?}", e).into())
.and_then(move |maybe_entry| {
let builder = &mut FlatBufferBuilder::new();
let msg = msg::FetchHttpResponse::create(
builder,
&msg::FetchHttpResponseArgs {
id: req_id,
headers: None,
status: if maybe_entry.is_some() { 200 } else { 404 },
has_body: maybe_entry.is_some(),
..Default::default()
},
);
if let Some(entry) = maybe_entry {
send_body_stream(
ptr,
req_id,
JsBody::BoxedStream(Box::new(
entry.stream.map_err(|e| format!("{:?}", e).into()),
)),
);
}
Ok(serialize_response(
cmd_id,
builder,
msg::BaseArgs {
msg: Some(msg.as_union_value()),
msg_type: msg::Any::FetchHttpResponse,
..Default::default()
},
))
}),
)
} | headers: Some(res_headers), | random_line_split |
fetch.rs | use futures::{
future,
sync::{mpsc, oneshot},
};
use crate::msg;
use flatbuffers::FlatBufferBuilder;
use crate::js::*;
use crate::runtime::{Runtime, EVENT_LOOP};
use crate::utils::*;
use libfly::*;
use crate::errors::{FlyError, FlyResult};
use crate::get_next_stream_id;
use hyper::body::Payload;
use hyper::client::HttpConnector;
use hyper::header::HeaderName;
use hyper::rt::{Future, Stream};
use hyper::HeaderMap;
use hyper::{Body, Client, Method, Request, StatusCode};
use hyper_tls::HttpsConnector;
use std::io;
use std::slice;
use crate::metrics::*;
use floating_duration::TimeAsFloat;
use http::uri::Scheme;
use std::time;
lazy_static! {
static ref HTTP_CLIENT: Client<HttpsConnector<HttpConnector>, Body> = {
Client::builder()
.executor(EVENT_LOOP.0.clone())
.build(HttpsConnector::new(4).unwrap())
};
}
pub fn op_fetch(rt: &mut Runtime, base: &msg::Base, raw: fly_buf) -> Box<Op> {
let cmd_id = base.cmd_id();
let msg = base.msg_as_http_request().unwrap();
let url = msg.url().unwrap();
if url.starts_with("file://") {
return file_request(rt, cmd_id, url);
}
let ptr = rt.ptr;
let req_id = msg.id();
let http_uri: hyper::Uri = match url.parse() {
Ok(u) => u,
Err(e) => return odd_future(format!("{}", e).into()),
};
// for the metrics
let host_str = http_uri.host().unwrap_or("unknown");
let host = if let Some(port) = http_uri.port_part() {
format!("{}:{}", host_str, port.as_str())
} else {
let port = if let Some(scheme) = http_uri.scheme_part() {
if scheme == &Scheme::HTTPS {
"443"
} else {
"80"
}
} else {
"80"
};
format!("{}:{}", host_str, port)
};
FETCH_HTTP_REQUESTS_TOTAL
.with_label_values(&[rt.name.as_str(), rt.version.as_str(), host.as_str()])
.inc();
let method = match msg.method() {
msg::HttpMethod::Get => Method::GET,
msg::HttpMethod::Head => Method::HEAD,
msg::HttpMethod::Post => Method::POST,
msg::HttpMethod::Put => Method::PUT,
msg::HttpMethod::Patch => Method::PATCH,
msg::HttpMethod::Delete => Method::DELETE,
msg::HttpMethod::Connect => Method::CONNECT,
msg::HttpMethod::Options => Method::OPTIONS,
msg::HttpMethod::Trace => Method::TRACE,
};
let msg_headers = msg.headers().unwrap();
let mut headers = HeaderMap::new();
for i in 0..msg_headers.len() {
let h = msg_headers.get(i);
trace!("header: {} => {}", h.key().unwrap(), h.value().unwrap());
headers.insert(
HeaderName::from_bytes(h.key().unwrap().as_bytes()).unwrap(),
h.value().unwrap().parse().unwrap(),
);
}
let has_body = msg.has_body();
trace!("HAS BODY? {}", has_body);
let req_body = if has_body {
if raw.data_len > 0 {
trace!("STATIC BODY!");
Body::from(unsafe { slice::from_raw_parts(raw.data_ptr, raw.data_len) }.to_vec())
} else {
trace!("STREAMING BODY");
let (sender, recver) = mpsc::unbounded::<Vec<u8>>();
{
rt.streams.lock().unwrap().insert(req_id, sender);
}
Body::wrap_stream(recver.map_err(|_| std::sync::mpsc::RecvError {}))
}
} else {
Body::empty()
};
// let req_body = Body::empty();
let mut req = Request::new(req_body);
{
*req.uri_mut() = http_uri.clone();
*req.method_mut() = method;
*req.headers_mut() = headers;
}
let (p, c) = oneshot::channel::<FlyResult<JsHttpResponse>>();
let rt_name = rt.name.clone();
let rt_version = rt.version.clone();
let method = req.method().clone();
rt.spawn(future::lazy(move || {
let timer = time::Instant::now();
HTTP_CLIENT.request(req).then(move |reserr| {
debug!("got http response (or error)");
if let Err(err) = reserr {
if p.send(Err(err.into())).is_err() {
error!("error sending error for http response :/");
}
return Ok(());
}
let res = reserr.unwrap(); // should be safe.
FETCH_HEADERS_DURATION
.with_label_values(&[
rt_name.as_str(),
rt_version.as_str(),
method.as_str(),
host.as_str(),
res.status().as_str(),
])
.observe(timer.elapsed().as_fractional_secs());
let (parts, body) = res.into_parts();
let mut stream_rx: Option<JsBody> = None;
let has_body = !body.is_end_stream();
if has_body {
stream_rx = Some(JsBody::BoxedStream(Box::new(
body.map_err(|e| format!("{}", e).into()).map(move |chunk| {
let bytes = chunk.into_bytes();
DATA_IN_TOTAL
.with_label_values(&[rt_name.as_str(), rt_version.as_str(), "fetch"])
.inc_by(bytes.len() as i64);
bytes.to_vec()
}),
)));
}
if p.send(Ok(JsHttpResponse {
headers: parts.headers,
status: parts.status,
body: stream_rx,
}))
.is_err()
{
error!("error sending fetch http response");
return Ok(());
}
debug!("done with http request");
Ok(())
})
}));
let fut = c
.map_err(|e| {
FlyError::from(io::Error::new(
io::ErrorKind::Other,
format!("err getting response from oneshot: {}", e).as_str(),
))
})
.and_then(move |reserr: FlyResult<JsHttpResponse>| {
if let Err(err) = reserr {
return Err(err);
}
let res = reserr.unwrap();
let builder = &mut FlatBufferBuilder::new();
let headers: Vec<_> = res
.headers
.iter()
.map(|(key, value)| {
let key = builder.create_string(key.as_str());
let value = builder.create_string(value.to_str().unwrap());
msg::HttpHeader::create(
builder,
&msg::HttpHeaderArgs {
key: Some(key),
value: Some(value),
..Default::default()
},
)
})
.collect();
let res_headers = builder.create_vector(&headers);
let msg = msg::FetchHttpResponse::create(
builder,
&msg::FetchHttpResponseArgs {
id: req_id,
headers: Some(res_headers),
status: res.status.as_u16(),
has_body: res.body.is_some(),
..Default::default()
},
);
if let Some(stream) = res.body {
send_body_stream(ptr, req_id, stream);
}
Ok(serialize_response(
cmd_id,
builder,
msg::BaseArgs {
msg: Some(msg.as_union_value()),
msg_type: msg::Any::FetchHttpResponse,
..Default::default()
},
))
});
Box::new(fut)
}
pub fn | (rt: &mut Runtime, base: &msg::Base, raw: fly_buf) -> Box<Op> {
debug!("handling http response");
let msg = base.msg_as_http_response().unwrap();
let req_id = msg.id();
let status = match StatusCode::from_u16(msg.status()) {
Ok(s) => s,
Err(e) => return odd_future(format!("{}", e).into()),
};
let mut headers = HeaderMap::new();
if let Some(msg_headers) = msg.headers() {
for i in 0..msg_headers.len() {
let h = msg_headers.get(i);
headers.insert(
HeaderName::from_bytes(h.key().unwrap().as_bytes()).unwrap(),
h.value().unwrap().parse().unwrap(),
);
}
}
let mut body: Option<JsBody> = None;
let has_body = msg.has_body();
if has_body {
if raw.data_len == 0 {
debug!("http response will have a streaming body");
let (sender, recver) = mpsc::unbounded::<Vec<u8>>();
{
let mut streams = rt.streams.lock().unwrap();
streams.insert(req_id, sender);
}
body = Some(JsBody::Stream(recver));
} else {
debug!("http response will have a static body");
body = Some(JsBody::Static(
unsafe { slice::from_raw_parts(raw.data_ptr, raw.data_len) }.to_vec(),
));
}
}
let mut responses = rt.responses.lock().unwrap();
match responses.remove(&req_id) {
Some(sender) => {
if sender
.send(JsHttpResponse {
headers: headers,
status: status,
body: body,
})
.is_err()
{
return odd_future("error sending http response".to_string().into());
}
}
None => return odd_future("no response receiver!".to_string().into()),
};
ok_future(None)
}
fn file_request(rt: &mut Runtime, cmd_id: u32, url: &str) -> Box<Op> {
let req_id = get_next_stream_id();
let path: String = url.chars().skip(7).collect();
let ptr = rt.ptr;
Box::new(
rt.fs_store
.read(path)
.map_err(|e| format!("fs error: {:?}", e).into())
.and_then(move |maybe_entry| {
let builder = &mut FlatBufferBuilder::new();
let msg = msg::FetchHttpResponse::create(
builder,
&msg::FetchHttpResponseArgs {
id: req_id,
headers: None,
status: if maybe_entry.is_some() { 200 } else { 404 },
has_body: maybe_entry.is_some(),
..Default::default()
},
);
if let Some(entry) = maybe_entry {
send_body_stream(
ptr,
req_id,
JsBody::BoxedStream(Box::new(
entry.stream.map_err(|e| format!("{:?}", e).into()),
)),
);
}
Ok(serialize_response(
cmd_id,
builder,
msg::BaseArgs {
msg: Some(msg.as_union_value()),
msg_type: msg::Any::FetchHttpResponse,
..Default::default()
},
))
}),
)
}
| op_http_response | identifier_name |
migrations.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Storage migrations for the Staking pallet.
use super::*;
use frame_election_provider_support::SortedListProvider;
use frame_support::{
dispatch::GetStorageVersion, pallet_prelude::ValueQuery, storage_alias,
traits::OnRuntimeUpgrade,
};
#[cfg(feature = "try-runtime")]
use frame_support::ensure;
#[cfg(feature = "try-runtime")]
use sp_runtime::TryRuntimeError;
/// Used for release versioning up to v12.
///
/// Obsolete from v13. Keeping around to make encoding/decoding of old migration code easier.
#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
enum ObsoleteReleases {
V1_0_0Ancient,
V2_0_0,
V3_0_0,
V4_0_0,
V5_0_0, // blockable validators.
V6_0_0, // removal of all storage associated with offchain phragmen.
V7_0_0, // keep track of number of nominators / validators in map
V8_0_0, // populate `VoterList`.
V9_0_0, // inject validators into `VoterList` as well.
V10_0_0, // remove `EarliestUnappliedSlash`.
V11_0_0, // Move pallet storage prefix, e.g. BagsList -> VoterBagsList
V12_0_0, // remove `HistoryDepth`.
}
impl Default for ObsoleteReleases {
fn default() -> Self {
ObsoleteReleases::V12_0_0
}
}
/// Alias to the old storage item used for release versioning. Obsolete since v13.
#[storage_alias]
type StorageVersion<T: Config> = StorageValue<Pallet<T>, ObsoleteReleases, ValueQuery>;
pub mod v13 {
use super::*;
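/// Migrates the pallet from the obsolete `StorageVersion` storage item to the standard
/// pallet storage version, bumping the on-chain version to 13.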
pub struct MigrateToV13<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV13<T> {
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V12_0_0,
"Required v12 before upgrading to v13"
);
Ok(Default::default())
}
fn on_runtime_upgrade() -> Weight {
let current = Pallet::<T>::current_storage_version();
let onchain = StorageVersion::<T>::get();
if current == 13 && onchain == ObsoleteReleases::V12_0_0 {
StorageVersion::<T>::kill();
current.put::<Pallet<T>>();
log!(info, "v13 applied successfully");
T::DbWeight::get().reads_writes(1, 2)
} else {
log!(warn, "Skipping v13, should be removed");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
Pallet::<T>::on_chain_storage_version() == 13,
"v13 not applied"
);
frame_support::ensure!(
!StorageVersion::<T>::exists(),
"Storage version not migrated correctly"
);
Ok(())
}
}
}
pub mod v12 {
use super::*;
use frame_support::{pallet_prelude::ValueQuery, storage_alias};
#[storage_alias]
type HistoryDepth<T: Config> = StorageValue<Pallet<T>, u32, ValueQuery>;
/// Clean up `HistoryDepth` from storage.
///
/// We will be depending on the configurable value of `HistoryDepth` post
/// this release.
pub struct MigrateToV12<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV12<T> {
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"Expected v11 before upgrading to v12"
);
if HistoryDepth::<T>::exists() {
frame_support::ensure!(
T::HistoryDepth::get() == HistoryDepth::<T>::get(),
"Provided value of HistoryDepth should be same as the existing storage value"
);
} else {
log::info!("No HistoryDepth in storage; nothing to remove");
}
Ok(Default::default())
}
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0 {
HistoryDepth::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V12_0_0);
log!(info, "v12 applied successfully");
T::DbWeight::get().reads_writes(1, 2)
} else {
log!(warn, "Skipping v12, should be removed");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V12_0_0,
"v12 not applied"
);
Ok(())
}
}
}
pub mod v11 { | traits::{GetStorageVersion, PalletInfoAccess},
};
#[cfg(feature = "try-runtime")]
use sp_io::hashing::twox_128;
pub struct MigrateToV11<T, P, N>(sp_std::marker::PhantomData<(T, P, N)>);
impl<T: Config, P: GetStorageVersion + PalletInfoAccess, N: Get<&'static str>> OnRuntimeUpgrade
for MigrateToV11<T, P, N>
{
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0,
"must upgrade linearly"
);
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_some(),
"no data for the old pallet name has been detected"
);
Ok(Default::default())
}
/// Migrate the entire storage of this pallet to a new prefix.
///
/// This new prefix must be the same as the one set in construct_runtime. For safety, use
/// `PalletInfo` to get it, as:
/// `<Runtime as frame_system::Config>::PalletInfo::name::<VoterBagsList>`.
///
/// The migration will look into the storage version in order to avoid triggering a
/// migration on already up-to-date storage.
fn on_runtime_upgrade() -> Weight {
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
if StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0 {
// bump version anyway, even if we don't need to move the prefix
StorageVersion::<T>::put(ObsoleteReleases::V11_0_0);
if new_pallet_name == old_pallet_name {
log!(
warn,
"new bags-list name is equal to the old one, only bumping the version"
);
return T::DbWeight::get().reads(1).saturating_add(T::DbWeight::get().writes(1))
}
move_pallet(old_pallet_name.as_bytes(), new_pallet_name.as_bytes());
<T as frame_system::Config>::BlockWeights::get().max_block
} else {
log!(warn, "v11::migrate should be removed.");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"wrong version after the upgrade"
);
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
// skip storage prefix checks for the same pallet names
if new_pallet_name == old_pallet_name {
return Ok(())
}
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_none(),
"old pallet data hasn't been removed"
);
let new_pallet_name = <P as PalletInfoAccess>::name();
let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&new_pallet_prefix).is_some(),
"new pallet data hasn't been created"
);
Ok(())
}
}
}
pub mod v10 {
use super::*;
use frame_support::storage_alias;
#[storage_alias]
type EarliestUnappliedSlash<T: Config> = StorageValue<Pallet<T>, EraIndex>;
/// Apply any pending slashes that were queued.
///
/// That means we might slash someone a bit too early, but we definitely
/// won't forget to slash them. The cap of 512 is somewhat arbitrary, chosen to
/// prevent us from iterating over an arbitrarily large number of keys in `on_runtime_upgrade`.
pub struct MigrateToV10<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV10<T> {
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0 {
let pending_slashes = UnappliedSlashes::<T>::iter().take(512);
for (era, slashes) in pending_slashes {
for slash in slashes {
// in the old slashing scheme, the slash era was the key at which we read
// from `UnappliedSlashes`.
log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era);
slashing::apply_slash::<T>(slash, era);
}
}
EarliestUnappliedSlash::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V10_0_0);
log!(info, "MigrateToV10 executed successfully");
T::DbWeight::get().reads_writes(1, 1)
} else {
log!(warn, "MigrateToV10 should be removed.");
T::DbWeight::get().reads(1)
}
}
}
}
pub mod v9 {
use super::*;
#[cfg(feature = "try-runtime")]
use codec::{Decode, Encode};
#[cfg(feature = "try-runtime")]
use sp_std::vec::Vec;
/// Migration implementation that injects all validators into the sorted list.
///
/// This is only useful for chains that started their `VoterList` just based on nominators.
pub struct InjectValidatorsIntoVoterList<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for InjectValidatorsIntoVoterList<T> {
fn on_runtime_upgrade() -> Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V8_0_0 {
let prev_count = T::VoterList::count();
let weight_of_cached = Pallet::<T>::weight_of_fn();
for (v, _) in Validators::<T>::iter() {
let weight = weight_of_cached(&v);
let _ = T::VoterList::on_insert(v.clone(), weight).map_err(|err| {
log!(warn, "failed to insert {:?} into VoterList: {:?}", v, err)
});
}
log!(
info,
"injected a total of {} new voters, prev count: {} next count: {}, updating to version 9",
Validators::<T>::count(),
prev_count,
T::VoterList::count(),
);
StorageVersion::<T>::put(ObsoleteReleases::V9_0_0);
T::BlockWeights::get().max_block
} else {
log!(
warn,
"InjectValidatorsIntoVoterList being executed on the wrong storage \
version, expected ObsoleteReleases::V8_0_0"
);
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V8_0_0,
"must upgrade linearly"
);
let prev_count = T::VoterList::count();
Ok(prev_count.encode())
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(prev_count: Vec<u8>) -> Result<(), TryRuntimeError> {
let prev_count: u32 = Decode::decode(&mut prev_count.as_slice()).expect(
"the state parameter should be something that was generated by pre_upgrade",
);
let post_count = T::VoterList::count();
let validators = Validators::<T>::count();
ensure!(
post_count == prev_count + validators,
"`VoterList` count after the migration must equal to the sum of \
previous count and the current number of validators"
);
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0,
"must upgrade"
);
Ok(())
}
}
}
pub mod v8 {
use super::*;
use crate::{Config, Nominators, Pallet, Weight};
use frame_election_provider_support::SortedListProvider;
use frame_support::traits::Get;
#[cfg(feature = "try-runtime")]
pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V7_0_0,
"must upgrade linearly"
);
crate::log!(info, "👜 staking bags-list migration passes PRE migrate checks ✅",);
Ok(())
}
/// Migration to sorted `VoterList`.
pub fn migrate<T: Config>() -> Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V7_0_0 {
crate::log!(info, "migrating staking to ObsoleteReleases::V8_0_0");
let migrated = T::VoterList::unsafe_regenerate(
Nominators::<T>::iter().map(|(id, _)| id),
Pallet::<T>::weight_of_fn(),
);
StorageVersion::<T>::put(ObsoleteReleases::V8_0_0);
crate::log!(
info,
"👜 completed staking migration to ObsoleteReleases::V8_0_0 with {} voters migrated",
migrated,
);
T::BlockWeights::get().max_block
} else {
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
pub fn post_migrate<T: Config>() -> Result<(), &'static str> {
T::VoterList::try_state().map_err(|_| "VoterList is not in a sane state.")?;
crate::log!(info, "👜 staking bags-list migration passes POST migrate checks ✅",);
Ok(())
}
}
pub mod v7 {
use super::*;
use frame_support::storage_alias;
#[storage_alias]
type CounterForValidators<T: Config> = StorageValue<Pallet<T>, u32>;
#[storage_alias]
type CounterForNominators<T: Config> = StorageValue<Pallet<T>, u32>;
pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
assert!(
CounterForValidators::<T>::get().unwrap().is_zero(),
"CounterForValidators already set."
);
assert!(
CounterForNominators::<T>::get().unwrap().is_zero(),
"CounterForNominators already set."
);
assert!(Validators::<T>::count().is_zero(), "Validators already set.");
assert!(Nominators::<T>::count().is_zero(), "Nominators already set.");
assert!(StorageVersion::<T>::get() == ObsoleteReleases::V6_0_0);
Ok(())
}
pub fn migrate<T: Config>() -> Weight {
log!(info, "Migrating staking to ObsoleteReleases::V7_0_0");
let validator_count = Validators::<T>::iter().count() as u32;
let nominator_count = Nominators::<T>::iter().count() as u32;
CounterForValidators::<T>::put(validator_count);
CounterForNominators::<T>::put(nominator_count);
StorageVersion::<T>::put(ObsoleteReleases::V7_0_0);
log!(info, "Completed staking migration to ObsoleteReleases::V7_0_0");
T::DbWeight::get().reads_writes(validator_count.saturating_add(nominator_count).into(), 2)
}
}
pub mod v6 {
use super::*;
use frame_support::{storage_alias, traits::Get, weights::Weight};
// NOTE: value type doesn't matter, we just set it to () here.
#[storage_alias]
type SnapshotValidators<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type SnapshotNominators<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type QueuedElected<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type QueuedScore<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type EraElectionStatus<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type IsCurrentSessionFinal<T: Config> = StorageValue<Pallet<T>, ()>;
/// Check to execute prior to migration.
pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
// these may or may not exist.
log!(info, "SnapshotValidators.exits()? {:?}", SnapshotValidators::<T>::exists());
log!(info, "SnapshotNominators.exits()? {:?}", SnapshotNominators::<T>::exists());
log!(info, "QueuedElected.exits()? {:?}", QueuedElected::<T>::exists());
log!(info, "QueuedScore.exits()? {:?}", QueuedScore::<T>::exists());
// these must exist.
assert!(
IsCurrentSessionFinal::<T>::exists(),
"IsCurrentSessionFinal storage item not found!"
);
assert!(EraElectionStatus::<T>::exists(), "EraElectionStatus storage item not found!");
Ok(())
}
/// Migrate storage to v6.
pub fn migrate<T: Config>() -> Weight {
log!(info, "Migrating staking to ObsoleteReleases::V6_0_0");
SnapshotValidators::<T>::kill();
SnapshotNominators::<T>::kill();
QueuedElected::<T>::kill();
QueuedScore::<T>::kill();
EraElectionStatus::<T>::kill();
IsCurrentSessionFinal::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V6_0_0);
log!(info, "Done.");
T::DbWeight::get().writes(6 + 1)
}
} | use super::*;
use frame_support::{
storage::migration::move_pallet, | random_line_split |
migrations.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Storage migrations for the Staking pallet.
use super::*;
use frame_election_provider_support::SortedListProvider;
use frame_support::{
dispatch::GetStorageVersion, pallet_prelude::ValueQuery, storage_alias,
traits::OnRuntimeUpgrade,
};
#[cfg(feature = "try-runtime")]
use frame_support::ensure;
#[cfg(feature = "try-runtime")]
use sp_runtime::TryRuntimeError;
/// Used for release versioning up to v12.
///
/// Obsolete from v13. Keeping around to make encoding/decoding of old migration code easier.
#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
enum ObsoleteReleases {
V1_0_0Ancient,
V2_0_0,
V3_0_0,
V4_0_0,
V5_0_0, // blockable validators.
V6_0_0, // removal of all storage associated with offchain phragmen.
V7_0_0, // keep track of number of nominators / validators in map
V8_0_0, // populate `VoterList`.
V9_0_0, // inject validators into `VoterList` as well.
V10_0_0, // remove `EarliestUnappliedSlash`.
V11_0_0, // Move pallet storage prefix, e.g. BagsList -> VoterBagsList
V12_0_0, // remove `HistoryDepth`.
}
impl Default for ObsoleteReleases {
fn default() -> Self {
ObsoleteReleases::V12_0_0
}
}
/// Alias to the old storage item used for release versioning. Obsolete since v13.
#[storage_alias]
type StorageVersion<T: Config> = StorageValue<Pallet<T>, ObsoleteReleases, ValueQuery>;
pub mod v13 {
use super::*;
pub struct MigrateToV13<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV13<T> {
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V12_0_0,
"Required v12 before upgrading to v13"
);
Ok(Default::default())
}
fn on_runtime_upgrade() -> Weight {
let current = Pallet::<T>::current_storage_version();
let onchain = StorageVersion::<T>::get();
if current == 13 && onchain == ObsoleteReleases::V12_0_0 {
StorageVersion::<T>::kill();
current.put::<Pallet<T>>();
log!(info, "v13 applied successfully");
T::DbWeight::get().reads_writes(1, 2)
} else {
log!(warn, "Skipping v13, should be removed");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
Pallet::<T>::on_chain_storage_version() == 13,
"v13 not applied"
);
frame_support::ensure!(
!StorageVersion::<T>::exists(),
"Storage version not migrated correctly"
);
Ok(())
}
}
}
pub mod v12 {
use super::*;
use frame_support::{pallet_prelude::ValueQuery, storage_alias};
#[storage_alias]
type HistoryDepth<T: Config> = StorageValue<Pallet<T>, u32, ValueQuery>;
/// Clean up `HistoryDepth` from storage.
///
/// We will be depending on the configurable value of `HistoryDepth` post
/// this release.
pub struct | <T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV12<T> {
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"Expected v11 before upgrading to v12"
);
if HistoryDepth::<T>::exists() {
frame_support::ensure!(
T::HistoryDepth::get() == HistoryDepth::<T>::get(),
"Provided value of HistoryDepth should be same as the existing storage value"
);
} else {
log::info!("No HistoryDepth in storage; nothing to remove");
}
Ok(Default::default())
}
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0 {
HistoryDepth::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V12_0_0);
log!(info, "v12 applied successfully");
T::DbWeight::get().reads_writes(1, 2)
} else {
log!(warn, "Skipping v12, should be removed");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V12_0_0,
"v12 not applied"
);
Ok(())
}
}
}
pub mod v11 {
use super::*;
use frame_support::{
storage::migration::move_pallet,
traits::{GetStorageVersion, PalletInfoAccess},
};
#[cfg(feature = "try-runtime")]
use sp_io::hashing::twox_128;
pub struct MigrateToV11<T, P, N>(sp_std::marker::PhantomData<(T, P, N)>);
impl<T: Config, P: GetStorageVersion + PalletInfoAccess, N: Get<&'static str>> OnRuntimeUpgrade
for MigrateToV11<T, P, N>
{
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0,
"must upgrade linearly"
);
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_some(),
"no data for the old pallet name has been detected"
);
Ok(Default::default())
}
/// Migrate the entire storage of this pallet to a new prefix.
///
/// This new prefix must be the same as the one set in construct_runtime. For safety, use
/// `PalletInfo` to get it, as:
/// `<Runtime as frame_system::Config>::PalletInfo::name::<VoterBagsList>`.
///
/// The migration will look into the storage version in order to avoid triggering a
/// migration on already up-to-date storage.
fn on_runtime_upgrade() -> Weight {
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
if StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0 {
// bump version anyway, even if we don't need to move the prefix
StorageVersion::<T>::put(ObsoleteReleases::V11_0_0);
if new_pallet_name == old_pallet_name {
log!(
warn,
"new bags-list name is equal to the old one, only bumping the version"
);
return T::DbWeight::get().reads(1).saturating_add(T::DbWeight::get().writes(1))
}
move_pallet(old_pallet_name.as_bytes(), new_pallet_name.as_bytes());
<T as frame_system::Config>::BlockWeights::get().max_block
} else {
log!(warn, "v11::migrate should be removed.");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"wrong version after the upgrade"
);
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
// skip storage prefix checks for the same pallet names
if new_pallet_name == old_pallet_name {
return Ok(())
}
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_none(),
"old pallet data hasn't been removed"
);
let new_pallet_name = <P as PalletInfoAccess>::name();
let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&new_pallet_prefix).is_some(),
"new pallet data hasn't been created"
);
Ok(())
}
}
}
pub mod v10 {
use super::*;
use frame_support::storage_alias;
#[storage_alias]
type EarliestUnappliedSlash<T: Config> = StorageValue<Pallet<T>, EraIndex>;
/// Apply any pending slashes that were queued.
///
/// That means we might slash someone a bit too early, but we definitely
/// won't forget to slash them. The cap of 512 is somewhat arbitrary, chosen to
/// prevent us from iterating over an arbitrarily large number of keys in `on_runtime_upgrade`.
pub struct MigrateToV10<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV10<T> {
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0 {
let pending_slashes = UnappliedSlashes::<T>::iter().take(512);
for (era, slashes) in pending_slashes {
for slash in slashes {
// in the old slashing scheme, the slash era was the key at which we read
// from `UnappliedSlashes`.
log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era);
slashing::apply_slash::<T>(slash, era);
}
}
EarliestUnappliedSlash::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V10_0_0);
log!(info, "MigrateToV10 executed successfully");
T::DbWeight::get().reads_writes(1, 1)
} else {
log!(warn, "MigrateToV10 should be removed.");
T::DbWeight::get().reads(1)
}
}
}
}
pub mod v9 {
use super::*;
#[cfg(feature = "try-runtime")]
use codec::{Decode, Encode};
#[cfg(feature = "try-runtime")]
use sp_std::vec::Vec;
/// Migration implementation that injects all validators into the sorted list.
///
/// This is only useful for chains that started their `VoterList` just based on nominators.
pub struct InjectValidatorsIntoVoterList<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for InjectValidatorsIntoVoterList<T> {
fn on_runtime_upgrade() -> Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V8_0_0 {
let prev_count = T::VoterList::count();
let weight_of_cached = Pallet::<T>::weight_of_fn();
for (v, _) in Validators::<T>::iter() {
let weight = weight_of_cached(&v);
let _ = T::VoterList::on_insert(v.clone(), weight).map_err(|err| {
log!(warn, "failed to insert {:?} into VoterList: {:?}", v, err)
});
}
log!(
info,
"injected a total of {} new voters, prev count: {} next count: {}, updating to version 9",
Validators::<T>::count(),
prev_count,
T::VoterList::count(),
);
StorageVersion::<T>::put(ObsoleteReleases::V9_0_0);
T::BlockWeights::get().max_block
} else {
log!(
warn,
"InjectValidatorsIntoVoterList being executed on the wrong storage \
version, expected ObsoleteReleases::V8_0_0"
);
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V8_0_0,
"must upgrade linearly"
);
let prev_count = T::VoterList::count();
Ok(prev_count.encode())
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(prev_count: Vec<u8>) -> Result<(), TryRuntimeError> {
let prev_count: u32 = Decode::decode(&mut prev_count.as_slice()).expect(
"the state parameter should be something that was generated by pre_upgrade",
);
let post_count = T::VoterList::count();
let validators = Validators::<T>::count();
ensure!(
post_count == prev_count + validators,
"`VoterList` count after the migration must equal to the sum of \
previous count and the current number of validators"
);
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0,
"must upgrade"
);
Ok(())
}
}
}
pub mod v8 {
use super::*;
use crate::{Config, Nominators, Pallet, Weight};
use frame_election_provider_support::SortedListProvider;
use frame_support::traits::Get;
#[cfg(feature = "try-runtime")]
pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V7_0_0,
"must upgrade linearly"
);
crate::log!(info, "👜 staking bags-list migration passes PRE migrate checks ✅",);
Ok(())
}
/// Migration to sorted `VoterList`.
pub fn migrate<T: Config>() -> Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V7_0_0 {
crate::log!(info, "migrating staking to ObsoleteReleases::V8_0_0");
let migrated = T::VoterList::unsafe_regenerate(
Nominators::<T>::iter().map(|(id, _)| id),
Pallet::<T>::weight_of_fn(),
);
StorageVersion::<T>::put(ObsoleteReleases::V8_0_0);
crate::log!(
info,
"👜 completed staking migration to ObsoleteReleases::V8_0_0 with {} voters migrated",
migrated,
);
T::BlockWeights::get().max_block
} else {
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
pub fn post_migrate<T: Config>() -> Result<(), &'static str> {
T::VoterList::try_state().map_err(|_| "VoterList is not in a sane state.")?;
crate::log!(info, "👜 staking bags-list migration passes POST migrate checks ✅",);
Ok(())
}
}
pub mod v7 {
use super::*;
use frame_support::storage_alias;
#[storage_alias]
type CounterForValidators<T: Config> = StorageValue<Pallet<T>, u32>;
#[storage_alias]
type CounterForNominators<T: Config> = StorageValue<Pallet<T>, u32>;
pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
assert!(
CounterForValidators::<T>::get().unwrap().is_zero(),
"CounterForValidators already set."
);
assert!(
CounterForNominators::<T>::get().unwrap().is_zero(),
"CounterForNominators already set."
);
assert!(Validators::<T>::count().is_zero(), "Validators already set.");
assert!(Nominators::<T>::count().is_zero(), "Nominators already set.");
assert!(StorageVersion::<T>::get() == ObsoleteReleases::V6_0_0);
Ok(())
}
pub fn migrate<T: Config>() -> Weight {
log!(info, "Migrating staking to ObsoleteReleases::V7_0_0");
let validator_count = Validators::<T>::iter().count() as u32;
let nominator_count = Nominators::<T>::iter().count() as u32;
CounterForValidators::<T>::put(validator_count);
CounterForNominators::<T>::put(nominator_count);
StorageVersion::<T>::put(ObsoleteReleases::V7_0_0);
log!(info, "Completed staking migration to ObsoleteReleases::V7_0_0");
T::DbWeight::get().reads_writes(validator_count.saturating_add(nominator_count).into(), 2)
}
}
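// Illustrative arithmetic only (the counts are invented, not taken from any real
// chain): with 1_000 validators and 20_000 nominators, the v7 migration above
// charges `T::DbWeight::get().reads_writes(21_000, 2)` - one read per iterated
// key plus the two counter writes.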
pub mod v6 {
use super::*;
use frame_support::{storage_alias, traits::Get, weights::Weight};
// NOTE: value type doesn't matter, we just set it to () here.
#[storage_alias]
type SnapshotValidators<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type SnapshotNominators<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type QueuedElected<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type QueuedScore<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type EraElectionStatus<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type IsCurrentSessionFinal<T: Config> = StorageValue<Pallet<T>, ()>;
/// check to execute prior to migration.
pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
// these may or may not exist.
log!(info, "SnapshotValidators.exits()? {:?}", SnapshotValidators::<T>::exists());
log!(info, "SnapshotNominators.exits()? {:?}", SnapshotNominators::<T>::exists());
log!(info, "QueuedElected.exits()? {:?}", QueuedElected::<T>::exists());
log!(info, "QueuedScore.exits()? {:?}", QueuedScore::<T>::exists());
// these must exist.
assert!(
IsCurrentSessionFinal::<T>::exists(),
"IsCurrentSessionFinal storage item not found!"
);
assert!(EraElectionStatus::<T>::exists(), "EraElectionStatus storage item not found!");
Ok(())
}
/// Migrate storage to v6.
pub fn migrate<T: Config>() -> Weight {
log!(info, "Migrating staking to ObsoleteReleases::V6_0_0");
SnapshotValidators::<T>::kill();
SnapshotNominators::<T>::kill();
QueuedElected::<T>::kill();
QueuedScore::<T>::kill();
EraElectionStatus::<T>::kill();
IsCurrentSessionFinal::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V6_0_0);
log!(info, "Done.");
T::DbWeight::get().writes(6 + 1)
}
}
| MigrateToV12 | identifier_name |
migrations.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Storage migrations for the Staking pallet.
use super::*;
use frame_election_provider_support::SortedListProvider;
use frame_support::{
dispatch::GetStorageVersion, pallet_prelude::ValueQuery, storage_alias,
traits::OnRuntimeUpgrade,
};
#[cfg(feature = "try-runtime")]
use frame_support::ensure;
#[cfg(feature = "try-runtime")]
use sp_runtime::TryRuntimeError;
/// Used for release versioning up to v12.
///
/// Obsolete from v13. Keeping around to make encoding/decoding of old migration code easier.
#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
enum ObsoleteReleases {
V1_0_0Ancient,
V2_0_0,
V3_0_0,
V4_0_0,
V5_0_0, // blockable validators.
V6_0_0, // removal of all storage associated with offchain phragmen.
V7_0_0, // keep track of number of nominators / validators in map
V8_0_0, // populate `VoterList`.
V9_0_0, // inject validators into `VoterList` as well.
V10_0_0, // remove `EarliestUnappliedSlash`.
V11_0_0, // Move pallet storage prefix, e.g. BagsList -> VoterBagsList
V12_0_0, // remove `HistoryDepth`.
}
impl Default for ObsoleteReleases {
fn default() -> Self {
ObsoleteReleases::V12_0_0
}
}
/// Alias to the old storage item used for release versioning. Obsolete since v13.
#[storage_alias]
type StorageVersion<T: Config> = StorageValue<Pallet<T>, ObsoleteReleases, ValueQuery>;
pub mod v13 {
use super::*;
pub struct MigrateToV13<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV13<T> {
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V12_0_0,
"Required v12 before upgrading to v13"
);
Ok(Default::default())
}
fn on_runtime_upgrade() -> Weight {
let current = Pallet::<T>::current_storage_version();
let onchain = StorageVersion::<T>::get();
if current == 13 && onchain == ObsoleteReleases::V12_0_0 {
StorageVersion::<T>::kill();
current.put::<Pallet<T>>();
log!(info, "v13 applied successfully");
T::DbWeight::get().reads_writes(1, 2)
} else {
log!(warn, "Skipping v13, should be removed");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
Pallet::<T>::on_chain_storage_version() == 13,
"v13 not applied"
);
frame_support::ensure!(
!StorageVersion::<T>::exists(),
"Storage version not migrated correctly"
);
Ok(())
}
}
}
pub mod v12 {
use super::*;
use frame_support::{pallet_prelude::ValueQuery, storage_alias};
#[storage_alias]
type HistoryDepth<T: Config> = StorageValue<Pallet<T>, u32, ValueQuery>;
/// Clean up `HistoryDepth` from storage.
///
/// We will be depending on the configurable value of `HistoryDepth` post
/// this release.
pub struct MigrateToV12<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV12<T> {
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"Expected v11 before upgrading to v12"
);
if HistoryDepth::<T>::exists() {
frame_support::ensure!(
T::HistoryDepth::get() == HistoryDepth::<T>::get(),
"Provided value of HistoryDepth should be same as the existing storage value"
);
} else {
log::info!("No HistoryDepth in storage; nothing to remove");
}
Ok(Default::default())
}
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0 {
HistoryDepth::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V12_0_0);
log!(info, "v12 applied successfully");
T::DbWeight::get().reads_writes(1, 2)
} else {
log!(warn, "Skipping v12, should be removed");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V12_0_0,
"v12 not applied"
);
Ok(())
}
}
}
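// Hedged sketch of the call site this migration enables (the `Runtime` name is
// an assumption for illustration, not defined in this file): after v12 the depth
// comes from the pallet `Config` constant instead of the removed storage item.
//
//   let depth: u32 = <Runtime as crate::Config>::HistoryDepth::get();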
pub mod v11 {
use super::*;
use frame_support::{
storage::migration::move_pallet,
traits::{GetStorageVersion, PalletInfoAccess},
};
#[cfg(feature = "try-runtime")]
use sp_io::hashing::twox_128;
pub struct MigrateToV11<T, P, N>(sp_std::marker::PhantomData<(T, P, N)>);
impl<T: Config, P: GetStorageVersion + PalletInfoAccess, N: Get<&'static str>> OnRuntimeUpgrade
for MigrateToV11<T, P, N>
{
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0,
"must upgrade linearly"
);
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_some(),
"no data for the old pallet name has been detected"
);
Ok(Default::default())
}
/// Migrate the entire storage of this pallet to a new prefix.
///
/// This new prefix must be the same as the one set in construct_runtime. For safety, use
/// `PalletInfo` to get it, as:
/// `<Runtime as frame_system::Config>::PalletInfo::name::<VoterBagsList>`.
///
/// The migration will look into the storage version in order to avoid triggering a
/// migration on already up-to-date storage.
fn on_runtime_upgrade() -> Weight {
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
if StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0 {
// bump version anyway, even if we don't need to move the prefix
StorageVersion::<T>::put(ObsoleteReleases::V11_0_0);
if new_pallet_name == old_pallet_name {
log!(
warn,
"new bags-list name is equal to the old one, only bumping the version"
);
return T::DbWeight::get().reads(1).saturating_add(T::DbWeight::get().writes(1))
}
move_pallet(old_pallet_name.as_bytes(), new_pallet_name.as_bytes());
<T as frame_system::Config>::BlockWeights::get().max_block
} else {
log!(warn, "v11::migrate should be removed.");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"wrong version after the upgrade"
);
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
// skip storage prefix checks for the same pallet names
if new_pallet_name == old_pallet_name {
return Ok(())
}
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_none(),
"old pallet data hasn't been removed"
);
let new_pallet_name = <P as PalletInfoAccess>::name();
let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&new_pallet_prefix).is_some(),
"new pallet data hasn't been created"
);
Ok(())
}
}
}
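// Hedged usage sketch for the migration above; `Runtime`, `VoterBagsList` and the
// "BagsList" literal are assumptions for illustration and are not defined in this
// file. The old pallet name is supplied via the `N: Get<&'static str>` parameter,
// while the new name is read from `PalletInfoAccess` on `P`.
//
//   frame_support::parameter_types! {
//       pub const OldBagsListName: &'static str = "BagsList";
//   }
//   type Upgrade = v11::MigrateToV11<Runtime, VoterBagsList, OldBagsListName>;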
pub mod v10 {
use super::*;
use frame_support::storage_alias;
#[storage_alias]
type EarliestUnappliedSlash<T: Config> = StorageValue<Pallet<T>, EraIndex>;
/// Apply any pending slashes that were queued.
///
/// That means we might slash someone a bit too early, but we definitely
/// won't forget to slash them. The cap of 512 is somewhat arbitrary, chosen to
/// prevent us from iterating over an arbitrarily large number of keys in `on_runtime_upgrade`.
pub struct MigrateToV10<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV10<T> {
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0 {
let pending_slashes = UnappliedSlashes::<T>::iter().take(512);
for (era, slashes) in pending_slashes {
for slash in slashes {
// in the old slashing scheme, the slash era was the key at which we read
// from `UnappliedSlashes`.
log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era);
slashing::apply_slash::<T>(slash, era);
}
}
EarliestUnappliedSlash::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V10_0_0);
log!(info, "MigrateToV10 executed successfully");
T::DbWeight::get().reads_writes(1, 1)
} else {
log!(warn, "MigrateToV10 should be removed.");
T::DbWeight::get().reads(1)
}
}
}
}
pub mod v9 {
use super::*;
#[cfg(feature = "try-runtime")]
use codec::{Decode, Encode};
#[cfg(feature = "try-runtime")]
use sp_std::vec::Vec;
/// Migration implementation that injects all validators into sorted list.
///
/// This is only useful for chains that started their `VoterList` just based on nominators.
pub struct InjectValidatorsIntoVoterList<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for InjectValidatorsIntoVoterList<T> {
fn on_runtime_upgrade() -> Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V8_0_0 | }
else {
log!(
warn,
"InjectValidatorsIntoVoterList being executed on the wrong storage \
version, expected ObsoleteReleases::V8_0_0"
);
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V8_0_0,
"must upgrade linearly"
);
let prev_count = T::VoterList::count();
Ok(prev_count.encode())
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(prev_count: Vec<u8>) -> Result<(), TryRuntimeError> {
let prev_count: u32 = Decode::decode(&mut prev_count.as_slice()).expect(
"the state parameter should be something that was generated by pre_upgrade",
);
let post_count = T::VoterList::count();
let validators = Validators::<T>::count();
ensure!(
post_count == prev_count + validators,
"`VoterList` count after the migration must equal to the sum of \
previous count and the current number of validators"
);
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0,
"must upgrade"
);
Ok(())
}
}
}
pub mod v8 {
use super::*;
use crate::{Config, Nominators, Pallet, Weight};
use frame_election_provider_support::SortedListProvider;
use frame_support::traits::Get;
#[cfg(feature = "try-runtime")]
pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V7_0_0,
"must upgrade linearly"
);
crate::log!(info, "👜 staking bags-list migration passes PRE migrate checks ✅",);
Ok(())
}
/// Migration to sorted `VoterList`.
pub fn migrate<T: Config>() -> Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V7_0_0 {
crate::log!(info, "migrating staking to ObsoleteReleases::V8_0_0");
let migrated = T::VoterList::unsafe_regenerate(
Nominators::<T>::iter().map(|(id, _)| id),
Pallet::<T>::weight_of_fn(),
);
StorageVersion::<T>::put(ObsoleteReleases::V8_0_0);
crate::log!(
info,
"👜 completed staking migration to ObsoleteReleases::V8_0_0 with {} voters migrated",
migrated,
);
T::BlockWeights::get().max_block
} else {
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
pub fn post_migrate<T: Config>() -> Result<(), &'static str> {
T::VoterList::try_state().map_err(|_| "VoterList is not in a sane state.")?;
crate::log!(info, "👜 staking bags-list migration passes POST migrate checks ✅",);
Ok(())
}
}
pub mod v7 {
use super::*;
use frame_support::storage_alias;
#[storage_alias]
type CounterForValidators<T: Config> = StorageValue<Pallet<T>, u32>;
#[storage_alias]
type CounterForNominators<T: Config> = StorageValue<Pallet<T>, u32>;
pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
assert!(
CounterForValidators::<T>::get().unwrap().is_zero(),
"CounterForValidators already set."
);
assert!(
CounterForNominators::<T>::get().unwrap().is_zero(),
"CounterForNominators already set."
);
assert!(Validators::<T>::count().is_zero(), "Validators already set.");
assert!(Nominators::<T>::count().is_zero(), "Nominators already set.");
assert!(StorageVersion::<T>::get() == ObsoleteReleases::V6_0_0);
Ok(())
}
pub fn migrate<T: Config>() -> Weight {
log!(info, "Migrating staking to ObsoleteReleases::V7_0_0");
let validator_count = Validators::<T>::iter().count() as u32;
let nominator_count = Nominators::<T>::iter().count() as u32;
CounterForValidators::<T>::put(validator_count);
CounterForNominators::<T>::put(nominator_count);
StorageVersion::<T>::put(ObsoleteReleases::V7_0_0);
log!(info, "Completed staking migration to ObsoleteReleases::V7_0_0");
T::DbWeight::get().reads_writes(validator_count.saturating_add(nominator_count).into(), 2)
}
}
pub mod v6 {
use super::*;
use frame_support::{storage_alias, traits::Get, weights::Weight};
// NOTE: value type doesn't matter, we just set it to () here.
#[storage_alias]
type SnapshotValidators<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type SnapshotNominators<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type QueuedElected<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type QueuedScore<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type EraElectionStatus<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type IsCurrentSessionFinal<T: Config> = StorageValue<Pallet<T>, ()>;
/// check to execute prior to migration.
pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
// these may or may not exist.
log!(info, "SnapshotValidators.exits()? {:?}", SnapshotValidators::<T>::exists());
log!(info, "SnapshotNominators.exits()? {:?}", SnapshotNominators::<T>::exists());
log!(info, "QueuedElected.exits()? {:?}", QueuedElected::<T>::exists());
log!(info, "QueuedScore.exits()? {:?}", QueuedScore::<T>::exists());
// these must exist.
assert!(
IsCurrentSessionFinal::<T>::exists(),
"IsCurrentSessionFinal storage item not found!"
);
assert!(EraElectionStatus::<T>::exists(), "EraElectionStatus storage item not found!");
Ok(())
}
/// Migrate storage to v6.
pub fn migrate<T: Config>() -> Weight {
log!(info, "Migrating staking to ObsoleteReleases::V6_0_0");
SnapshotValidators::<T>::kill();
SnapshotNominators::<T>::kill();
QueuedElected::<T>::kill();
QueuedScore::<T>::kill();
EraElectionStatus::<T>::kill();
IsCurrentSessionFinal::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V6_0_0);
log!(info, "Done.");
T::DbWeight::get().writes(6 + 1)
}
}
| {
let prev_count = T::VoterList::count();
let weight_of_cached = Pallet::<T>::weight_of_fn();
for (v, _) in Validators::<T>::iter() {
let weight = weight_of_cached(&v);
let _ = T::VoterList::on_insert(v.clone(), weight).map_err(|err| {
log!(warn, "failed to insert {:?} into VoterList: {:?}", v, err)
});
}
log!(
info,
"injected a total of {} new voters, prev count: {} next count: {}, updating to version 9",
Validators::<T>::count(),
prev_count,
T::VoterList::count(),
);
StorageVersion::<T>::put(ObsoleteReleases::V9_0_0);
T::BlockWeights::get().max_block | conditional_block |
migrations.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Storage migrations for the Staking pallet.
use super::*;
use frame_election_provider_support::SortedListProvider;
use frame_support::{
dispatch::GetStorageVersion, pallet_prelude::ValueQuery, storage_alias,
traits::OnRuntimeUpgrade,
};
#[cfg(feature = "try-runtime")]
use frame_support::ensure;
#[cfg(feature = "try-runtime")]
use sp_runtime::TryRuntimeError;
/// Used for release versioning up to v12.
///
/// Obsolete from v13. Keeping around to make encoding/decoding of old migration code easier.
#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
enum ObsoleteReleases {
V1_0_0Ancient,
V2_0_0,
V3_0_0,
V4_0_0,
V5_0_0, // blockable validators.
V6_0_0, // removal of all storage associated with offchain phragmen.
V7_0_0, // keep track of number of nominators / validators in map
V8_0_0, // populate `VoterList`.
V9_0_0, // inject validators into `VoterList` as well.
V10_0_0, // remove `EarliestUnappliedSlash`.
V11_0_0, // Move pallet storage prefix, e.g. BagsList -> VoterBagsList
V12_0_0, // remove `HistoryDepth`.
}
impl Default for ObsoleteReleases {
fn default() -> Self {
ObsoleteReleases::V12_0_0
}
}
/// Alias to the old storage item used for release versioning. Obsolete since v13.
#[storage_alias]
type StorageVersion<T: Config> = StorageValue<Pallet<T>, ObsoleteReleases, ValueQuery>;
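// Sketch of the behaviour this alias implies (follows from `ValueQuery` plus the
// `Default` impl above, assuming standard `storage_alias` semantics): a chain that
// never wrote the old key decodes as the newest obsolete release.
//
//   let onchain: ObsoleteReleases = StorageVersion::<T>::get(); // V12_0_0 when unset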
pub mod v13 {
use super::*;
pub struct MigrateToV13<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV13<T> {
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> |
fn on_runtime_upgrade() -> Weight {
let current = Pallet::<T>::current_storage_version();
let onchain = StorageVersion::<T>::get();
if current == 13 && onchain == ObsoleteReleases::V12_0_0 {
StorageVersion::<T>::kill();
current.put::<Pallet<T>>();
log!(info, "v13 applied successfully");
T::DbWeight::get().reads_writes(1, 2)
} else {
log!(warn, "Skipping v13, should be removed");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
Pallet::<T>::on_chain_storage_version() == 13,
"v13 not applied"
);
frame_support::ensure!(
!StorageVersion::<T>::exists(),
"Storage version not migrated correctly"
);
Ok(())
}
}
}
pub mod v12 {
use super::*;
use frame_support::{pallet_prelude::ValueQuery, storage_alias};
#[storage_alias]
type HistoryDepth<T: Config> = StorageValue<Pallet<T>, u32, ValueQuery>;
/// Clean up `HistoryDepth` from storage.
///
/// We will be depending on the configurable value of `HistoryDepth` post
/// this release.
pub struct MigrateToV12<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV12<T> {
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"Expected v11 before upgrading to v12"
);
if HistoryDepth::<T>::exists() {
frame_support::ensure!(
T::HistoryDepth::get() == HistoryDepth::<T>::get(),
"Provided value of HistoryDepth should be same as the existing storage value"
);
} else {
log::info!("No HistoryDepth in storage; nothing to remove");
}
Ok(Default::default())
}
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0 {
HistoryDepth::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V12_0_0);
log!(info, "v12 applied successfully");
T::DbWeight::get().reads_writes(1, 2)
} else {
log!(warn, "Skipping v12, should be removed");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V12_0_0,
"v12 not applied"
);
Ok(())
}
}
}
pub mod v11 {
use super::*;
use frame_support::{
storage::migration::move_pallet,
traits::{GetStorageVersion, PalletInfoAccess},
};
#[cfg(feature = "try-runtime")]
use sp_io::hashing::twox_128;
pub struct MigrateToV11<T, P, N>(sp_std::marker::PhantomData<(T, P, N)>);
impl<T: Config, P: GetStorageVersion + PalletInfoAccess, N: Get<&'static str>> OnRuntimeUpgrade
for MigrateToV11<T, P, N>
{
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0,
"must upgrade linearly"
);
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_some(),
"no data for the old pallet name has been detected"
);
Ok(Default::default())
}
/// Migrate the entire storage of this pallet to a new prefix.
///
/// This new prefix must be the same as the one set in construct_runtime. For safety, use
/// `PalletInfo` to get it, as:
/// `<Runtime as frame_system::Config>::PalletInfo::name::<VoterBagsList>`.
///
/// The migration will look into the storage version in order to avoid triggering a
/// migration on already up-to-date storage.
fn on_runtime_upgrade() -> Weight {
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
if StorageVersion::<T>::get() == ObsoleteReleases::V10_0_0 {
// bump version anyway, even if we don't need to move the prefix
StorageVersion::<T>::put(ObsoleteReleases::V11_0_0);
if new_pallet_name == old_pallet_name {
log!(
warn,
"new bags-list name is equal to the old one, only bumping the version"
);
return T::DbWeight::get().reads(1).saturating_add(T::DbWeight::get().writes(1))
}
move_pallet(old_pallet_name.as_bytes(), new_pallet_name.as_bytes());
<T as frame_system::Config>::BlockWeights::get().max_block
} else {
log!(warn, "v11::migrate should be removed.");
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V11_0_0,
"wrong version after the upgrade"
);
let old_pallet_name = N::get();
let new_pallet_name = <P as PalletInfoAccess>::name();
// skip storage prefix checks for the same pallet names
if new_pallet_name == old_pallet_name {
return Ok(())
}
let old_pallet_prefix = twox_128(N::get().as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&old_pallet_prefix).is_none(),
"old pallet data hasn't been removed"
);
let new_pallet_name = <P as PalletInfoAccess>::name();
let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
frame_support::ensure!(
sp_io::storage::next_key(&new_pallet_prefix).is_some(),
"new pallet data hasn't been created"
);
Ok(())
}
}
}
pub mod v10 {
use super::*;
use frame_support::storage_alias;
#[storage_alias]
type EarliestUnappliedSlash<T: Config> = StorageValue<Pallet<T>, EraIndex>;
/// Apply any pending slashes that were queued.
///
/// That means we might slash someone a bit too early, but we definitely
/// won't forget to slash them. The cap of 512 is somewhat arbitrary, chosen to
/// prevent us from iterating over an arbitrarily large number of keys in `on_runtime_upgrade`.
pub struct MigrateToV10<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for MigrateToV10<T> {
fn on_runtime_upgrade() -> frame_support::weights::Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0 {
let pending_slashes = UnappliedSlashes::<T>::iter().take(512);
for (era, slashes) in pending_slashes {
for slash in slashes {
// in the old slashing scheme, the slash era was the key at which we read
// from `UnappliedSlashes`.
log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era);
slashing::apply_slash::<T>(slash, era);
}
}
EarliestUnappliedSlash::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V10_0_0);
log!(info, "MigrateToV10 executed successfully");
T::DbWeight::get().reads_writes(1, 1)
} else {
log!(warn, "MigrateToV10 should be removed.");
T::DbWeight::get().reads(1)
}
}
}
}
pub mod v9 {
use super::*;
#[cfg(feature = "try-runtime")]
use codec::{Decode, Encode};
#[cfg(feature = "try-runtime")]
use sp_std::vec::Vec;
/// Migration implementation that injects all validators into sorted list.
///
/// This is only useful for chains that started their `VoterList` just based on nominators.
pub struct InjectValidatorsIntoVoterList<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for InjectValidatorsIntoVoterList<T> {
fn on_runtime_upgrade() -> Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V8_0_0 {
let prev_count = T::VoterList::count();
let weight_of_cached = Pallet::<T>::weight_of_fn();
for (v, _) in Validators::<T>::iter() {
let weight = weight_of_cached(&v);
let _ = T::VoterList::on_insert(v.clone(), weight).map_err(|err| {
log!(warn, "failed to insert {:?} into VoterList: {:?}", v, err)
});
}
log!(
info,
"injected a total of {} new voters, prev count: {} next count: {}, updating to version 9",
Validators::<T>::count(),
prev_count,
T::VoterList::count(),
);
StorageVersion::<T>::put(ObsoleteReleases::V9_0_0);
T::BlockWeights::get().max_block
} else {
log!(
warn,
"InjectValidatorsIntoVoterList being executed on the wrong storage \
version, expected ObsoleteReleases::V8_0_0"
);
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V8_0_0,
"must upgrade linearly"
);
let prev_count = T::VoterList::count();
Ok(prev_count.encode())
}
#[cfg(feature = "try-runtime")]
fn post_upgrade(prev_count: Vec<u8>) -> Result<(), TryRuntimeError> {
let prev_count: u32 = Decode::decode(&mut prev_count.as_slice()).expect(
"the state parameter should be something that was generated by pre_upgrade",
);
let post_count = T::VoterList::count();
let validators = Validators::<T>::count();
ensure!(
post_count == prev_count + validators,
"`VoterList` count after the migration must equal to the sum of \
previous count and the current number of validators"
);
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V9_0_0,
"must upgrade"
);
Ok(())
}
}
}
pub mod v8 {
use super::*;
use crate::{Config, Nominators, Pallet, Weight};
use frame_election_provider_support::SortedListProvider;
use frame_support::traits::Get;
#[cfg(feature = "try-runtime")]
pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V7_0_0,
"must upgrade linearly"
);
crate::log!(info, "👜 staking bags-list migration passes PRE migrate checks ✅",);
Ok(())
}
/// Migration to sorted `VoterList`.
pub fn migrate<T: Config>() -> Weight {
if StorageVersion::<T>::get() == ObsoleteReleases::V7_0_0 {
crate::log!(info, "migrating staking to ObsoleteReleases::V8_0_0");
let migrated = T::VoterList::unsafe_regenerate(
Nominators::<T>::iter().map(|(id, _)| id),
Pallet::<T>::weight_of_fn(),
);
StorageVersion::<T>::put(ObsoleteReleases::V8_0_0);
crate::log!(
info,
"👜 completed staking migration to ObsoleteReleases::V8_0_0 with {} voters migrated",
migrated,
);
T::BlockWeights::get().max_block
} else {
T::DbWeight::get().reads(1)
}
}
#[cfg(feature = "try-runtime")]
pub fn post_migrate<T: Config>() -> Result<(), &'static str> {
T::VoterList::try_state().map_err(|_| "VoterList is not in a sane state.")?;
crate::log!(info, "👜 staking bags-list migration passes POST migrate checks ✅",);
Ok(())
}
}
pub mod v7 {
use super::*;
use frame_support::storage_alias;
#[storage_alias]
type CounterForValidators<T: Config> = StorageValue<Pallet<T>, u32>;
#[storage_alias]
type CounterForNominators<T: Config> = StorageValue<Pallet<T>, u32>;
pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
assert!(
CounterForValidators::<T>::get().unwrap().is_zero(),
"CounterForValidators already set."
);
assert!(
CounterForNominators::<T>::get().unwrap().is_zero(),
"CounterForNominators already set."
);
assert!(Validators::<T>::count().is_zero(), "Validators already set.");
assert!(Nominators::<T>::count().is_zero(), "Nominators already set.");
assert!(StorageVersion::<T>::get() == ObsoleteReleases::V6_0_0);
Ok(())
}
pub fn migrate<T: Config>() -> Weight {
log!(info, "Migrating staking to ObsoleteReleases::V7_0_0");
let validator_count = Validators::<T>::iter().count() as u32;
let nominator_count = Nominators::<T>::iter().count() as u32;
CounterForValidators::<T>::put(validator_count);
CounterForNominators::<T>::put(nominator_count);
StorageVersion::<T>::put(ObsoleteReleases::V7_0_0);
log!(info, "Completed staking migration to ObsoleteReleases::V7_0_0");
T::DbWeight::get().reads_writes(validator_count.saturating_add(nominator_count).into(), 2)
}
}
pub mod v6 {
use super::*;
use frame_support::{storage_alias, traits::Get, weights::Weight};
// NOTE: value type doesn't matter, we just set it to () here.
#[storage_alias]
type SnapshotValidators<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type SnapshotNominators<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type QueuedElected<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type QueuedScore<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type EraElectionStatus<T: Config> = StorageValue<Pallet<T>, ()>;
#[storage_alias]
type IsCurrentSessionFinal<T: Config> = StorageValue<Pallet<T>, ()>;
/// check to execute prior to migration.
pub fn pre_migrate<T: Config>() -> Result<(), &'static str> {
// these may or may not exist.
log!(info, "SnapshotValidators.exits()? {:?}", SnapshotValidators::<T>::exists());
log!(info, "SnapshotNominators.exits()? {:?}", SnapshotNominators::<T>::exists());
log!(info, "QueuedElected.exits()? {:?}", QueuedElected::<T>::exists());
log!(info, "QueuedScore.exits()? {:?}", QueuedScore::<T>::exists());
// these must exist.
assert!(
IsCurrentSessionFinal::<T>::exists(),
"IsCurrentSessionFinal storage item not found!"
);
assert!(EraElectionStatus::<T>::exists(), "EraElectionStatus storage item not found!");
Ok(())
}
/// Migrate storage to v6.
pub fn migrate<T: Config>() -> Weight {
log!(info, "Migrating staking to ObsoleteReleases::V6_0_0");
SnapshotValidators::<T>::kill();
SnapshotNominators::<T>::kill();
QueuedElected::<T>::kill();
QueuedScore::<T>::kill();
EraElectionStatus::<T>::kill();
IsCurrentSessionFinal::<T>::kill();
StorageVersion::<T>::put(ObsoleteReleases::V6_0_0);
log!(info, "Done.");
T::DbWeight::get().writes(6 + 1)
}
}
| {
frame_support::ensure!(
StorageVersion::<T>::get() == ObsoleteReleases::V12_0_0,
"Required v12 before upgrading to v13"
);
Ok(Default::default())
} | identifier_body |
day20.rs | #![allow(dead_code)]
use std::collections::HashMap;
mod lib;
// Width (and height) of each tile.
const SIZE: usize = 10;
// One row/side of each tile.
type Line = u16;
#[derive(PartialEq, Eq, Debug, Clone, Hash, Default)]
struct Tile {
id: usize,
rows: Vec<Line>,
flipped: bool,
rotated: u32,
}
fn bit_to_char(input: bool) -> char {
if input {
'#'
} else {
'.'
}
}
// Converts a single line to a "#.#.#...." string.
fn line_to_string(input: Line) -> String {
(0..SIZE)
.rev()
.map(|i| bit_to_char(((input >> i) & 1) == 1))
.collect()
}
fn line_to_string_u128(input: u128, size: usize) -> String {
(0..size)
.rev()
.map(|i| bit_to_char(((input >> i) & 1) == 1))
.collect()
}
fn invert(input: Line) -> Line {
(0..SIZE).fold(0, |out, i| (out << 1) | ((input >> i) & 1))
}
fn parse_line(line: &str) -> u128 {
let mut out = 0;
for c in line.chars() {
out = (out << 1)
| match c {
'.' => 0,
'#' => 1,
_ => {
panic!("invalid char: {}", c);
}
};
}
out
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_line_to_string() {
assert_eq!(line_to_string(1), ".........#");
assert_eq!(line_to_string(391), ".##....###");
}
#[test]
fn test_invert() {
assert_eq!(invert(1), 512);
assert_eq!(invert(512), 1);
assert_eq!(invert(2), 256);
}
}
impl std::fmt::Display for Tile {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut first = true;
for line in self.rows.iter() {
if first {
first = false;
} else {
f.write_str("\n")?;
}
f.write_str(&line_to_string(*line))?;
}
Ok(())
}
}
impl Tile {
fn parse(input: &str) -> Tile {
let mut lines = input.lines();
let id = Tile::parse_id(lines.next().unwrap());
let rows = lines.map(|line| parse_line(line) as Line).collect();
Tile {
id,
rows,
..Tile::default()
}
}
fn parse_id(line: &str) -> usize {
assert!(line.starts_with("Tile "));
assert!(line.ends_with(':'));
line[5..SIZE - 1].parse().unwrap()
}
fn north_edge(self: &Self) -> Line { |
fn west_edge(self: &Self) -> Line {
self.rotate_cw().rows[0]
}
fn east_edge(self: &Self) -> Line {
self.rotate_cw().rotate_cw().rotate_cw().rows[0]
}
fn south_edge(self: &Self) -> Line {
self.rotate_cw().rotate_cw().rows[0]
}
fn get_edges(self: &Self) -> [Line; 4] {
let rot1 = self.rotate_cw();
let rot2 = rot1.rotate_cw();
let rot3 = rot2.rotate_cw();
[
self.north_edge(),
rot1.north_edge(),
rot2.north_edge(),
rot3.north_edge(),
]
}
fn rotate_cw(self: &Self) -> Tile {
let mut rows: Vec<Line> = Vec::new();
for i in 0..SIZE {
let mut line = 0;
for j in 0..SIZE {
line = (line << 1) | ((self.rows[SIZE - j - 1] >> (SIZE - i - 1)) & 1);
}
rows.push(line);
}
Tile {
id: self.id,
rows,
flipped: self.flipped,
rotated: (self.rotated + 1) % 4,
// links: [self.links[3], self.links[0], self.links[1], self.links[2]],
}
}
fn mirror_vertical(self: &Self) -> Tile {
Tile {
id: self.id,
rows: self.rows.iter().cloned().rev().collect(),
flipped: !self.flipped,
rotated: 0,
// links: [self.links[2], self.links[1], self.links[0], self.links[3]],
}
}
// Builds all 8 variants of rotations + flippity.
fn make_variants(self: &Self) -> [Self; 8] {
let rot1 = self.rotate_cw();
let rot2 = rot1.rotate_cw();
let rot3 = rot2.rotate_cw();
let flip0 = self.mirror_vertical();
let flip1 = flip0.rotate_cw();
let flip2 = flip1.rotate_cw();
let flip3 = flip2.rotate_cw();
[self.clone(), rot1, rot2, rot3, flip0, flip1, flip2, flip3]
}
}
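// Hedged sketch of what `make_variants` covers: the 4 rotations of the tile plus
// the 4 rotations of its vertical mirror are the 8 symmetries of a square, i.e.
// every orientation a tile can take in the assembled quilt. The tile text below
// is invented for illustration and is not from the puzzle input.
//
//   let tile = Tile::parse("Tile 9999:\n#.........\n.......... (8 more rows)");
//   assert_eq!(tile.make_variants().len(), 8);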
// All tiles and various helper structs.
#[derive(Debug)]
struct TileBag {
// Mapping from tile id to tile. These tiles will get rotated once we start linking them
// together.
tiles: HashMap<usize, Tile>,
// Mapping from edges to a list of Tiles, rotated such that the top edge is used as the key.
// Each Tile here repeats 8 times with all of its variants.
edges: HashMap<Line, Vec<Tile>>,
// Once we perform assembly, this is where we store the tiles, first rows, then columns.
assembled: Vec<Vec<Tile>>,
}
struct MergedTiles {
rows: Vec<u128>,
}
impl TileBag {
fn parse(input: &str) -> TileBag {
let tiles = input
.split("\n\n")
.map(|tile_lines| {
let t = Tile::parse(tile_lines);
(t.id, t)
})
.collect();
let mut out = TileBag {
tiles,
edges: HashMap::new(),
assembled: Vec::new(),
};
out.build_edges();
out
}
fn build_edges(self: &mut Self) {
for t in self.tiles.values() {
for tt in t.make_variants().iter() {
self.edges
.entry(tt.north_edge())
.or_insert_with(|| Vec::new())
.push(tt.clone());
}
}
}
// Counts how many entries we have in edge map ignoring given id.
fn count_edges(self: &Self, edge: &Line, id_to_ignore: usize) -> usize {
let mut cnt = 0;
for other_edge in self.edges.get(edge).unwrap() {
// Don't count ourselves.
if other_edge.id != id_to_ignore {
cnt += 1;
}
}
cnt
}
// Calculates how many other tile edges this given tile can link to.
fn linked_tiles(self: &Self, tile: &Tile) -> usize {
let mut cnt = 0;
for edge in tile.get_edges().iter() {
cnt += self.count_edges(edge, tile.id);
}
cnt
}
// Finds corner tiles - tiles with only two other tiles linked.
fn find_corners(self: &Self) -> Vec<Tile> {
let corners = self
.tiles
.values()
.filter_map(|t| {
if self.linked_tiles(t) == 2 {
Some(t.clone())
} else {
None
}
})
.collect::<Vec<Tile>>();
assert_eq!(corners.len(), 4);
corners
}
fn orient_starting_tile(self: &Self, tile: &Tile) -> Tile {
for t in tile.make_variants().iter() {
if self.count_edges(&t.north_edge(), tile.id) == 0
&& self.count_edges(&t.west_edge(), tile.id) == 0
{
return t.clone();
}
}
panic!();
}
fn get_tile_for_edge(self: &Self, edge: Line, id_to_ignore: usize) -> Option<Tile> {
let edge_tiles = self.edges.get(&edge).unwrap();
if edge_tiles.len() != 2 {
return None;
}
for tile in edge_tiles.iter() {
if tile.id != id_to_ignore {
return Some(tile.clone());
}
}
panic!("shouldn't get here");
}
// Finds a tile that matches the given east edge, ignoring the given tile (so as not to match ourselves).
fn find_east_neighbor(self: &Self, tile: &Tile) -> Option<Tile> {
Some(
self.get_tile_for_edge(invert(tile.east_edge()), tile.id)?
.rotate_cw()
.rotate_cw()
.rotate_cw(),
)
}
// Finds a tile that matches the given south edge, ignoring the given tile (so as not to match ourselves).
fn find_south_neighbor(self: &Self, tile: &Tile) -> Option<Tile> {
self.get_tile_for_edge(invert(tile.south_edge()), tile.id)
}
// Fills in .assembled with all tiles, rotating/flipping them as needed.
fn assemble(self: &mut Self) {
// Pick one of the corner tiles to start with. Doesn't matter which, so we'll pick the last
// one. Rotate the tile so that it is in top-left corner of the assembled picture (only
// east and south links are used).
let mut tile = self.orient_starting_tile(&self.find_corners().pop().unwrap());
loop {
self.assembled.push(Vec::new());
loop {
// println!("{}\n", tile);
self.assembled.last_mut().unwrap().push(tile.clone());
match self.find_east_neighbor(&tile) {
Some(t) => {
tile = t;
}
None => {
break;
}
}
}
// Go to next row. Find the south neighbor of the first tile from previous row.
match self.find_south_neighbor(&self.assembled.last().unwrap()[0]) {
Some(t) => {
tile = t;
}
None => {
break;
}
}
}
}
// Takes self.assembled and turns it into a giant quilt.
fn merge(self: &mut Self) -> MergedTiles {
const SIZE_INNER: usize = SIZE - 2;
let quilt_side_tiles = self.assembled.len();
let mut out: Vec<u128> = Vec::new();
for _ in 0..(self.assembled.len() * SIZE_INNER) {
out.push(0);
}
for (tile_row_idx, tile_row) in self.assembled.iter().enumerate() {
for (tile_col_idx, tile) in tile_row.iter().enumerate() {
for (tile_row2_idx, row) in tile.rows[1..tile.rows.len() - 1].iter().enumerate() {
let out_row = tile_row_idx * SIZE_INNER + tile_row2_idx;
// dbg!(&out_row);
let out_shift = (quilt_side_tiles - tile_col_idx - 1) * SIZE_INNER;
// dbg!(&out_shift);
out[out_row] |= (((*row as u128) >> 1) & 0xff) << out_shift;
}
}
}
MergedTiles { rows: out }
}
}
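// Worked example for the bit packing in `merge` (assuming a 3x3 tile grid for
// illustration): SIZE_INNER is 8, so each merged row is 24 bits wide. The tile in
// column 0 gets out_shift = (3 - 0 - 1) * 8 = 16 and its bits land in the top
// byte; column 2 gets shift 0 and lands in the low byte. `(*row >> 1) & 0xff`
// first strips the right and left border pixels of the 10-bit tile row.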
type MonsterPattern = [u128; 3];
const MONSTER_WIDTH: usize = 20;
fn make_sea_monster_pattern() -> MonsterPattern {
[
parse_line("..................#."),
parse_line("#....##....##....###"),
parse_line(".#..#..#..#..#..#..."),
]
}
impl MergedTiles {
// Counts the number of sea monsters and the number of '#' cells not covered by any monster.
fn count_sea_monsters(self: &mut Self, monster: &MonsterPattern) -> (usize, usize) {
let mut cnt = 0;
for r in 0..(self.rows.len() - monster.len()) {
for c in 0..(self.rows.len() - MONSTER_WIDTH) {
if self.is_sea_monster_at(monster, r, c) {
cnt += 1;
self.remove_sea_monster_at(monster, r, c);
}
}
}
let mut other = 0;
if cnt > 0 {
for r in self.rows.iter() {
let mut i: u128 = *r;
while i > 0 {
if (i & 1) == 1 {
other += 1;
}
i >>= 1;
}
}
}
(cnt, other)
}
fn remove_sea_monster_at(self: &mut Self, monster: &MonsterPattern, row: usize, col: usize) {
for r in 0..monster.len() {
for c in 0..MONSTER_WIDTH {
if (monster[r] >> (MONSTER_WIDTH - c - 1)) & 1 == 1 {
self.rows[row + r] &= !(1 << (c + col));
}
}
}
}
fn is_sea_monster_at(self: &Self, monster: &MonsterPattern, row: usize, col: usize) -> bool {
for r in 0..monster.len() {
for c in 0..MONSTER_WIDTH {
if ((monster[r] >> (MONSTER_WIDTH - c - 1)) & 1 == 1)
&& ((self.rows[row + r] >> (c + col)) & 1 == 0)
{
return false;
}
}
}
true
}
fn rotate_cw(self: &Self) -> MergedTiles {
let mut rows: Vec<u128> = Vec::new();
for i in 0..self.rows.len() {
let mut line = 0;
for j in 0..self.rows.len() {
line = (line << 1)
| ((self.rows[self.rows.len() - j - 1] >> (self.rows.len() - i - 1)) & 1);
}
rows.push(line);
}
MergedTiles { rows }
}
fn mirror_vertical(self: &Self) -> MergedTiles {
MergedTiles {
rows: self.rows.iter().cloned().rev().collect(),
}
}
}
fn main() {
let contents = std::fs::read_to_string("input/20.txt").expect("read failed");
let mut bag = TileBag::parse(&contents);
// Part 1: find corners, multiply their ids together.
let corners = bag.find_corners();
// dbg!(&corners);
let product = corners.iter().fold(1, |p, tile| p * tile.id);
dbg!(product);
// Part 2: reassembly & monster finding.
bag.assemble();
let mut merged = bag.merge();
let monster = make_sea_monster_pattern();
for i in 0..8 {
let (sea_monsters, other_tiles) = merged.count_sea_monsters(&monster);
if sea_monsters > 0 {
dbg!(sea_monsters, other_tiles);
break;
}
merged = merged.rotate_cw();
if i == 4 {
merged = merged.mirror_vertical();
}
}
} | self.rows[0]
} | random_line_split |
day20.rs | #![allow(dead_code)]
use std::collections::HashMap;
mod lib;
// Width (and height) of each tile.
const SIZE: usize = 10;
// One row/side of each tile.
type Line = u16;
#[derive(PartialEq, Eq, Debug, Clone, Hash, Default)]
struct Tile {
id: usize,
rows: Vec<Line>,
flipped: bool,
rotated: u32,
}
fn bit_to_char(input: bool) -> char {
if input {
'#'
} else {
'.'
}
}
// Converts a single line to a "#.#.#...." string.
fn line_to_string(input: Line) -> String {
(0..SIZE)
.rev()
.map(|i| bit_to_char(((input >> i) & 1) == 1))
.collect()
}
fn line_to_string_u128(input: u128, size: usize) -> String {
(0..size)
.rev()
.map(|i| bit_to_char(((input >> i) & 1) == 1))
.collect()
}
fn invert(input: Line) -> Line {
(0..SIZE).fold(0, |out, i| (out << 1) | ((input >> i) & 1))
}
fn parse_line(line: &str) -> u128 {
let mut out = 0;
for c in line.chars() {
out = (out << 1)
| match c {
'.' => 0,
'#' => 1,
_ => {
panic!("invalid char: {}", c);
}
};
}
out
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_line_to_string() {
assert_eq!(line_to_string(1), ".........#");
assert_eq!(line_to_string(391), ".##....###");
}
#[test]
fn test_invert() {
assert_eq!(invert(1), 512);
assert_eq!(invert(512), 1);
assert_eq!(invert(2), 256);
}
}
impl std::fmt::Display for Tile {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut first = true;
for line in self.rows.iter() {
if first {
first = false;
} else {
f.write_str("\n")?;
}
f.write_str(&line_to_string(*line))?;
}
Ok(())
}
}
impl Tile {
fn parse(input: &str) -> Tile {
let mut lines = input.lines();
let id = Tile::parse_id(lines.next().unwrap());
let rows = lines.map(|line| parse_line(line) as Line).collect();
Tile {
id,
rows,
..Tile::default()
}
}
fn parse_id(line: &str) -> usize {
assert!(line.starts_with("Tile "));
assert!(line.ends_with(':'));
line[5..SIZE - 1].parse().unwrap()
}
fn north_edge(self: &Self) -> Line {
self.rows[0]
}
fn west_edge(self: &Self) -> Line {
self.rotate_cw().rows[0]
}
fn east_edge(self: &Self) -> Line {
self.rotate_cw().rotate_cw().rotate_cw().rows[0]
}
fn south_edge(self: &Self) -> Line {
self.rotate_cw().rotate_cw().rows[0]
}
fn get_edges(self: &Self) -> [Line; 4] {
let rot1 = self.rotate_cw();
let rot2 = rot1.rotate_cw();
let rot3 = rot2.rotate_cw();
[
self.north_edge(),
rot1.north_edge(),
rot2.north_edge(),
rot3.north_edge(),
]
}
fn rotate_cw(self: &Self) -> Tile {
let mut rows: Vec<Line> = Vec::new();
for i in 0..SIZE {
let mut line = 0;
for j in 0..SIZE {
line = (line << 1) | ((self.rows[SIZE - j - 1] >> (SIZE - i - 1)) & 1);
}
rows.push(line);
}
Tile {
id: self.id,
rows,
flipped: self.flipped,
rotated: (self.rotated + 1) % 4,
// links: [self.links[3], self.links[0], self.links[1], self.links[2]],
}
}
fn mirror_vertical(self: &Self) -> Tile {
Tile {
id: self.id,
rows: self.rows.iter().cloned().rev().collect(),
flipped: !self.flipped,
rotated: 0,
// links: [self.links[2], self.links[1], self.links[0], self.links[3]],
}
}
// Builds all 8 variants of rotations + flippity.
fn make_variants(self: &Self) -> [Self; 8] {
let rot1 = self.rotate_cw();
let rot2 = rot1.rotate_cw();
let rot3 = rot2.rotate_cw();
let flip0 = self.mirror_vertical();
let flip1 = flip0.rotate_cw();
let flip2 = flip1.rotate_cw();
let flip3 = flip2.rotate_cw();
[self.clone(), rot1, rot2, rot3, flip0, flip1, flip2, flip3]
}
}
// All tiles and various helper structs.
#[derive(Debug)]
struct TileBag {
// Mapping from tile id to tile. These tiles will get rotated once we start linking them
// together.
tiles: HashMap<usize, Tile>,
// Mapping from edges to a list of Tiles, rotated such that the top edge is used as the key.
// Each Tile here repeats 8 times with all of its variants.
edges: HashMap<Line, Vec<Tile>>,
// Once we perform assembly, this is where we store the tiles, first rows, then columns.
assembled: Vec<Vec<Tile>>,
}
struct MergedTiles {
rows: Vec<u128>,
}
impl TileBag {
fn parse(input: &str) -> TileBag {
let tiles = input
.split("\n\n")
.map(|tile_lines| {
let t = Tile::parse(tile_lines);
(t.id, t)
})
.collect();
let mut out = TileBag {
tiles,
edges: HashMap::new(),
assembled: Vec::new(),
};
out.build_edges();
out
}
fn build_edges(self: &mut Self) {
for t in self.tiles.values() {
for tt in t.make_variants().iter() {
self.edges
.entry(tt.north_edge())
.or_insert_with(|| Vec::new())
.push(tt.clone());
}
}
}
// Counts how many entries we have in edge map ignoring given id.
fn count_edges(self: &Self, edge: &Line, id_to_ignore: usize) -> usize {
let mut cnt = 0;
for other_edge in self.edges.get(edge).unwrap() {
// Don't count ourselves.
if other_edge.id != id_to_ignore {
cnt += 1;
}
}
cnt
}
// Calculates how many other tile edges this given tile can link to.
fn linked_tiles(self: &Self, tile: &Tile) -> usize {
let mut cnt = 0;
for edge in tile.get_edges().iter() {
cnt += self.count_edges(edge, tile.id);
}
cnt
}
// Finds corner tiles - tiles with only two other tiles linked.
fn find_corners(self: &Self) -> Vec<Tile> {
let corners = self
.tiles
.values()
.filter_map(|t| {
if self.linked_tiles(t) == 2 {
Some(t.clone())
} else {
None
}
})
.collect::<Vec<Tile>>();
assert_eq!(corners.len(), 4);
corners
}
fn orient_starting_tile(self: &Self, tile: &Tile) -> Tile {
for t in tile.make_variants().iter() {
if self.count_edges(&t.north_edge(), tile.id) == 0
&& self.count_edges(&t.west_edge(), tile.id) == 0
{
return t.clone();
}
}
panic!();
}
fn get_tile_for_edge(self: &Self, edge: Line, id_to_ignore: usize) -> Option<Tile> {
let edge_tiles = self.edges.get(&edge).unwrap();
if edge_tiles.len() != 2 {
return None;
}
for tile in edge_tiles.iter() {
if tile.id != id_to_ignore {
return Some(tile.clone());
}
}
panic!("shouldn't get here");
}
// Finds a tile that matches the given east edge, ignoring the given tile (so as not to match ourselves).
fn find_east_neighbor(self: &Self, tile: &Tile) -> Option<Tile> {
Some(
self.get_tile_for_edge(invert(tile.east_edge()), tile.id)?
.rotate_cw()
.rotate_cw()
.rotate_cw(),
)
}
// Finds a tile that matches the given south edge, ignoring the given tile (so as not to match ourselves).
fn find_south_neighbor(self: &Self, tile: &Tile) -> Option<Tile> {
self.get_tile_for_edge(invert(tile.south_edge()), tile.id)
}
// Fills in .assembled with all tiles, rotating/flipping them as needed.
fn assemble(self: &mut Self) {
// Pick one of the corner tiles to start with. Doesn't matter which, so we'll pick the last
// one. Rotate the tile so that it is in top-left corner of the assembled picture (only
// east and south links are used).
let mut tile = self.orient_starting_tile(&self.find_corners().pop().unwrap());
loop {
self.assembled.push(Vec::new());
loop {
// println!("{}\n", tile);
self.assembled.last_mut().unwrap().push(tile.clone());
match self.find_east_neighbor(&tile) {
Some(t) => {
tile = t;
}
None => {
break;
}
}
}
// Go to next row. Find the south neighbor of the first tile from previous row.
match self.find_south_neighbor(&self.assembled.last().unwrap()[0]) {
Some(t) => {
tile = t;
}
None => {
break;
}
}
}
}
// Takes self.assembled and turns it into a giant quilt.
fn merge(self: &mut Self) -> MergedTiles {
const SIZE_INNER: usize = SIZE - 2;
let quilt_side_tiles = self.assembled.len();
let mut out: Vec<u128> = Vec::new();
for _ in 0..(self.assembled.len() * SIZE_INNER) {
out.push(0);
}
for (tile_row_idx, tile_row) in self.assembled.iter().enumerate() {
for (tile_col_idx, tile) in tile_row.iter().enumerate() {
for (tile_row2_idx, row) in tile.rows[1..tile.rows.len() - 1].iter().enumerate() {
let out_row = tile_row_idx * SIZE_INNER + tile_row2_idx;
// dbg!(&out_row);
let out_shift = (quilt_side_tiles - tile_col_idx - 1) * SIZE_INNER;
// dbg!(&out_shift);
out[out_row] |= (((*row as u128) >> 1) & 0xff) << out_shift;
}
}
}
MergedTiles { rows: out }
}
}
type MonsterPattern = [u128; 3];
const MONSTER_WIDTH: usize = 20;
fn make_sea_monster_pattern() -> MonsterPattern {
[
parse_line("..................#."),
parse_line("#....##....##....###"),
parse_line(".#..#..#..#..#..#..."),
]
}
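// Sketch of the indexing convention used by `is_sea_monster_at` below: column c
// of the 20-wide pattern maps to bit (MONSTER_WIDTH - c - 1) of each row, so the
// single '#' in the first row (column 18) is bit 1.
//
//   assert_eq!(make_sea_monster_pattern()[0], 0b10); // illustrative check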
impl MergedTiles {
// Counts the number of sea monsters and the number of '#' cells not covered by any monster.
fn count_sea_monsters(self: &mut Self, monster: &MonsterPattern) -> (usize, usize) {
let mut cnt = 0;
for r in 0..(self.rows.len() - monster.len()) {
for c in 0..(self.rows.len() - MONSTER_WIDTH) {
if self.is_sea_monster_at(monster, r, c) {
cnt += 1;
self.remove_sea_monster_at(monster, r, c);
}
}
}
let mut other = 0;
if cnt > 0 {
for r in self.rows.iter() {
let mut i: u128 = *r;
while i > 0 {
if (i & 1) == 1 {
other += 1;
}
i >>= 1;
}
}
}
(cnt, other)
}
fn remove_sea_monster_at(self: &mut Self, monster: &MonsterPattern, row: usize, col: usize) {
for r in 0..monster.len() {
for c in 0..MONSTER_WIDTH {
if (monster[r] >> (MONSTER_WIDTH - c - 1)) & 1 == 1 {
self.rows[row + r] &= !(1 << (c + col));
}
}
}
}
fn is_sea_monster_at(self: &Self, monster: &MonsterPattern, row: usize, col: usize) -> bool {
for r in 0..monster.len() {
for c in 0..MONSTER_WIDTH {
if ((monster[r] >> (MONSTER_WIDTH - c - 1)) & 1 == 1)
&& ((self.rows[row + r] >> (c + col)) & 1 == 0)
{
return false;
}
}
}
true
}
fn rotate_cw(self: &Self) -> MergedTiles {
let mut rows: Vec<u128> = Vec::new();
for i in 0..self.rows.len() {
let mut line = 0;
for j in 0..self.rows.len() {
line = (line << 1)
| ((self.rows[self.rows.len() - j - 1] >> (self.rows.len() - i - 1)) & 1);
}
rows.push(line);
}
MergedTiles { rows }
}
fn | (self: &Self) -> MergedTiles {
MergedTiles {
rows: self.rows.iter().cloned().rev().collect(),
}
}
}
fn main() {
let contents = std::fs::read_to_string("input/20.txt").expect("read failed");
let mut bag = TileBag::parse(&contents);
// Part 1: find corners, multiply their ids together.
let corners = bag.find_corners();
// dbg!(&corners);
let product = corners.iter().fold(1, |p, tile| p * tile.id);
dbg!(product);
// Part 2: reassembly & monster finding.
bag.assemble();
let mut merged = bag.merge();
let monster = make_sea_monster_pattern();
for i in 0..8 {
let (sea_monsters, other_tiles) = merged.count_sea_monsters(&monster);
if sea_monsters > 0 {
dbg!(sea_monsters, other_tiles);
break;
}
merged = merged.rotate_cw();
if i == 4 {
merged = merged.mirror_vertical();
}
}
}
| mirror_vertical | identifier_name |
day20.rs | #![allow(dead_code)]
use std::collections::HashMap;
mod lib;
// Width (and height) of each tile.
const SIZE: usize = 10;
// One row/side of each tile.
type Line = u16;
#[derive(PartialEq, Eq, Debug, Clone, Hash, Default)]
struct Tile {
id: usize,
rows: Vec<Line>,
flipped: bool,
rotated: u32,
}
fn bit_to_char(input: bool) -> char {
if input {
'#'
} else {
'.'
}
}
// Converts a single line to a "#.#.#...." string.
fn line_to_string(input: Line) -> String {
(0..SIZE)
.rev()
.map(|i| bit_to_char(((input >> i) & 1) == 1))
.collect()
}
fn line_to_string_u128(input: u128, size: usize) -> String {
(0..size)
.rev()
.map(|i| bit_to_char(((input >> i) & 1) == 1))
.collect()
}
fn invert(input: Line) -> Line {
(0..SIZE).fold(0, |out, i| (out << 1) | ((input >> i) & 1))
}
fn parse_line(line: &str) -> u128 {
let mut out = 0;
for c in line.chars() {
out = (out << 1)
| match c {
'.' => 0,
'#' => 1,
_ => {
panic!("invalid char: {}", c);
}
};
}
out
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_line_to_string() |
#[test]
fn test_invert() {
assert_eq!(invert(1), 512);
assert_eq!(invert(512), 1);
assert_eq!(invert(2), 256);
}
}
impl std::fmt::Display for Tile {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut first = true;
for line in self.rows.iter() {
if first {
first = false;
} else {
f.write_str("\n")?;
}
f.write_str(&line_to_string(*line))?;
}
Ok(())
}
}
impl Tile {
fn parse(input: &str) -> Tile {
let mut lines = input.lines();
let id = Tile::parse_id(lines.next().unwrap());
let rows = lines.map(|line| parse_line(line) as Line).collect();
Tile {
id,
rows,
..Tile::default()
}
}
fn parse_id(line: &str) -> usize {
assert!(line.starts_with("Tile "));
assert!(line.ends_with(':'));
line[5..SIZE - 1].parse().unwrap()
}
fn north_edge(self: &Self) -> Line {
self.rows[0]
}
fn west_edge(self: &Self) -> Line {
self.rotate_cw().rows[0]
}
fn east_edge(self: &Self) -> Line {
self.rotate_cw().rotate_cw().rotate_cw().rows[0]
}
fn south_edge(self: &Self) -> Line {
self.rotate_cw().rotate_cw().rows[0]
}
fn get_edges(self: &Self) -> [Line; 4] {
let rot1 = self.rotate_cw();
let rot2 = rot1.rotate_cw();
let rot3 = rot2.rotate_cw();
[
self.north_edge(),
rot1.north_edge(),
rot2.north_edge(),
rot3.north_edge(),
]
}
fn rotate_cw(self: &Self) -> Tile {
let mut rows: Vec<Line> = Vec::new();
for i in 0..SIZE {
let mut line = 0;
for j in 0..SIZE {
line = (line << 1) | ((self.rows[SIZE - j - 1] >> (SIZE - i - 1)) & 1);
}
rows.push(line);
}
Tile {
id: self.id,
rows,
flipped: self.flipped,
rotated: (self.rotated + 1) % 4,
// links: [self.links[3], self.links[0], self.links[1], self.links[2]],
}
}
fn mirror_vertical(self: &Self) -> Tile {
Tile {
id: self.id,
rows: self.rows.iter().cloned().rev().collect(),
flipped: !self.flipped,
rotated: 0,
// links: [self.links[2], self.links[1], self.links[0], self.links[3]],
}
}
// Builds all 8 variants of rotations + flippity.
fn make_variants(self: &Self) -> [Self; 8] {
let rot1 = self.rotate_cw();
let rot2 = rot1.rotate_cw();
let rot3 = rot2.rotate_cw();
let flip0 = self.mirror_vertical();
let flip1 = flip0.rotate_cw();
let flip2 = flip1.rotate_cw();
let flip3 = flip2.rotate_cw();
[self.clone(), rot1, rot2, rot3, flip0, flip1, flip2, flip3]
}
}
// All tiles and various helper structs.
#[derive(Debug)]
struct TileBag {
// Mapping from tile id to tile. These tiles will get rotated once we start linking them
// together.
tiles: HashMap<usize, Tile>,
// Mapping from edges to a list of Tiles, rotated such that the top edge is used as the key.
// Each Tile here repeats 8 times with all of its variants.
edges: HashMap<Line, Vec<Tile>>,
// Once we perform assembly, this is where we store the tiles, first rows, then columns.
assembled: Vec<Vec<Tile>>,
}
struct MergedTiles {
rows: Vec<u128>,
}
impl TileBag {
fn parse(input: &str) -> TileBag {
let tiles = input
.split("\n\n")
.map(|tile_lines| {
let t = Tile::parse(tile_lines);
(t.id, t)
})
.collect();
let mut out = TileBag {
tiles,
edges: HashMap::new(),
assembled: Vec::new(),
};
out.build_edges();
out
}
fn build_edges(self: &mut Self) {
for t in self.tiles.values() {
for tt in t.make_variants().iter() {
self.edges
.entry(tt.north_edge())
.or_insert_with(|| Vec::new())
.push(tt.clone());
}
}
}
// Counts how many entries we have in edge map ignoring given id.
fn count_edges(self: &Self, edge: &Line, id_to_ignore: usize) -> usize {
let mut cnt = 0;
for other_edge in self.edges.get(edge).unwrap() {
// Don't count ourselves.
if other_edge.id != id_to_ignore {
cnt += 1;
}
}
cnt
}
// Calculates how many other tile edges this given tile can link to.
fn linked_tiles(self: &Self, tile: &Tile) -> usize {
let mut cnt = 0;
for edge in tile.get_edges().iter() {
cnt += self.count_edges(edge, tile.id);
}
cnt
}
// Finds corner tiles - tiles with only two other tiles linked.
fn find_corners(self: &Self) -> Vec<Tile> {
let corners = self
.tiles
.values()
.filter_map(|t| {
if self.linked_tiles(t) == 2 {
Some(t.clone())
} else {
None
}
})
.collect::<Vec<Tile>>();
assert_eq!(corners.len(), 4);
corners
}
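// Rotates/flips a corner tile so that its two unmatched edges face north and west, i.e. so it can sit in the top-left corner of the assembled picture.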
fn orient_starting_tile(self: &Self, tile: &Tile) -> Tile {
for t in tile.make_variants().iter() {
if self.count_edges(&t.north_edge(), tile.id) == 0
&& self.count_edges(&t.west_edge(), tile.id) == 0
{
return t.clone();
}
}
panic!();
}
fn get_tile_for_edge(self: &Self, edge: Line, id_to_ignore: usize) -> Option<Tile> {
let edge_tiles = self.edges.get(&edge).unwrap();
if edge_tiles.len() != 2 {
return None;
}
for tile in edge_tiles.iter() {
if tile.id != id_to_ignore {
return Some(tile.clone());
}
}
panic!("shouldn't get here");
}
// Finds a tile that matches the given east edge, ignoring the given tile (so as not to match ourselves).
fn find_east_neighbor(self: &Self, tile: &Tile) -> Option<Tile> {
Some(
self.get_tile_for_edge(invert(tile.east_edge()), tile.id)?
.rotate_cw()
.rotate_cw()
.rotate_cw(),
)
}
// Finds a tile that matches the given south edge, ignoring the given tile (so as not to match ourselves).
fn find_south_neighbor(self: &Self, tile: &Tile) -> Option<Tile> {
self.get_tile_for_edge(invert(tile.south_edge()), tile.id)
}
// Fills in .assembled with all tiles, rotating/flipping them as needed.
fn assemble(self: &mut Self) {
// Pick one of the corner tiles to start with. Doesn't matter which, so we'll pick the last
// one. Rotate the tile so that it is in top-left corner of the assembled picture (only
// east and south links are used).
let mut tile = self.orient_starting_tile(&self.find_corners().pop().unwrap());
loop {
self.assembled.push(Vec::new());
loop {
// println!("{}\n", tile);
self.assembled.last_mut().unwrap().push(tile.clone());
match self.find_east_neighbor(&tile) {
Some(t) => {
tile = t;
}
None => {
break;
}
}
}
// Go to next row. Find the south neighbor of the first tile from previous row.
match self.find_south_neighbor(&self.assembled.last().unwrap()[0]) {
Some(t) => {
tile = t;
}
None => {
break;
}
}
}
}
// Takes self.assembled and turns it into a giant quilt.
fn merge(self: &mut Self) -> MergedTiles {
const SIZE_INNER: usize = SIZE - 2;
let quilt_side_tiles = self.assembled.len();
let mut out: Vec<u128> = Vec::new();
for _ in 0..(self.assembled.len() * SIZE_INNER) {
out.push(0);
}
for (tile_row_idx, tile_row) in self.assembled.iter().enumerate() {
for (tile_col_idx, tile) in tile_row.iter().enumerate() {
for (tile_row2_idx, row) in tile.rows[1..tile.rows.len() - 1].iter().enumerate() {
let out_row = tile_row_idx * SIZE_INNER + tile_row2_idx;
// dbg!(&out_row);
let out_shift = (quilt_side_tiles - tile_col_idx - 1) * SIZE_INNER;
// dbg!(&out_shift);
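// Drop the east border bit (>> 1), keep the 8 interior bits (& 0xff), then shift them into this tile column's slot of the quilt row.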
out[out_row] |= (((*row as u128) >> 1) & 0xff) << out_shift;
}
}
}
MergedTiles { rows: out }
}
}
type MonsterPattern = [u128; 3];
const MONSTER_WIDTH: usize = 20;
fn make_sea_monster_pattern() -> MonsterPattern {
[
parse_line("..................#."),
parse_line("#....##....##....###"),
parse_line(".#..#..#..#..#..#..."),
]
}
impl MergedTiles {
// Counts number of sea monsters and tiles without sea monsters on them.
fn count_sea_monsters(self: &mut Self, monster: &MonsterPattern) -> (usize, usize) {
let mut cnt = 0;
for r in 0..(self.rows.len() - monster.len()) {
for c in 0..(self.rows.len() - MONSTER_WIDTH) {
if self.is_sea_monster_at(monster, r, c) {
cnt += 1;
self.remove_sea_monster_at(monster, r, c);
}
}
}
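// With all monster cells cleared above, the remaining set bits are the '#' tiles that belong to no sea monster.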
let mut other = 0;
if cnt > 0 {
for r in self.rows.iter() {
let mut i: u128 = *r;
while i > 0 {
if (i & 1) == 1 {
other += 1;
}
i >>= 1;
}
}
}
(cnt, other)
}
fn remove_sea_monster_at(self: &mut Self, monster: &MonsterPattern, row: usize, col: usize) {
for r in 0..monster.len() {
for c in 0..MONSTER_WIDTH {
if (monster[r] >> (MONSTER_WIDTH - c - 1)) & 1 == 1 {
self.rows[row + r] &=!(1 << (c + col));
}
}
}
}
fn is_sea_monster_at(self: &Self, monster: &MonsterPattern, row: usize, col: usize) -> bool {
for r in 0..monster.len() {
for c in 0..MONSTER_WIDTH {
if ((monster[r] >> (MONSTER_WIDTH - c - 1)) & 1 == 1)
&& ((self.rows[row + r] >> (c + col)) & 1 == 0)
{
return false;
}
}
}
true
}
fn rotate_cw(self: &Self) -> MergedTiles {
let mut rows: Vec<u128> = Vec::new();
for i in 0..self.rows.len() {
let mut line = 0;
for j in 0..self.rows.len() {
line = (line << 1)
| ((self.rows[self.rows.len() - j - 1] >> (self.rows.len() - i - 1)) & 1);
}
rows.push(line);
}
MergedTiles { rows }
}
fn mirror_vertical(self: &Self) -> MergedTiles {
MergedTiles {
rows: self.rows.iter().cloned().rev().collect(),
}
}
}
fn main() {
let contents = std::fs::read_to_string("input/20.txt").expect("read failed");
let mut bag = TileBag::parse(&contents);
// Part 1: find corners, multiply their ids together.
let corners = bag.find_corners();
// dbg!(&corners);
let product = corners.iter().fold(1, |p, tile| p * tile.id);
dbg!(product);
// Part 2: reassembly & monster finding.
bag.assemble();
let mut merged = bag.merge();
let monster = make_sea_monster_pattern();
for i in 0..8 {
let (sea_monsters, other_tiles) = merged.count_sea_monsters(&monster);
if sea_monsters > 0 {
dbg!(sea_monsters, other_tiles);
break;
}
merged = merged.rotate_cw();
if i == 4 {
merged = merged.mirror_vertical();
}
}
}
| {
assert_eq!(line_to_string(1), ".........#");
assert_eq!(line_to_string(391), ".##....###");
} | identifier_body |
write.rs | unreachable = sess.target.target.options.trap_unreachable;
let emit_stack_size_section = sess.opts.debugging_opts.emit_stack_sizes;
let asm_comments = sess.asm_comments();
Arc::new(move || {
let tm = unsafe {
llvm::LLVMRustCreateTargetMachine(
triple.as_ptr(), cpu.as_ptr(), features.as_ptr(),
code_model,
reloc_model,
opt_level,
use_softfp,
is_pie_binary,
ffunction_sections,
fdata_sections,
trap_unreachable,
singlethread,
asm_comments,
emit_stack_size_section,
)
};
tm.ok_or_else(|| {
format!("Could not create LLVM TargetMachine for triple: {}",
triple.to_str().unwrap())
})
})
}
pub(crate) fn save_temp_bitcode(
cgcx: &CodegenContext<LlvmCodegenBackend>,
module: &ModuleCodegen<ModuleLlvm>,
name: &str
) {
if !cgcx.save_temps {
return
}
unsafe {
let ext = format!("{}.bc", name);
let cgu = Some(&module.name[..]);
let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
let cstr = path_to_c_string(&path);
let llmod = module.module_llvm.llmod();
llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
}
}
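/// Installs LLVM's inline-asm and diagnostic callbacks for the lifetime of this value.
/// The callbacks forward to the boxed (CodegenContext, Handler) pair; `Drop` resets the
/// handlers to null and frees that box.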
pub struct DiagnosticHandlers<'a> {
data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
llcx: &'a llvm::Context,
}
impl<'a> DiagnosticHandlers<'a> {
pub fn new(cgcx: &'a CodegenContext<LlvmCodegenBackend>,
handler: &'a Handler,
llcx: &'a llvm::Context) -> Self {
let data = Box::into_raw(Box::new((cgcx, handler)));
unsafe {
llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data.cast());
}
DiagnosticHandlers { data, llcx }
}
}
impl<'a> Drop for DiagnosticHandlers<'a> {
fn drop(&mut self) {
use std::ptr::null_mut;
unsafe {
llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, null_mut());
drop(Box::from_raw(self.data));
}
}
}
unsafe extern "C" fn report_inline_asm(cgcx: &CodegenContext<LlvmCodegenBackend>,
msg: &str,
cookie: c_uint) {
cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_owned());
}
unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic,
user: *const c_void,
cookie: c_uint) {
if user.is_null() {
return
}
let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s))
.expect("non-UTF8 SMDiagnostic");
report_inline_asm(cgcx, &msg, cookie);
}
unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
if user.is_null() {
return
}
let (cgcx, diag_handler) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
match llvm::diagnostic::Diagnostic::unpack(info) {
llvm::diagnostic::InlineAsm(inline) => {
report_inline_asm(cgcx,
&llvm::twine_to_string(inline.message),
inline.cookie);
}
llvm::diagnostic::Optimization(opt) => {
let enabled = match cgcx.remark {
Passes::All => true,
Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
};
if enabled {
diag_handler.note_without_error(&format!("optimization {} for {} at {}:{}:{}: {}",
opt.kind.describe(),
opt.pass_name,
opt.filename,
opt.line,
opt.column,
opt.message));
}
}
llvm::diagnostic::PGO(diagnostic_ref) |
llvm::diagnostic::Linker(diagnostic_ref) => {
let msg = llvm::build_string(|s| {
llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
}).expect("non-UTF8 diagnostic");
diag_handler.warn(&msg);
}
llvm::diagnostic::UnknownDiagnostic(..) => {},
}
}
// Unsafe due to LLVM calls.
pub(crate) unsafe fn optimize(cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: &ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig)
-> Result<(), FatalError>
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize");
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
if config.emit_no_opt_bc {
let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
let out = path_to_c_string(&out);
llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
}
if config.opt_level.is_some() {
// Create the two optimizing pass managers. These mirror what clang
// does, and are populated by LLVM's default PassManagerBuilder.
// Each manager has a different set of passes, but they also share
// some common passes.
let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
let mpm = llvm::LLVMCreatePassManager();
{
let find_pass = |pass_name: &str| {
let pass_name = SmallCStr::new(pass_name);
llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr())
};
if config.verify_llvm_ir {
// Verification should run as the very first pass.
llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap());
}
let mut extra_passes = Vec::new();
let mut have_name_anon_globals_pass = false;
for pass_name in &config.passes {
if pass_name == "lint" {
// Linting should also be performed early, directly on the generated IR.
llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
continue;
}
if let Some(pass) = find_pass(pass_name) {
extra_passes.push(pass);
} else {
diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name));
}
if pass_name == "name-anon-globals" {
have_name_anon_globals_pass = true;
}
}
for pass_name in &cgcx.plugin_passes {
if let Some(pass) = find_pass(pass_name) {
extra_passes.push(pass);
} else {
diag_handler.err(&format!("a plugin asked for LLVM pass \
`{}` but LLVM does not \
recognize it", pass_name));
}
if pass_name == "name-anon-globals" {
have_name_anon_globals_pass = true;
}
}
// Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
// to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
// we'll get errors in LLVM.
let using_thin_buffers = config.bitcode_needed();
if !config.no_prepopulate_passes {
llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
let opt_level = config.opt_level.map(|x| to_llvm_opt_settings(x).0)
.unwrap_or(llvm::CodeGenOptLevel::None);
let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal ||
(cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| {
llvm::LLVMRustAddLastExtensionPasses(
b, extra_passes.as_ptr(), extra_passes.len() as size_t);
llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm);
llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm);
});
have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
if using_thin_buffers && !prepare_for_thin_lto {
llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
have_name_anon_globals_pass = true;
}
} else {
// If we don't use the standard pipeline, directly populate the MPM
// with the extra passes.
for pass in extra_passes {
llvm::LLVMRustAddPass(mpm, pass);
}
}
if using_thin_buffers && !have_name_anon_globals_pass {
// As described above, this will probably cause an error in LLVM
if config.no_prepopulate_passes {
diag_handler.err("The current compilation is going to use thin LTO buffers \
without running LLVM's NameAnonGlobals pass. \
This will likely cause errors in LLVM. Consider adding \
-C passes=name-anon-globals to the compiler command line.");
} else {
bug!("We are using thin LTO buffers without running the NameAnonGlobals pass. \
This will likely cause errors in LLVM and should never happen.");
}
}
}
diag_handler.abort_if_errors();
// Finally, run the actual optimization passes
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize_function_passes");
time_ext(config.time_passes,
&format!("llvm function passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRustRunFunctionPassManager(fpm, llmod)
});
}
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize_module_passes");
time_ext(config.time_passes,
&format!("llvm module passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRunPassManager(mpm, llmod)
});
}
// Deallocate managers that we're now done with
llvm::LLVMDisposePassManager(fpm);
llvm::LLVMDisposePassManager(mpm);
}
Ok(())
}
pub(crate) unsafe fn codegen(cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig)
-> Result<CompiledModule, FatalError>
{
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen");
{
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
if cgcx.msvc_imps_needed {
create_msvc_imps(cgcx, llcx, llmod);
}
// A codegen-specific pass manager is used to generate object
// files for an LLVM module.
//
// Apparently each of these pass managers is a one-shot kind of
// thing, so we create a new one for each type of output. The
// pass manager passed to the closure should be ensured to not
// escape the closure itself, and the manager should only be
// used once.
unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine,
llmod: &'ll llvm::Module,
no_builtins: bool,
f: F) -> R
where F: FnOnce(&'ll mut PassManager<'ll>) -> R,
{
let cpm = llvm::LLVMCreatePassManager();
llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
f(cpm)
}
// If we don't have the integrated assembler, then we need to emit asm
// from LLVM and use `gcc` to create the object file.
let asm_to_obj = config.emit_obj && config.no_integrated_as;
// Change what we write and cleanup based on whether obj files are
// just llvm bitcode. In that case write bitcode, and possibly
// delete the bitcode if it wasn't requested. Don't generate the
// machine code, instead copy the .o file from the .bc
let write_bc = config.emit_bc || config.obj_is_bitcode;
let rm_bc = !config.emit_bc && config.obj_is_bitcode;
let write_obj = config.emit_obj && !config.obj_is_bitcode && !asm_to_obj;
let copy_bc_to_obj = config.emit_obj && config.obj_is_bitcode;
let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
if write_bc || config.emit_bc_compressed || config.embed_bitcode {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_make_bitcode");
let thin = ThinBuffer::new(llmod);
let data = thin.data();
if write_bc {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_bitcode");
if let Err(e) = fs::write(&bc_out, data) {
let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
diag_handler.err(&msg);
}
}
if config.embed_bitcode {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_embed_bitcode");
embed_bitcode(cgcx, llcx, llmod, Some(data));
}
if config.emit_bc_compressed {
let _timer =
cgcx.prof.generic_activity("LLVM_module_codegen_emit_compressed_bitcode");
let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
let data = bytecode::encode(&module.name, data);
if let Err(e) = fs::write(&dst, data) { | diag_handler.err(&msg);
}
}
} else if config.embed_bitcode_marker {
embed_bitcode(cgcx, llcx, llmod, None);
}
time_ext(config.time_passes, &format!("codegen passes [{}]", module_name.unwrap()),
|| -> Result<(), FatalError> {
if config.emit_ir {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_ir");
let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
let out_c = path_to_c_string(&out);
extern "C" fn demangle_callback(input_ptr: *const c_char,
input_len: size_t,
output_ptr: *mut c_char,
output_len: size_t) -> size_t {
let input = unsafe {
slice::from_raw_parts(input_ptr as *const u8, input_len as usize)
};
let input = match str::from_utf8(input) {
Ok(s) => s,
Err(_) => return 0,
};
| let msg = format!("failed to write bytecode to {}: {}", dst.display(), e); | random_line_split |
write.rs | = sess.target.target.options.trap_unreachable;
let emit_stack_size_section = sess.opts.debugging_opts.emit_stack_sizes;
let asm_comments = sess.asm_comments();
Arc::new(move || {
let tm = unsafe {
llvm::LLVMRustCreateTargetMachine(
triple.as_ptr(), cpu.as_ptr(), features.as_ptr(),
code_model,
reloc_model,
opt_level,
use_softfp,
is_pie_binary,
ffunction_sections,
fdata_sections,
trap_unreachable,
singlethread,
asm_comments,
emit_stack_size_section,
)
};
tm.ok_or_else(|| {
format!("Could not create LLVM TargetMachine for triple: {}",
triple.to_str().unwrap())
})
})
}
pub(crate) fn save_temp_bitcode(
cgcx: &CodegenContext<LlvmCodegenBackend>,
module: &ModuleCodegen<ModuleLlvm>,
name: &str
) {
if !cgcx.save_temps {
return
}
unsafe {
let ext = format!("{}.bc", name);
let cgu = Some(&module.name[..]);
let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
let cstr = path_to_c_string(&path);
let llmod = module.module_llvm.llmod();
llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
}
}
pub struct DiagnosticHandlers<'a> {
data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
llcx: &'a llvm::Context,
}
impl<'a> DiagnosticHandlers<'a> {
pub fn new(cgcx: &'a CodegenContext<LlvmCodegenBackend>,
handler: &'a Handler,
llcx: &'a llvm::Context) -> Self {
let data = Box::into_raw(Box::new((cgcx, handler)));
unsafe {
llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data.cast());
}
DiagnosticHandlers { data, llcx }
}
}
impl<'a> Drop for DiagnosticHandlers<'a> {
fn drop(&mut self) {
use std::ptr::null_mut;
unsafe {
llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, null_mut());
drop(Box::from_raw(self.data));
}
}
}
unsafe extern "C" fn | (cgcx: &CodegenContext<LlvmCodegenBackend>,
msg: &str,
cookie: c_uint) {
cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_owned());
}
unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic,
user: *const c_void,
cookie: c_uint) {
if user.is_null() {
return
}
let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s))
.expect("non-UTF8 SMDiagnostic");
report_inline_asm(cgcx, &msg, cookie);
}
unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
if user.is_null() {
return
}
let (cgcx, diag_handler) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
match llvm::diagnostic::Diagnostic::unpack(info) {
llvm::diagnostic::InlineAsm(inline) => {
report_inline_asm(cgcx,
&llvm::twine_to_string(inline.message),
inline.cookie);
}
llvm::diagnostic::Optimization(opt) => {
let enabled = match cgcx.remark {
Passes::All => true,
Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
};
if enabled {
diag_handler.note_without_error(&format!("optimization {} for {} at {}:{}:{}: {}",
opt.kind.describe(),
opt.pass_name,
opt.filename,
opt.line,
opt.column,
opt.message));
}
}
llvm::diagnostic::PGO(diagnostic_ref) |
llvm::diagnostic::Linker(diagnostic_ref) => {
let msg = llvm::build_string(|s| {
llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
}).expect("non-UTF8 diagnostic");
diag_handler.warn(&msg);
}
llvm::diagnostic::UnknownDiagnostic(..) => {},
}
}
// Unsafe due to LLVM calls.
pub(crate) unsafe fn optimize(cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: &ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig)
-> Result<(), FatalError>
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize");
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
if config.emit_no_opt_bc {
let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
let out = path_to_c_string(&out);
llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
}
if config.opt_level.is_some() {
// Create the two optimizing pass managers. These mirror what clang
// does, and are populated by LLVM's default PassManagerBuilder.
// Each manager has a different set of passes, but they also share
// some common passes.
let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
let mpm = llvm::LLVMCreatePassManager();
{
let find_pass = |pass_name: &str| {
let pass_name = SmallCStr::new(pass_name);
llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr())
};
if config.verify_llvm_ir {
// Verification should run as the very first pass.
llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap());
}
let mut extra_passes = Vec::new();
let mut have_name_anon_globals_pass = false;
for pass_name in &config.passes {
if pass_name == "lint" {
// Linting should also be performed early, directly on the generated IR.
llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
continue;
}
if let Some(pass) = find_pass(pass_name) {
extra_passes.push(pass);
} else {
diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name));
}
if pass_name == "name-anon-globals" {
have_name_anon_globals_pass = true;
}
}
for pass_name in &cgcx.plugin_passes {
if let Some(pass) = find_pass(pass_name) {
extra_passes.push(pass);
} else {
diag_handler.err(&format!("a plugin asked for LLVM pass \
`{}` but LLVM does not \
recognize it", pass_name));
}
if pass_name == "name-anon-globals" {
have_name_anon_globals_pass = true;
}
}
// Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
// to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
// we'll get errors in LLVM.
let using_thin_buffers = config.bitcode_needed();
if !config.no_prepopulate_passes {
llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
let opt_level = config.opt_level.map(|x| to_llvm_opt_settings(x).0)
.unwrap_or(llvm::CodeGenOptLevel::None);
let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal ||
(cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| {
llvm::LLVMRustAddLastExtensionPasses(
b, extra_passes.as_ptr(), extra_passes.len() as size_t);
llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm);
llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm);
});
have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
if using_thin_buffers && !prepare_for_thin_lto {
llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
have_name_anon_globals_pass = true;
}
} else {
// If we don't use the standard pipeline, directly populate the MPM
// with the extra passes.
for pass in extra_passes {
llvm::LLVMRustAddPass(mpm, pass);
}
}
if using_thin_buffers && !have_name_anon_globals_pass {
// As described above, this will probably cause an error in LLVM
if config.no_prepopulate_passes {
diag_handler.err("The current compilation is going to use thin LTO buffers \
without running LLVM's NameAnonGlobals pass. \
This will likely cause errors in LLVM. Consider adding \
-C passes=name-anon-globals to the compiler command line.");
} else {
bug!("We are using thin LTO buffers without running the NameAnonGlobals pass. \
This will likely cause errors in LLVM and should never happen.");
}
}
}
diag_handler.abort_if_errors();
// Finally, run the actual optimization passes
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize_function_passes");
time_ext(config.time_passes,
&format!("llvm function passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRustRunFunctionPassManager(fpm, llmod)
});
}
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize_module_passes");
time_ext(config.time_passes,
&format!("llvm module passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRunPassManager(mpm, llmod)
});
}
// Deallocate managers that we're now done with
llvm::LLVMDisposePassManager(fpm);
llvm::LLVMDisposePassManager(mpm);
}
Ok(())
}
pub(crate) unsafe fn codegen(cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig)
-> Result<CompiledModule, FatalError>
{
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen");
{
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
if cgcx.msvc_imps_needed {
create_msvc_imps(cgcx, llcx, llmod);
}
// A codegen-specific pass manager is used to generate object
// files for an LLVM module.
//
// Apparently each of these pass managers is a one-shot kind of
// thing, so we create a new one for each type of output. The
// pass manager passed to the closure should be ensured to not
// escape the closure itself, and the manager should only be
// used once.
unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine,
llmod: &'ll llvm::Module,
no_builtins: bool,
f: F) -> R
where F: FnOnce(&'ll mut PassManager<'ll>) -> R,
{
let cpm = llvm::LLVMCreatePassManager();
llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
f(cpm)
}
// If we don't have the integrated assembler, then we need to emit asm
// from LLVM and use `gcc` to create the object file.
let asm_to_obj = config.emit_obj && config.no_integrated_as;
// Change what we write and cleanup based on whether obj files are
// just llvm bitcode. In that case write bitcode, and possibly
// delete the bitcode if it wasn't requested. Don't generate the
// machine code, instead copy the .o file from the .bc
let write_bc = config.emit_bc || config.obj_is_bitcode;
let rm_bc = !config.emit_bc && config.obj_is_bitcode;
let write_obj = config.emit_obj && !config.obj_is_bitcode && !asm_to_obj;
let copy_bc_to_obj = config.emit_obj && config.obj_is_bitcode;
let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
if write_bc || config.emit_bc_compressed || config.embed_bitcode {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_make_bitcode");
let thin = ThinBuffer::new(llmod);
let data = thin.data();
if write_bc {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_bitcode");
if let Err(e) = fs::write(&bc_out, data) {
let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
diag_handler.err(&msg);
}
}
if config.embed_bitcode {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_embed_bitcode");
embed_bitcode(cgcx, llcx, llmod, Some(data));
}
if config.emit_bc_compressed {
let _timer =
cgcx.prof.generic_activity("LLVM_module_codegen_emit_compressed_bitcode");
let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
let data = bytecode::encode(&module.name, data);
if let Err(e) = fs::write(&dst, data) {
let msg = format!("failed to write bytecode to {}: {}", dst.display(), e);
diag_handler.err(&msg);
}
}
} else if config.embed_bitcode_marker {
embed_bitcode(cgcx, llcx, llmod, None);
}
time_ext(config.time_passes, &format!("codegen passes [{}]", module_name.unwrap()),
|| -> Result<(), FatalError> {
if config.emit_ir {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_ir");
let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
let out_c = path_to_c_string(&out);
extern "C" fn demangle_callback(input_ptr: *const c_char,
input_len: size_t,
output_ptr: *mut c_char,
output_len: size_t) -> size_t {
let input = unsafe {
slice::from_raw_parts(input_ptr as *const u8, input_len as usize)
};
let input = match str::from_utf8(input) {
Ok(s) => s,
Err(_) => return 0,
| report_inline_asm | identifier_name |
write.rs | llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
have_name_anon_globals_pass = true;
}
} else {
// If we don't use the standard pipeline, directly populate the MPM
// with the extra passes.
for pass in extra_passes {
llvm::LLVMRustAddPass(mpm, pass);
}
}
if using_thin_buffers && !have_name_anon_globals_pass {
// As described above, this will probably cause an error in LLVM
if config.no_prepopulate_passes {
diag_handler.err("The current compilation is going to use thin LTO buffers \
without running LLVM's NameAnonGlobals pass. \
This will likely cause errors in LLVM. Consider adding \
-C passes=name-anon-globals to the compiler command line.");
} else {
bug!("We are using thin LTO buffers without running the NameAnonGlobals pass. \
This will likely cause errors in LLVM and should never happen.");
}
}
}
diag_handler.abort_if_errors();
// Finally, run the actual optimization passes
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize_function_passes");
time_ext(config.time_passes,
&format!("llvm function passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRustRunFunctionPassManager(fpm, llmod)
});
}
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize_module_passes");
time_ext(config.time_passes,
&format!("llvm module passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRunPassManager(mpm, llmod)
});
}
// Deallocate managers that we're now done with
llvm::LLVMDisposePassManager(fpm);
llvm::LLVMDisposePassManager(mpm);
}
Ok(())
}
pub(crate) unsafe fn codegen(cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig)
-> Result<CompiledModule, FatalError>
{
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen");
{
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
if cgcx.msvc_imps_needed {
create_msvc_imps(cgcx, llcx, llmod);
}
// A codegen-specific pass manager is used to generate object
// files for an LLVM module.
//
// Apparently each of these pass managers is a one-shot kind of
// thing, so we create a new one for each type of output. The
// pass manager passed to the closure should be ensured to not
// escape the closure itself, and the manager should only be
// used once.
unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine,
llmod: &'ll llvm::Module,
no_builtins: bool,
f: F) -> R
where F: FnOnce(&'ll mut PassManager<'ll>) -> R,
{
let cpm = llvm::LLVMCreatePassManager();
llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
f(cpm)
}
// If we don't have the integrated assembler, then we need to emit asm
// from LLVM and use `gcc` to create the object file.
let asm_to_obj = config.emit_obj && config.no_integrated_as;
// Change what we write and cleanup based on whether obj files are
// just llvm bitcode. In that case write bitcode, and possibly
// delete the bitcode if it wasn't requested. Don't generate the
// machine code, instead copy the .o file from the .bc
let write_bc = config.emit_bc || config.obj_is_bitcode;
let rm_bc = !config.emit_bc && config.obj_is_bitcode;
let write_obj = config.emit_obj && !config.obj_is_bitcode && !asm_to_obj;
let copy_bc_to_obj = config.emit_obj && config.obj_is_bitcode;
let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
if write_bc || config.emit_bc_compressed || config.embed_bitcode {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_make_bitcode");
let thin = ThinBuffer::new(llmod);
let data = thin.data();
if write_bc {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_bitcode");
if let Err(e) = fs::write(&bc_out, data) {
let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
diag_handler.err(&msg);
}
}
if config.embed_bitcode {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_embed_bitcode");
embed_bitcode(cgcx, llcx, llmod, Some(data));
}
if config.emit_bc_compressed {
let _timer =
cgcx.prof.generic_activity("LLVM_module_codegen_emit_compressed_bitcode");
let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
let data = bytecode::encode(&module.name, data);
if let Err(e) = fs::write(&dst, data) {
let msg = format!("failed to write bytecode to {}: {}", dst.display(), e);
diag_handler.err(&msg);
}
}
} else if config.embed_bitcode_marker {
embed_bitcode(cgcx, llcx, llmod, None);
}
time_ext(config.time_passes, &format!("codegen passes [{}]", module_name.unwrap()),
|| -> Result<(), FatalError> {
if config.emit_ir {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_ir");
let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
let out_c = path_to_c_string(&out);
extern "C" fn demangle_callback(input_ptr: *const c_char,
input_len: size_t,
output_ptr: *mut c_char,
output_len: size_t) -> size_t {
let input = unsafe {
slice::from_raw_parts(input_ptr as *const u8, input_len as usize)
};
let input = match str::from_utf8(input) {
Ok(s) => s,
Err(_) => return 0,
};
let output = unsafe {
slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
};
let mut cursor = io::Cursor::new(output);
let demangled = match rustc_demangle::try_demangle(input) {
Ok(d) => d,
Err(_) => return 0,
};
if let Err(_) = write!(cursor, "{:#}", demangled) {
// Possible only if provided buffer is not big enough
return 0;
}
cursor.position() as size_t
}
with_codegen(tm, llmod, config.no_builtins, |cpm| {
let result =
llvm::LLVMRustPrintModule(cpm, llmod, out_c.as_ptr(), demangle_callback);
llvm::LLVMDisposePassManager(cpm);
result.into_result().map_err(|()| {
let msg = format!("failed to write LLVM IR to {}", out.display());
llvm_err(diag_handler, &msg)
})
})?;
}
if config.emit_asm || asm_to_obj {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_asm");
let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
// We can't use the same module for asm and binary output, because that triggers
// various errors like invalid IR or broken binaries, so we might have to clone the
// module to produce the asm output
let llmod = if config.emit_obj {
llvm::LLVMCloneModule(llmod)
} else {
llmod
};
with_codegen(tm, llmod, config.no_builtins, |cpm| {
write_output_file(diag_handler, tm, cpm, llmod, &path,
llvm::FileType::AssemblyFile)
})?;
}
if write_obj {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_obj");
with_codegen(tm, llmod, config.no_builtins, |cpm| {
write_output_file(diag_handler, tm, cpm, llmod, &obj_out,
llvm::FileType::ObjectFile)
})?;
} else if asm_to_obj {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_asm_to_obj");
let assembly = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
run_assembler(cgcx, diag_handler, &assembly, &obj_out);
if !config.emit_asm && !cgcx.save_temps {
drop(fs::remove_file(&assembly));
}
}
Ok(())
})?;
if copy_bc_to_obj {
debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
if let Err(e) = link_or_copy(&bc_out, &obj_out) {
diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
}
}
if rm_bc {
debug!("removing_bitcode {:?}", bc_out);
if let Err(e) = fs::remove_file(&bc_out) {
diag_handler.err(&format!("failed to remove bitcode: {}", e));
}
}
drop(handlers);
}
Ok(module.into_compiled_module(config.emit_obj,
config.emit_bc,
config.emit_bc_compressed,
&cgcx.output_filenames))
}
/// Embed the bitcode of an LLVM module in the LLVM module itself.
///
/// This is done primarily for iOS where it appears to be standard to compile C
/// code at least with `-fembed-bitcode` which creates two sections in the
/// executable:
///
/// * __LLVM,__bitcode
/// * __LLVM,__cmdline
///
/// It appears *both* of these sections are necessary to get the linker to
/// recognize what's going on. For us though we just always throw in an empty
/// cmdline section.
///
/// Furthermore debug/O1 builds don't actually embed bitcode but rather just
/// embed an empty section.
///
/// Basically all of this is us attempting to follow in the footsteps of clang
/// on iOS. See #35968 for lots more info.
unsafe fn embed_bitcode(cgcx: &CodegenContext<LlvmCodegenBackend>,
llcx: &llvm::Context,
llmod: &llvm::Module,
bitcode: Option<&[u8]>) {
let llconst = common::bytes_in_context(llcx, bitcode.unwrap_or(&[]));
let llglobal = llvm::LLVMAddGlobal(
llmod,
common::val_ty(llconst),
"rustc.embedded.module\0".as_ptr().cast(),
);
llvm::LLVMSetInitializer(llglobal, llconst);
let is_apple = cgcx.opts.target_triple.triple().contains("-ios") ||
cgcx.opts.target_triple.triple().contains("-darwin");
let section = if is_apple {
"__LLVM,__bitcode\0"
} else {
".llvmbc\0"
};
llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
let llconst = common::bytes_in_context(llcx, &[]);
let llglobal = llvm::LLVMAddGlobal(
llmod,
common::val_ty(llconst),
"rustc.embedded.cmdline\0".as_ptr().cast(),
);
llvm::LLVMSetInitializer(llglobal, llconst);
let section = if is_apple {
"__LLVM,__cmdline\0"
} else {
".llvmcmd\0"
};
llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
}
pub unsafe fn with_llvm_pmb(llmod: &llvm::Module,
config: &ModuleConfig,
opt_level: llvm::CodeGenOptLevel,
prepare_for_thin_lto: bool,
f: &mut dyn FnMut(&llvm::PassManagerBuilder)) {
use std::ptr;
// Create the PassManagerBuilder for LLVM. We configure it with
// reasonable defaults and prepare it to actually populate the pass
// manager.
let builder = llvm::LLVMPassManagerBuilderCreate();
let opt_size = config.opt_size.map(|x| to_llvm_opt_settings(x).1)
.unwrap_or(llvm::CodeGenOptSizeNone);
let inline_threshold = config.inline_threshold;
let pgo_gen_path = match config.pgo_gen {
SwitchWithOptPath::Enabled(ref opt_dir_path) => {
let path = if let Some(dir_path) = opt_dir_path {
dir_path.join("default_%m.profraw")
} else {
PathBuf::from("default_%m.profraw")
};
Some(CString::new(format!("{}", path.display())).unwrap())
}
SwitchWithOptPath::Disabled => {
None
}
};
let pgo_use_path = config.pgo_use.as_ref().map(|path_buf| {
CString::new(path_buf.to_string_lossy().as_bytes()).unwrap()
});
llvm::LLVMRustConfigurePassManagerBuilder(
builder,
opt_level,
config.merge_functions,
config.vectorize_slp,
config.vectorize_loop,
prepare_for_thin_lto,
pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
);
llvm::LLVMPassManagerBuilderSetSizeLevel(builder, opt_size as u32);
if opt_size != llvm::CodeGenOptSizeNone {
llvm::LLVMPassManagerBuilderSetDisableUnrollLoops(builder, 1);
}
llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins);
// Here we match what clang does (kinda). For O0 we only inline
// always-inline functions (but don't add lifetime intrinsics), at O1 we
// inline with lifetime intrinsics, and O2+ we add an inliner with a
// thresholds copied from clang.
match (opt_level, opt_size, inline_threshold) {
(.., Some(t)) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, t as u32);
}
(llvm::CodeGenOptLevel::Aggressive,..) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 275);
}
(_, llvm::CodeGenOptSizeDefault, _) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 75);
}
(_, llvm::CodeGenOptSizeAggressive, _) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 25);
}
(llvm::CodeGenOptLevel::None,..) => {
llvm::LLVMRustAddAlwaysInlinePass(builder, false);
}
(llvm::CodeGenOptLevel::Less,..) => {
llvm::LLVMRustAddAlwaysInlinePass(builder, true);
}
(llvm::CodeGenOptLevel::Default,..) => {
llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225);
}
(llvm::CodeGenOptLevel::Other,..) => {
bug!("CodeGenOptLevel::Other selected")
}
}
f(builder);
llvm::LLVMPassManagerBuilderDispose(builder);
}
// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
// This is required to satisfy `dllimport` references to static data in.rlibs
// when using MSVC linker. We do this only for data, as linker can fix up
// code references on its own.
// See #26591, #27438
fn create_msvc_imps(
cgcx: &CodegenContext<LlvmCodegenBackend>,
llcx: &llvm::Context,
llmod: &llvm::Module
) {
if !cgcx.msvc_imps_needed {
return
}
// The x86 ABI seems to require that leading underscores are added to symbol
// names, so we need an extra underscore on x86. There's also a leading
// '\x01' here which disables LLVM's symbol mangling (e.g., no extra
// underscores added in front).
let prefix = if cgcx.target_arch == "x86" | {
"\x01__imp__"
} | conditional_block |
|
write.rs |
pub fn to_llvm_opt_settings(cfg: config::OptLevel) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize)
{
use self::config::OptLevel::*;
match cfg {
No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone),
Less => (llvm::CodeGenOptLevel::Less, llvm::CodeGenOptSizeNone),
Default => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeNone),
Aggressive => (llvm::CodeGenOptLevel::Aggressive, llvm::CodeGenOptSizeNone),
Size => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeDefault),
SizeMin => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeAggressive),
}
}
// If find_features is true this won't access `sess.crate_types` by assuming
// that `is_pie_binary` is false. When we discover LLVM target features
// `sess.crate_types` is uninitialized so we cannot access it.
pub fn target_machine_factory(sess: &Session, optlvl: config::OptLevel, find_features: bool)
-> Arc<dyn Fn() -> Result<&'static mut llvm::TargetMachine, String> + Send + Sync>
{
let reloc_model = get_reloc_model(sess);
let (opt_level, _) = to_llvm_opt_settings(optlvl);
let use_softfp = sess.opts.cg.soft_float;
let ffunction_sections = sess.target.target.options.function_sections;
let fdata_sections = ffunction_sections;
let code_model_arg = sess.opts.cg.code_model.as_ref().or(
sess.target.target.options.code_model.as_ref(),
);
let code_model = match code_model_arg {
Some(s) => {
match CODE_GEN_MODEL_ARGS.iter().find(|arg| arg.0 == s) {
Some(x) => x.1,
_ => {
sess.err(&format!("{:?} is not a valid code model",
code_model_arg));
sess.abort_if_errors();
bug!();
}
}
}
None => llvm::CodeModel::None,
};
let features = attributes::llvm_target_features(sess).collect::<Vec<_>>();
let mut singlethread = sess.target.target.options.singlethread;
// On the wasm target once the `atomics` feature is enabled that means that
// we're no longer single-threaded, or otherwise we don't want LLVM to
// lower atomic operations to single-threaded operations.
if singlethread &&
sess.target.target.llvm_target.contains("wasm32") &&
features.iter().any(|s| *s == "+atomics")
{
singlethread = false;
}
let triple = SmallCStr::new(&sess.target.target.llvm_target);
let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
let features = features.join(",");
let features = CString::new(features).unwrap();
let is_pie_binary = !find_features && is_pie_binary(sess);
let trap_unreachable = sess.target.target.options.trap_unreachable;
let emit_stack_size_section = sess.opts.debugging_opts.emit_stack_sizes;
let asm_comments = sess.asm_comments();
Arc::new(move || {
let tm = unsafe {
llvm::LLVMRustCreateTargetMachine(
triple.as_ptr(), cpu.as_ptr(), features.as_ptr(),
code_model,
reloc_model,
opt_level,
use_softfp,
is_pie_binary,
ffunction_sections,
fdata_sections,
trap_unreachable,
singlethread,
asm_comments,
emit_stack_size_section,
)
};
tm.ok_or_else(|| {
format!("Could not create LLVM TargetMachine for triple: {}",
triple.to_str().unwrap())
})
})
}
pub(crate) fn save_temp_bitcode(
cgcx: &CodegenContext<LlvmCodegenBackend>,
module: &ModuleCodegen<ModuleLlvm>,
name: &str
) {
if !cgcx.save_temps {
return
}
unsafe {
let ext = format!("{}.bc", name);
let cgu = Some(&module.name[..]);
let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
let cstr = path_to_c_string(&path);
let llmod = module.module_llvm.llmod();
llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
}
}
pub struct DiagnosticHandlers<'a> {
data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
llcx: &'a llvm::Context,
}
impl<'a> DiagnosticHandlers<'a> {
pub fn new(cgcx: &'a CodegenContext<LlvmCodegenBackend>,
handler: &'a Handler,
llcx: &'a llvm::Context) -> Self {
let data = Box::into_raw(Box::new((cgcx, handler)));
unsafe {
llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data.cast());
}
DiagnosticHandlers { data, llcx }
}
}
impl<'a> Drop for DiagnosticHandlers<'a> {
fn drop(&mut self) {
use std::ptr::null_mut;
unsafe {
llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, null_mut());
drop(Box::from_raw(self.data));
}
}
}
unsafe extern "C" fn report_inline_asm(cgcx: &CodegenContext<LlvmCodegenBackend>,
msg: &str,
cookie: c_uint) {
cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_owned());
}
unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic,
user: *const c_void,
cookie: c_uint) {
if user.is_null() {
return
}
let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s))
.expect("non-UTF8 SMDiagnostic");
report_inline_asm(cgcx, &msg, cookie);
}
unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
if user.is_null() {
return
}
let (cgcx, diag_handler) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
match llvm::diagnostic::Diagnostic::unpack(info) {
llvm::diagnostic::InlineAsm(inline) => {
report_inline_asm(cgcx,
&llvm::twine_to_string(inline.message),
inline.cookie);
}
llvm::diagnostic::Optimization(opt) => {
let enabled = match cgcx.remark {
Passes::All => true,
Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
};
if enabled {
diag_handler.note_without_error(&format!("optimization {} for {} at {}:{}:{}: {}",
opt.kind.describe(),
opt.pass_name,
opt.filename,
opt.line,
opt.column,
opt.message));
}
}
llvm::diagnostic::PGO(diagnostic_ref) |
llvm::diagnostic::Linker(diagnostic_ref) => {
let msg = llvm::build_string(|s| {
llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
}).expect("non-UTF8 diagnostic");
diag_handler.warn(&msg);
}
llvm::diagnostic::UnknownDiagnostic(..) => {},
}
}
// Unsafe due to LLVM calls.
pub(crate) unsafe fn optimize(cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: &ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig)
-> Result<(), FatalError>
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize");
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
if config.emit_no_opt_bc {
let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
let out = path_to_c_string(&out);
llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
}
if config.opt_level.is_some() {
// Create the two optimizing pass managers. These mirror what clang
// does, and are populated by LLVM's default PassManagerBuilder.
// Each manager has a different set of passes, but they also share
// some common passes.
let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
let mpm = llvm::LLVMCreatePassManager();
{
let find_pass = |pass_name: &str| {
let pass_name = SmallCStr::new(pass_name);
llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr())
};
if config.verify_llvm_ir {
// Verification should run as the very first pass.
llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap());
}
let mut extra_passes = Vec::new();
let mut have_name_anon_globals_pass = false;
for pass_name in &config.passes {
if pass_name == "lint" {
// Linting should also be performed early, directly on the generated IR.
llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
continue;
}
if let Some(pass) = find_pass(pass_name) {
extra_passes.push(pass);
} else {
diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name));
}
if pass_name == "name-anon-globals" {
have_name_anon_globals_pass = true;
}
}
for pass_name in &cgcx.plugin_passes {
if let Some(pass) = find_pass(pass_name) {
extra_passes.push(pass);
} else {
diag_handler.err(&format!("a plugin asked for LLVM pass \
`{}` but LLVM does not \
recognize it", pass_name));
}
if pass_name == "name-anon-globals" {
have_name_anon_globals_pass = true;
}
}
// Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
// to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
// we'll get errors in LLVM.
let using_thin_buffers = config.bitcode_needed();
if !config.no_prepopulate_passes {
llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
let opt_level = config.opt_level.map(|x| to_llvm_opt_settings(x).0)
.unwrap_or(llvm::CodeGenOptLevel::None);
let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal ||
(cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| {
llvm::LLVMRustAddLastExtensionPasses(
b, extra_passes.as_ptr(), extra_passes.len() as size_t);
llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm);
llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm);
});
have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
if using_thin_buffers && !prepare_for_thin_lto {
llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
have_name_anon_globals_pass = true;
}
} else {
// If we don't use the standard pipeline, directly populate the MPM
// with the extra passes.
for pass in extra_passes {
llvm::LLVMRustAddPass(mpm, pass);
}
}
if using_thin_buffers && !have_name_anon_globals_pass {
// As described above, this will probably cause an error in LLVM
if config.no_prepopulate_passes {
diag_handler.err("The current compilation is going to use thin LTO buffers \
without running LLVM's NameAnonGlobals pass. \
This will likely cause errors in LLVM. Consider adding \
-C passes=name-anon-globals to the compiler command line.");
} else {
bug!("We are using thin LTO buffers without running the NameAnonGlobals pass. \
This will likely cause errors in LLVM and should never happen.");
}
}
}
diag_handler.abort_if_errors();
// Finally, run the actual optimization passes
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize_function_passes");
time_ext(config.time_passes,
&format!("llvm function passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRustRunFunctionPassManager(fpm, llmod)
});
}
{
let _timer = cgcx.prof.generic_activity("LLVM_module_optimize_module_passes");
time_ext(config.time_passes,
&format!("llvm module passes [{}]", module_name.unwrap()),
|| {
llvm::LLVMRunPassManager(mpm, llmod)
});
}
// Deallocate managers that we're now done with
llvm::LLVMDisposePassManager(fpm);
llvm::LLVMDisposePassManager(mpm);
}
Ok(())
}
pub(crate) unsafe fn codegen(cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
module: ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig)
-> Result<CompiledModule, FatalError>
{
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen");
{
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
let tm = &*module.module_llvm.tm;
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
if cgcx.msvc_imps_needed {
create_msvc_imps(cgcx, llcx, llmod);
}
// A codegen-specific pass manager is used to generate object
// files for an LLVM module.
//
// Apparently each of these pass managers is a one-shot kind of
// thing, so we create a new one for each type of output. The
// pass manager passed to the closure should be ensured to not
// escape the closure itself, and the manager should only be
// used once.
unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine,
llmod: &'ll llvm::Module,
no_builtins: bool,
f: F) -> R
where F: FnOnce(&'ll mut PassManager<'ll>) -> R,
{
let cpm = llvm::LLVMCreatePassManager();
llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
f(cpm)
}
// If we don't have the integrated assembler, then we need to emit asm
// from LLVM and use `gcc` to create the object file.
let asm_to_obj = config.emit_obj && config.no_integrated_as;
// Change what we write and cleanup based on whether obj files are
// just llvm bitcode. In that case write bitcode, and possibly
// delete the bitcode if it wasn't requested. Don't generate the
// machine code, instead copy the .o file from the .bc
let write_bc = config.emit_bc || config.obj_is_bitcode;
let rm_bc = !config.emit_bc && config.obj_is_bitcode;
let write_obj = config.emit_obj && !config.obj_is_bitcode && !asm_to_obj;
let copy_bc_to_obj = config.emit_obj && config.obj_is_bitcode;
let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
if write_bc || config.emit_bc_compressed || config.embed_bitcode {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_make_bitcode");
let thin = ThinBuffer::new(llmod);
let data = thin.data();
if write_bc {
let _timer = cgcx.prof.generic_activity("LLVM_module_codegen_emit_bitcode");
if let Err(e) = fs::write(& | {
target_machine_factory(&tcx.sess, tcx.backend_optimization_level(LOCAL_CRATE), find_features)()
.unwrap_or_else(|err| {
llvm_err(tcx.sess.diagnostic(), &err).raise()
})
} | identifier_body |
|
lib.rs | //! *“I'm late! I'm late! For a very important date!”*
//! *by “The White Rabbit”* 『Alice's Adventures in Wonderland』
//!
//! `white_rabbit` schedules your tasks and can repeat them!
//!
//! One funny use case is chat bot commands: imagine a *remind me* command;
//! the command gets executed and you simply create a one-time job to be
//! scheduled for whatever time the user desires.
//!
//! We are using chrono's `DateTime<Utc>`, enabling you to serialise and thus
//! back up currently running tasks; in case you want to shut down and restart
//! your application, constructing a new scheduler is doable.
//! However, please make sure your internal clock is synced.
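//!
//! A minimal sketch of a one-shot task (the 30-second delay and the printed
//! message are illustrative; keep the `Scheduler` handle alive for as long as
//! tasks should still fire):
//!
//! ```rust,no_run
//! use white_rabbit::{DateResult, Duration, Scheduler};
//!
//! // One worker thread is plenty for a single reminder.
//! let mut scheduler = Scheduler::new(1);
//!
//! // Run once, 30 seconds from now, then let the scheduler discard the job.
//! scheduler.add_task_duration(Duration::seconds(30), |_ctx| {
//!     println!("Don't forget the tea!");
//!     DateResult::Done
//! });
//! ```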
#![deny(rust_2018_idioms)]
use chrono::Duration as ChronoDuration;
use parking_lot::{Condvar, Mutex, RwLock};
use std::{cmp::Ordering, collections::BinaryHeap, sync::Arc, time::Duration as StdDuration};
use threadpool::ThreadPool;
pub use chrono::{DateTime, Duration, Utc};
/// Compare if an `enum`-variant matches another variant.
macro_rules! cmp_variant {
($expression:expr, $($variant:tt)+) => {
match $expression {
$($variant)+ => true,
_ => false
}
}
}
/// When a task is due, this will be passed to the task.
/// Currently, there is not much use to this. However, this might be extended
/// in the future.
pub struct Context {
time: DateTime<Utc>,
}
/// Every task will return this `enum`.
pub enum DateResult {
/// The task is considered finished and can be fully removed.
Done,
    /// The task will be scheduled again for the passed `DateTime<Utc>`.
Repeat(DateTime<Utc>),
}
/// Every job gets a planned `Date` with the scheduler.
pub struct Date {
pub context: Context,
pub job: Box<dyn FnMut(&mut Context) -> DateResult + Send + Sync +'static>,
}
impl Eq for Date {}
/// Invert comparisons to create a min-heap.
impl Ord for Date {
fn cmp(&self, other: &Date) -> Ordering {
match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
}
}
}
/// Invert comparisons to create a min-heap.
impl PartialOrd for Date {
fn partial_cmp(&self, other: &Date) -> Option<Ordering> {
Some(match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
})
}
}
impl PartialEq for Date {
fn eq(&self, other: &Date) -> bool {
self.context.time == other.context.time
}
}
/// The [`Scheduler`]'s worker thread switches through different states
/// while running; each state changes the behaviour.
///
/// [`Scheduler`]: struct.Scheduler.html
enum SchedulerState {
/// No dates being awaited, sleep until one gets added.
PauseEmpty,
/// Pause until next date is due.
PauseTime(StdDuration),
/// If the next date is already waiting to be executed,
/// the thread continues running without sleeping.
Run,
/// Exits the thread.
Exit,
}
impl SchedulerState {
fn is_running(&self) -> bool {
cmp_variant!(*self, SchedulerState::Run)
}
fn new_pause_time(duration: ChronoDuration) -> Self {
SchedulerState::PauseTime(
duration
.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
)
}
}
/// This scheduler exists on two levels: the handle, granting you the
/// ability to add new tasks, and the executor, dating and executing these
/// tasks when the specified time is met.
///
/// **Info**: This scheduler may not be precise due to anomalies such as
/// preemption or platform differences.
pub struct Scheduler {
    /// The means of communication with the running scheduler.
condvar: Arc<(Mutex<SchedulerState>, Condvar)>,
/// Every job has its date listed inside this.
dates: Arc<RwLock<BinaryHeap<Date>>>,
}
impl Scheduler {
/// Add a task to be executed when `time` is reached.
pub fn add_task_datetime<T>(&mut self, time: DateTime<Utc>, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync +'static,
{
let &(ref state_lock, ref notifier) = &*self.condvar;
let task = Date {
context: Context { time },
job: Box::new(to_execute),
};
let mut locked_heap = self.dates.write();
if locked_heap.is_empty() {
let mut scheduler_state = state_lock.lock();
let left = task.context.time.signed_duration_since(Utc::now());
if!scheduler_state.is_running() {
| lse {
let mut scheduler_state = state_lock.lock();
if let SchedulerState::PauseTime(_) = *scheduler_state {
let peeked = locked_heap.peek().expect("Expected heap to be filled.");
if task.context.time < peeked.context.time {
let left = task.context.time.signed_duration_since(Utc::now());
if!scheduler_state.is_running() {
*scheduler_state = SchedulerState::PauseTime(
left.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
);
notifier.notify_one();
}
}
}
}
locked_heap.push(task);
}
pub fn add_task_duration<T>(&mut self, how_long: ChronoDuration, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync +'static,
{
let time = Utc::now() + how_long;
self.add_task_datetime(time, to_execute);
}
}
fn set_state_lock(state_lock: &Mutex<SchedulerState>, to_set: SchedulerState) {
let mut state = state_lock.lock();
*state = to_set;
}
#[inline]
fn _push_and_notfiy(date: Date, heap: &mut BinaryHeap<Date>, notifier: &Condvar) {
heap.push(date);
notifier.notify_one();
}
/// This function pushes a `date` onto `data_pooled` and notifies the
/// dispatching thread in case it is sleeping.
#[inline]
fn push_and_notfiy(
dispatcher_pair: &Arc<(Mutex<SchedulerState>, Condvar)>,
data_pooled: &Arc<RwLock<BinaryHeap<Date>>>,
when: &DateTime<Utc>,
date: Date,
) {
let &(ref state_lock, ref notifier) = &**dispatcher_pair;
let mut state = state_lock.lock();
let mut heap_lock = data_pooled.write();
if let Some(peek) = heap_lock.peek() {
if peek.context.time < *when {
let left = peek.context.time.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
}
#[must_use]
enum Break {
Yes,
No,
}
#[inline]
fn process_states(state_lock: &Mutex<SchedulerState>, notifier: &Condvar) -> Break {
let mut scheduler_state = state_lock.lock();
while let SchedulerState::PauseEmpty = *scheduler_state {
notifier.wait(&mut scheduler_state);
}
while let SchedulerState::PauseTime(duration) = *scheduler_state {
if notifier
.wait_for(&mut scheduler_state, duration)
.timed_out()
{
break;
}
}
if let SchedulerState::Exit = *scheduler_state {
return Break::Yes;
}
Break::No
}
fn dispatch_date(
threadpool: &ThreadPool,
dates: &Arc<RwLock<BinaryHeap<Date>>>,
pair_scheduler: &Arc<(Mutex<SchedulerState>, Condvar)>,
) {
let mut date = {
let mut dates = dates.write();
dates.pop().expect("Should not run on empty heap.")
};
let date_dispatcher = dates.clone();
let dispatcher_pair = pair_scheduler.clone();
threadpool.execute(move || {
if let DateResult::Repeat(when) = (date.job)(&mut date.context) {
date.context.time = when;
push_and_notfiy(&dispatcher_pair, &date_dispatcher, &when, date);
}
});
}
fn check_peeking_date(dates: &Arc<RwLock<BinaryHeap<Date>>>, state_lock: &Mutex<SchedulerState>) {
if let Some(next) = dates.read().peek() {
let now = Utc::now();
if next.context.time > now {
let left = next.context.time.signed_duration_since(now);
set_state_lock(&state_lock, SchedulerState::new_pause_time(left));
} else {
set_state_lock(&state_lock, SchedulerState::Run);
}
} else {
set_state_lock(&state_lock, SchedulerState::PauseEmpty);
}
}
impl Scheduler {
/// Creates a new [`Scheduler`] which will use `thread_count` number of
/// threads when tasks are being dispatched/dated.
///
/// [`Scheduler`]: struct.Scheduler.html
pub fn new(thread_count: usize) -> Self {
let pair = Arc::new((Mutex::new(SchedulerState::PauseEmpty), Condvar::new()));
let pair_scheduler = pair.clone();
let dates: Arc<RwLock<BinaryHeap<Date>>> = Arc::new(RwLock::new(BinaryHeap::new()));
let dates_scheduler = Arc::clone(&dates);
std::thread::spawn(move || {
let &(ref state_lock, ref notifier) = &*pair_scheduler;
let threadpool = ThreadPool::new(thread_count);
loop {
if let Break::Yes = process_states(&state_lock, ¬ifier) {
break;
}
dispatch_date(&threadpool, &dates_scheduler, &pair_scheduler);
check_peeking_date(&dates_scheduler, &state_lock);
}
});
Scheduler {
condvar: pair,
dates,
}
}
}
/// Once the scheduler is dropped, we also need to tell the worker thread to finish.
impl<'a> Drop for Scheduler {
fn drop(&mut self) {
let &(ref state_lock, ref notifier) = &*self.condvar;
let mut state = state_lock.lock();
*state = SchedulerState::Exit;
notifier.notify_one();
}
}
| *scheduler_state = SchedulerState::new_pause_time(left);
notifier.notify_one();
}
} e | conditional_block |
lib.rs | //! *“I'm late! I'm late! For a very important date!”*
//! *by “The White Rabbit”* 『Alice's Adventures in Wonderland』
//!
//! `white_rabbit` schedules your tasks and can repeat them!
//!
//! One funny use case is chat bot commands: imagine a *remind me* command;
//! the command gets executed and you simply create a one-time job to be
//! scheduled for whatever time the user desires.
//!
//! We are using chrono's `DateTime<Utc>`, enabling you to serialise and thus
//! back up currently running tasks; in case you want to shut down and restart
//! your application, constructing a new scheduler is doable.
//! However, please make sure your internal clock is synced.
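//!
//! Repeating jobs are modelled by returning `DateResult::Repeat` with the next
//! due date; a rough sketch (the five-second interval is arbitrary):
//!
//! ```rust,no_run
//! use white_rabbit::{DateResult, Duration, Scheduler, Utc};
//!
//! let mut scheduler = Scheduler::new(2);
//!
//! // The job re-schedules itself every time it runs.
//! scheduler.add_task_duration(Duration::seconds(5), |_ctx| {
//!     println!("ping");
//!     DateResult::Repeat(Utc::now() + Duration::seconds(5))
//! });
//! ```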
#![deny(rust_2018_idioms)]
use chrono::Duration as ChronoDuration;
use parking_lot::{Condvar, Mutex, RwLock};
use std::{cmp::Ordering, collections::BinaryHeap, sync::Arc, time::Duration as StdDuration};
use threadpool::ThreadPool;
pub use chrono::{DateTime, Duration, Utc};
/// Compare if an `enum`-variant matches another variant.
macro_rules! cmp_variant {
($expression:expr, $($variant:tt)+) => {
match $expression {
$($variant)+ => true,
_ => false
}
}
}
/// When a task is due, this will be passed to the task.
/// Currently, there is not much use to this. However, this might be extended
/// in the future.
pub struct Context {
time: DateTime<Utc>,
}
/// Every task will return this `enum`.
pub enum DateResult {
/// The task is considered finished and can be fully removed.
Done,
    /// The task will be scheduled again for the passed `DateTime<Utc>`.
Repeat(DateTime<Utc>),
}
/// Every job gets a planned `Date` with the scheduler.
pub struct Date {
pub context: Context,
pub job: Box<dyn FnMut(&mut Context) -> DateResult + Send + Sync +'static>,
}
impl Eq for Date {}
/// Invert comparisons to create a min-heap.
impl Ord for Date {
fn cmp(&self, other: &Date) -> Ordering {
match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
}
}
}
/// Invert comparisons to create a min-heap.
impl PartialOrd for Date {
fn partial_cmp(&self, other: &Date) -> Option<Ordering> {
Some(match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
})
}
}
impl PartialEq for Date {
fn eq(&self, other: &Date) -> bool {
self.context.time == other.context.time
}
}
/// The [`Scheduler`]'s worker thread switches through different states
/// while running; each state changes the behaviour.
///
/// [`Scheduler`]: struct.Scheduler.html
enum SchedulerState {
/// No dates being awaited, sleep until one gets added.
PauseEmpty,
/// Pause until next date is due.
PauseTime(StdDuration),
/// If the next date is already waiting to be executed,
/// the thread continues running without sleeping.
Run,
/// Exits the thread.
Exit,
}
impl SchedulerState {
fn is_running(&self) -> bool {
cmp_variant!(*self, SchedulerState::Run)
}
fn new_pause_time(duration: ChronoDuration) -> Self {
Sc | scheduler exists on two levels: The handle, granting you the
/// ability to add new tasks, and the executor, dating and executing these
/// tasks when the specified time is met.
///
/// **Info**: This scheduler may not be precise due to anomalies such as
/// preemption or platform differences.
pub struct Scheduler {
    /// The means of communication with the running scheduler.
condvar: Arc<(Mutex<SchedulerState>, Condvar)>,
/// Every job has its date listed inside this.
dates: Arc<RwLock<BinaryHeap<Date>>>,
}
impl Scheduler {
/// Add a task to be executed when `time` is reached.
pub fn add_task_datetime<T>(&mut self, time: DateTime<Utc>, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync +'static,
{
let &(ref state_lock, ref notifier) = &*self.condvar;
let task = Date {
context: Context { time },
job: Box::new(to_execute),
};
let mut locked_heap = self.dates.write();
if locked_heap.is_empty() {
let mut scheduler_state = state_lock.lock();
let left = task.context.time.signed_duration_since(Utc::now());
if!scheduler_state.is_running() {
*scheduler_state = SchedulerState::new_pause_time(left);
notifier.notify_one();
}
} else {
let mut scheduler_state = state_lock.lock();
if let SchedulerState::PauseTime(_) = *scheduler_state {
let peeked = locked_heap.peek().expect("Expected heap to be filled.");
if task.context.time < peeked.context.time {
let left = task.context.time.signed_duration_since(Utc::now());
if!scheduler_state.is_running() {
*scheduler_state = SchedulerState::PauseTime(
left.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
);
notifier.notify_one();
}
}
}
}
locked_heap.push(task);
}
pub fn add_task_duration<T>(&mut self, how_long: ChronoDuration, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync +'static,
{
let time = Utc::now() + how_long;
self.add_task_datetime(time, to_execute);
}
}
fn set_state_lock(state_lock: &Mutex<SchedulerState>, to_set: SchedulerState) {
let mut state = state_lock.lock();
*state = to_set;
}
#[inline]
fn _push_and_notfiy(date: Date, heap: &mut BinaryHeap<Date>, notifier: &Condvar) {
heap.push(date);
notifier.notify_one();
}
/// This function pushes a `date` onto `data_pooled` and notifies the
/// dispatching thread in case it is sleeping.
#[inline]
fn push_and_notfiy(
dispatcher_pair: &Arc<(Mutex<SchedulerState>, Condvar)>,
data_pooled: &Arc<RwLock<BinaryHeap<Date>>>,
when: &DateTime<Utc>,
date: Date,
) {
let &(ref state_lock, ref notifier) = &**dispatcher_pair;
let mut state = state_lock.lock();
let mut heap_lock = data_pooled.write();
if let Some(peek) = heap_lock.peek() {
if peek.context.time < *when {
let left = peek.context.time.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
}
#[must_use]
enum Break {
Yes,
No,
}
#[inline]
fn process_states(state_lock: &Mutex<SchedulerState>, notifier: &Condvar) -> Break {
let mut scheduler_state = state_lock.lock();
while let SchedulerState::PauseEmpty = *scheduler_state {
notifier.wait(&mut scheduler_state);
}
while let SchedulerState::PauseTime(duration) = *scheduler_state {
if notifier
.wait_for(&mut scheduler_state, duration)
.timed_out()
{
break;
}
}
if let SchedulerState::Exit = *scheduler_state {
return Break::Yes;
}
Break::No
}
fn dispatch_date(
threadpool: &ThreadPool,
dates: &Arc<RwLock<BinaryHeap<Date>>>,
pair_scheduler: &Arc<(Mutex<SchedulerState>, Condvar)>,
) {
let mut date = {
let mut dates = dates.write();
dates.pop().expect("Should not run on empty heap.")
};
let date_dispatcher = dates.clone();
let dispatcher_pair = pair_scheduler.clone();
threadpool.execute(move || {
if let DateResult::Repeat(when) = (date.job)(&mut date.context) {
date.context.time = when;
push_and_notfiy(&dispatcher_pair, &date_dispatcher, &when, date);
}
});
}
fn check_peeking_date(dates: &Arc<RwLock<BinaryHeap<Date>>>, state_lock: &Mutex<SchedulerState>) {
if let Some(next) = dates.read().peek() {
let now = Utc::now();
if next.context.time > now {
let left = next.context.time.signed_duration_since(now);
set_state_lock(&state_lock, SchedulerState::new_pause_time(left));
} else {
set_state_lock(&state_lock, SchedulerState::Run);
}
} else {
set_state_lock(&state_lock, SchedulerState::PauseEmpty);
}
}
impl Scheduler {
/// Creates a new [`Scheduler`] which will use `thread_count` number of
/// threads when tasks are being dispatched/dated.
///
/// [`Scheduler`]: struct.Scheduler.html
pub fn new(thread_count: usize) -> Self {
let pair = Arc::new((Mutex::new(SchedulerState::PauseEmpty), Condvar::new()));
let pair_scheduler = pair.clone();
let dates: Arc<RwLock<BinaryHeap<Date>>> = Arc::new(RwLock::new(BinaryHeap::new()));
let dates_scheduler = Arc::clone(&dates);
std::thread::spawn(move || {
let &(ref state_lock, ref notifier) = &*pair_scheduler;
let threadpool = ThreadPool::new(thread_count);
loop {
if let Break::Yes = process_states(&state_lock, ¬ifier) {
break;
}
dispatch_date(&threadpool, &dates_scheduler, &pair_scheduler);
check_peeking_date(&dates_scheduler, &state_lock);
}
});
Scheduler {
condvar: pair,
dates,
}
}
}
/// Once the scheduler is dropped, we also need to tell the worker thread to finish.
impl<'a> Drop for Scheduler {
fn drop(&mut self) {
let &(ref state_lock, ref notifier) = &*self.condvar;
let mut state = state_lock.lock();
*state = SchedulerState::Exit;
notifier.notify_one();
}
}
| hedulerState::PauseTime(
duration
.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
)
}
}
/// This | identifier_body |
lib.rs | //! *“I'm late! I'm late! For a very important date!”*
//! *by “The White Rabbit”* 『Alice's Adventures in Wonderland』
//!
//! `white_rabbit` schedules your tasks and can repeat them!
//!
//! One funny use case is chat bot commands: imagine a *remind me* command;
//! the command gets executed and you simply create a one-time job to be
//! scheduled for whatever time the user desires.
//!
//! We are using chrono's `DateTime<Utc>`, enabling you to serialise and thus
//! back up currently running tasks; in case you want to shut down and restart
//! your application, constructing a new scheduler is doable.
//! However, please make sure your internal clock is synced.
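//!
//! Because due dates are plain `DateTime<Utc>` values, they can be written out
//! (for instance as RFC 3339 strings) and re-registered after a restart. A
//! hand-wavy sketch of that idea, assuming the timestamp was read from disk:
//!
//! ```rust,no_run
//! use white_rabbit::{DateResult, DateTime, Scheduler, Utc};
//!
//! // Pretend this string was persisted before the previous shutdown.
//! let stored = "2030-01-01T00:00:00Z";
//! let due: DateTime<Utc> = stored.parse().expect("valid RFC 3339 timestamp");
//!
//! let mut scheduler = Scheduler::new(1);
//! scheduler.add_task_datetime(due, |_ctx| DateResult::Done);
//! ```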
#![deny(rust_2018_idioms)]
use chrono::Duration as ChronoDuration;
use parking_lot::{Condvar, Mutex, RwLock};
use std::{cmp::Ordering, collections::BinaryHeap, sync::Arc, time::Duration as StdDuration};
use threadpool::ThreadPool;
pub use chrono::{DateTime, Duration, Utc};
/// Compare if an `enum`-variant matches another variant.
macro_rules! cmp_variant {
($expression:expr, $($variant:tt)+) => {
match $expression {
$($variant)+ => true,
_ => false
}
}
}
/// When a task is due, this will be passed to the task.
/// Currently, there is not much use to this. However, this might be extended
/// in the future.
pub struct Context {
| DateTime<Utc>,
}
/// Every task will return this `enum`.
pub enum DateResult {
/// The task is considered finished and can be fully removed.
Done,
    /// The task will be scheduled again for the passed `DateTime<Utc>`.
Repeat(DateTime<Utc>),
}
/// Every job gets a planned `Date` with the scheduler.
pub struct Date {
pub context: Context,
pub job: Box<dyn FnMut(&mut Context) -> DateResult + Send + Sync +'static>,
}
impl Eq for Date {}
/// Invert comparisons to create a min-heap.
impl Ord for Date {
fn cmp(&self, other: &Date) -> Ordering {
match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
}
}
}
/// Invert comparisons to create a min-heap.
impl PartialOrd for Date {
fn partial_cmp(&self, other: &Date) -> Option<Ordering> {
Some(match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
})
}
}
impl PartialEq for Date {
fn eq(&self, other: &Date) -> bool {
self.context.time == other.context.time
}
}
/// The [`Scheduler`]'s worker thread switches through different states
/// while running; each state changes the behaviour.
///
/// [`Scheduler`]: struct.Scheduler.html
enum SchedulerState {
/// No dates being awaited, sleep until one gets added.
PauseEmpty,
/// Pause until next date is due.
PauseTime(StdDuration),
/// If the next date is already waiting to be executed,
/// the thread continues running without sleeping.
Run,
/// Exits the thread.
Exit,
}
impl SchedulerState {
fn is_running(&self) -> bool {
cmp_variant!(*self, SchedulerState::Run)
}
fn new_pause_time(duration: ChronoDuration) -> Self {
SchedulerState::PauseTime(
duration
.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
)
}
}
/// This scheduler exists on two levels: the handle, granting you the
/// ability to add new tasks, and the executor, dating and executing these
/// tasks when the specified time is met.
///
/// **Info**: This scheduler may not be precise due to anomalies such as
/// preemption or platform differences.
pub struct Scheduler {
    /// The means of communication with the running scheduler.
condvar: Arc<(Mutex<SchedulerState>, Condvar)>,
/// Every job has its date listed inside this.
dates: Arc<RwLock<BinaryHeap<Date>>>,
}
impl Scheduler {
/// Add a task to be executed when `time` is reached.
pub fn add_task_datetime<T>(&mut self, time: DateTime<Utc>, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync +'static,
{
let &(ref state_lock, ref notifier) = &*self.condvar;
let task = Date {
context: Context { time },
job: Box::new(to_execute),
};
let mut locked_heap = self.dates.write();
if locked_heap.is_empty() {
let mut scheduler_state = state_lock.lock();
let left = task.context.time.signed_duration_since(Utc::now());
if!scheduler_state.is_running() {
*scheduler_state = SchedulerState::new_pause_time(left);
notifier.notify_one();
}
} else {
let mut scheduler_state = state_lock.lock();
if let SchedulerState::PauseTime(_) = *scheduler_state {
let peeked = locked_heap.peek().expect("Expected heap to be filled.");
if task.context.time < peeked.context.time {
let left = task.context.time.signed_duration_since(Utc::now());
if!scheduler_state.is_running() {
*scheduler_state = SchedulerState::PauseTime(
left.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
);
notifier.notify_one();
}
}
}
}
locked_heap.push(task);
}
pub fn add_task_duration<T>(&mut self, how_long: ChronoDuration, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync +'static,
{
let time = Utc::now() + how_long;
self.add_task_datetime(time, to_execute);
}
}
fn set_state_lock(state_lock: &Mutex<SchedulerState>, to_set: SchedulerState) {
let mut state = state_lock.lock();
*state = to_set;
}
#[inline]
fn _push_and_notfiy(date: Date, heap: &mut BinaryHeap<Date>, notifier: &Condvar) {
heap.push(date);
notifier.notify_one();
}
/// This function pushes a `date` onto `data_pooled` and notifies the
/// dispatching thread in case it is sleeping.
#[inline]
fn push_and_notfiy(
dispatcher_pair: &Arc<(Mutex<SchedulerState>, Condvar)>,
data_pooled: &Arc<RwLock<BinaryHeap<Date>>>,
when: &DateTime<Utc>,
date: Date,
) {
let &(ref state_lock, ref notifier) = &**dispatcher_pair;
let mut state = state_lock.lock();
let mut heap_lock = data_pooled.write();
if let Some(peek) = heap_lock.peek() {
if peek.context.time < *when {
let left = peek.context.time.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
}
#[must_use]
enum Break {
Yes,
No,
}
#[inline]
fn process_states(state_lock: &Mutex<SchedulerState>, notifier: &Condvar) -> Break {
let mut scheduler_state = state_lock.lock();
while let SchedulerState::PauseEmpty = *scheduler_state {
notifier.wait(&mut scheduler_state);
}
while let SchedulerState::PauseTime(duration) = *scheduler_state {
if notifier
.wait_for(&mut scheduler_state, duration)
.timed_out()
{
break;
}
}
if let SchedulerState::Exit = *scheduler_state {
return Break::Yes;
}
Break::No
}
fn dispatch_date(
threadpool: &ThreadPool,
dates: &Arc<RwLock<BinaryHeap<Date>>>,
pair_scheduler: &Arc<(Mutex<SchedulerState>, Condvar)>,
) {
let mut date = {
let mut dates = dates.write();
dates.pop().expect("Should not run on empty heap.")
};
let date_dispatcher = dates.clone();
let dispatcher_pair = pair_scheduler.clone();
threadpool.execute(move || {
if let DateResult::Repeat(when) = (date.job)(&mut date.context) {
date.context.time = when;
push_and_notfiy(&dispatcher_pair, &date_dispatcher, &when, date);
}
});
}
fn check_peeking_date(dates: &Arc<RwLock<BinaryHeap<Date>>>, state_lock: &Mutex<SchedulerState>) {
if let Some(next) = dates.read().peek() {
let now = Utc::now();
if next.context.time > now {
let left = next.context.time.signed_duration_since(now);
set_state_lock(&state_lock, SchedulerState::new_pause_time(left));
} else {
set_state_lock(&state_lock, SchedulerState::Run);
}
} else {
set_state_lock(&state_lock, SchedulerState::PauseEmpty);
}
}
impl Scheduler {
/// Creates a new [`Scheduler`] which will use `thread_count` number of
/// threads when tasks are being dispatched/dated.
///
/// [`Scheduler`]: struct.Scheduler.html
pub fn new(thread_count: usize) -> Self {
let pair = Arc::new((Mutex::new(SchedulerState::PauseEmpty), Condvar::new()));
let pair_scheduler = pair.clone();
let dates: Arc<RwLock<BinaryHeap<Date>>> = Arc::new(RwLock::new(BinaryHeap::new()));
let dates_scheduler = Arc::clone(&dates);
std::thread::spawn(move || {
let &(ref state_lock, ref notifier) = &*pair_scheduler;
let threadpool = ThreadPool::new(thread_count);
loop {
if let Break::Yes = process_states(&state_lock, ¬ifier) {
break;
}
dispatch_date(&threadpool, &dates_scheduler, &pair_scheduler);
check_peeking_date(&dates_scheduler, &state_lock);
}
});
Scheduler {
condvar: pair,
dates,
}
}
}
/// Once the scheduler is dropped, we also need to tell the worker thread to finish.
impl<'a> Drop for Scheduler {
fn drop(&mut self) {
let &(ref state_lock, ref notifier) = &*self.condvar;
let mut state = state_lock.lock();
*state = SchedulerState::Exit;
notifier.notify_one();
}
}
| time: | identifier_name |
lib.rs | //! *“I'm late! I'm late! For a very important date!”*
//! *by “The White Rabbit”* 『Alice's Adventures in Wonderland』
//!
//! `white_rabbit` schedules your tasks and can repeat them!
//!
//! One funny use case is chat bot commands: imagine a *remind me* command;
//! the command gets executed and you simply create a one-time job to be
//! scheduled for whatever time the user desires.
//!
//! We are using chrono's `DateTime<Utc>`, enabling you to serialise and thus
//! back up currently running tasks; in case you want to shut down and restart
//! your application, constructing a new scheduler is doable.
//! However, please make sure your internal clock is synced.
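//!
//! Dropping the `Scheduler` signals the worker thread to exit, so the handle
//! has to outlive the tasks you still expect to fire; a sketch:
//!
//! ```rust,no_run
//! use white_rabbit::{DateResult, Duration, Scheduler};
//!
//! {
//!     let mut scheduler = Scheduler::new(1);
//!     scheduler.add_task_duration(Duration::seconds(1), |_ctx| DateResult::Done);
//!     // ... keep `scheduler` around while tasks are pending ...
//! } // dropped here: the worker thread is told to exit
//! ```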
#![deny(rust_2018_idioms)]
use chrono::Duration as ChronoDuration;
use parking_lot::{Condvar, Mutex, RwLock};
use std::{cmp::Ordering, collections::BinaryHeap, sync::Arc, time::Duration as StdDuration};
use threadpool::ThreadPool;
pub use chrono::{DateTime, Duration, Utc};
/// Compare if an `enum`-variant matches another variant.
macro_rules! cmp_variant {
($expression:expr, $($variant:tt)+) => {
match $expression {
$($variant)+ => true,
_ => false
}
}
}
/// When a task is due, this will be passed to the task.
/// Currently, there is not much use to this. However, this might be extended
/// in the future.
pub struct Context {
time: DateTime<Utc>,
}
/// Every task will return this `enum`.
pub enum DateResult {
/// The task is considered finished and can be fully removed.
Done,
    /// The task will be scheduled again for the passed `DateTime<Utc>`.
Repeat(DateTime<Utc>),
}
/// Every job gets a planned `Date` with the scheduler.
pub struct Date {
pub context: Context,
pub job: Box<dyn FnMut(&mut Context) -> DateResult + Send + Sync +'static>,
}
impl Eq for Date {}
/// Invert comparisons to create a min-heap.
impl Ord for Date {
fn cmp(&self, other: &Date) -> Ordering {
match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
}
}
}
/// Invert comparisons to create a min-heap.
impl PartialOrd for Date {
fn partial_cmp(&self, other: &Date) -> Option<Ordering> {
Some(match self.context.time.cmp(&other.context.time) {
Ordering::Less => Ordering::Greater,
Ordering::Greater => Ordering::Less,
Ordering::Equal => Ordering::Equal,
})
}
}
impl PartialEq for Date {
fn eq(&self, other: &Date) -> bool {
self.context.time == other.context.time
}
}
/// The [`Scheduler`]'s worker thread switches through different states
/// while running; each state changes the behaviour.
///
/// [`Scheduler`]: struct.Scheduler.html
enum SchedulerState {
/// No dates being awaited, sleep until one gets added.
PauseEmpty,
/// Pause until next date is due.
PauseTime(StdDuration),
/// If the next date is already waiting to be executed,
/// the thread continues running without sleeping.
Run,
/// Exits the thread.
Exit,
}
impl SchedulerState {
fn is_running(&self) -> bool {
cmp_variant!(*self, SchedulerState::Run)
}
fn new_pause_time(duration: ChronoDuration) -> Self {
SchedulerState::PauseTime(
duration
.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
)
}
}
/// This scheduler exists on two levels: the handle, granting you the
/// ability to add new tasks, and the executor, dating and executing these
/// tasks when the specified time is met.
///
/// **Info**: This scheduler may not be precise due to anomalies such as
/// preemption or platform differences.
pub struct Scheduler {
    /// The means of communication with the running scheduler.
condvar: Arc<(Mutex<SchedulerState>, Condvar)>,
/// Every job has its date listed inside this.
dates: Arc<RwLock<BinaryHeap<Date>>>,
}
impl Scheduler {
/// Add a task to be executed when `time` is reached.
pub fn add_task_datetime<T>(&mut self, time: DateTime<Utc>, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync +'static,
{
let &(ref state_lock, ref notifier) = &*self.condvar;
let task = Date {
context: Context { time },
job: Box::new(to_execute),
};
let mut locked_heap = self.dates.write();
if locked_heap.is_empty() {
let mut scheduler_state = state_lock.lock();
let left = task.context.time.signed_duration_since(Utc::now());
if!scheduler_state.is_running() {
*scheduler_state = SchedulerState::new_pause_time(left);
notifier.notify_one();
}
} else {
let mut scheduler_state = state_lock.lock();
if let SchedulerState::PauseTime(_) = *scheduler_state {
let peeked = locked_heap.peek().expect("Expected heap to be filled.");
if task.context.time < peeked.context.time {
let left = task.context.time.signed_duration_since(Utc::now());
if!scheduler_state.is_running() {
*scheduler_state = SchedulerState::PauseTime(
left.to_std()
.unwrap_or_else(|_| StdDuration::from_millis(0)),
);
notifier.notify_one();
}
}
}
}
locked_heap.push(task);
}
pub fn add_task_duration<T>(&mut self, how_long: ChronoDuration, to_execute: T)
where
T: FnMut(&mut Context) -> DateResult + Send + Sync +'static,
{
let time = Utc::now() + how_long;
self.add_task_datetime(time, to_execute);
}
}
fn set_state_lock(state_lock: &Mutex<SchedulerState>, to_set: SchedulerState) {
let mut state = state_lock.lock();
*state = to_set;
}
#[inline]
fn _push_and_notfiy(date: Date, heap: &mut BinaryHeap<Date>, notifier: &Condvar) {
heap.push(date);
notifier.notify_one();
}
/// This function pushes a `date` onto `data_pooled` and notifies the
/// dispatching thread in case it is sleeping.
#[inline]
fn push_and_notfiy(
dispatcher_pair: &Arc<(Mutex<SchedulerState>, Condvar)>,
data_pooled: &Arc<RwLock<BinaryHeap<Date>>>,
when: &DateTime<Utc>,
date: Date,
) {
let &(ref state_lock, ref notifier) = &**dispatcher_pair;
let mut state = state_lock.lock();
let mut heap_lock = data_pooled.write();
if let Some(peek) = heap_lock.peek() {
if peek.context.time < *when {
let left = peek.context.time.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
} else {
let left = when.signed_duration_since(Utc::now());
*state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
}
} else {
let left = when.signed_duration_since(Utc::now());
|
}
#[must_use]
enum Break {
Yes,
No,
}
#[inline]
fn process_states(state_lock: &Mutex<SchedulerState>, notifier: &Condvar) -> Break {
let mut scheduler_state = state_lock.lock();
while let SchedulerState::PauseEmpty = *scheduler_state {
notifier.wait(&mut scheduler_state);
}
while let SchedulerState::PauseTime(duration) = *scheduler_state {
if notifier
.wait_for(&mut scheduler_state, duration)
.timed_out()
{
break;
}
}
if let SchedulerState::Exit = *scheduler_state {
return Break::Yes;
}
Break::No
}
fn dispatch_date(
threadpool: &ThreadPool,
dates: &Arc<RwLock<BinaryHeap<Date>>>,
pair_scheduler: &Arc<(Mutex<SchedulerState>, Condvar)>,
) {
let mut date = {
let mut dates = dates.write();
dates.pop().expect("Should not run on empty heap.")
};
let date_dispatcher = dates.clone();
let dispatcher_pair = pair_scheduler.clone();
threadpool.execute(move || {
if let DateResult::Repeat(when) = (date.job)(&mut date.context) {
date.context.time = when;
push_and_notfiy(&dispatcher_pair, &date_dispatcher, &when, date);
}
});
}
fn check_peeking_date(dates: &Arc<RwLock<BinaryHeap<Date>>>, state_lock: &Mutex<SchedulerState>) {
if let Some(next) = dates.read().peek() {
let now = Utc::now();
if next.context.time > now {
let left = next.context.time.signed_duration_since(now);
set_state_lock(&state_lock, SchedulerState::new_pause_time(left));
} else {
set_state_lock(&state_lock, SchedulerState::Run);
}
} else {
set_state_lock(&state_lock, SchedulerState::PauseEmpty);
}
}
impl Scheduler {
/// Creates a new [`Scheduler`] which will use `thread_count` number of
/// threads when tasks are being dispatched/dated.
///
/// [`Scheduler`]: struct.Scheduler.html
pub fn new(thread_count: usize) -> Self {
let pair = Arc::new((Mutex::new(SchedulerState::PauseEmpty), Condvar::new()));
let pair_scheduler = pair.clone();
let dates: Arc<RwLock<BinaryHeap<Date>>> = Arc::new(RwLock::new(BinaryHeap::new()));
let dates_scheduler = Arc::clone(&dates);
std::thread::spawn(move || {
let &(ref state_lock, ref notifier) = &*pair_scheduler;
let threadpool = ThreadPool::new(thread_count);
loop {
if let Break::Yes = process_states(&state_lock, ¬ifier) {
break;
}
dispatch_date(&threadpool, &dates_scheduler, &pair_scheduler);
check_peeking_date(&dates_scheduler, &state_lock);
}
});
Scheduler {
condvar: pair,
dates,
}
}
}
/// Once the scheduler is dropped, we also need to tell the worker thread to finish.
impl<'a> Drop for Scheduler {
fn drop(&mut self) {
let &(ref state_lock, ref notifier) = &*self.condvar;
let mut state = state_lock.lock();
*state = SchedulerState::Exit;
notifier.notify_one();
}
} | *state = SchedulerState::new_pause_time(left);
_push_and_notfiy(date, &mut heap_lock, ¬ifier);
} | random_line_split |
lib.rs | //! # Lattice Client
//!
//! This library provides a client that communicates with a waSCC lattice using
//! the lattice protocol over the NATS message broker. All waSCC hosts compiled
//! in lattice mode have the ability to automatically form self-healing, self-managing
//! infrastructure-agnostic clusters called [lattices](https://wascc.dev/docs/lattice/overview/)
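//!
//! A minimal sketch of probing a lattice for its hosts. The NATS URL and the
//! five-second timeout are placeholder values, and the crate is assumed to be
//! imported under the name `latticeclient`:
//!
//! ```rust,no_run
//! use std::time::Duration;
//! use latticeclient::Client;
//!
//! let client = Client::new("nats://localhost:4222", None, Duration::from_secs(5), None);
//! for host in client.get_hosts().unwrap() {
//!     println!("host {} has been up for {} ms", host.id, host.uptime_ms);
//! }
//! ```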
extern crate log;
#[macro_use]
extern crate serde;
use std::{collections::HashMap, path::PathBuf, time::Duration};
use crossbeam::Sender;
use wascap::prelude::*;
use controlplane::{
LaunchAck, LaunchAuctionRequest, LaunchAuctionResponse, LaunchCommand, TerminateCommand,
};
pub use events::{BusEvent, CloudEvent};
use crate::controlplane::{
LaunchProviderCommand, ProviderAuctionRequest, ProviderAuctionResponse, ProviderLaunchAck,
};
pub mod controlplane;
mod events;
pub const INVENTORY_ACTORS: &str = "inventory.actors";
pub const INVENTORY_HOSTS: &str = "inventory.hosts";
pub const INVENTORY_BINDINGS: &str = "inventory.bindings";
pub const INVENTORY_CAPABILITIES: &str = "inventory.capabilities";
pub const EVENTS: &str = "events";
const AUCTION_TIMEOUT_SECONDS: u64 = 5;
/// A response to a lattice probe for inventory. Note that these responses are returned
/// through regular (non-queue) subscriptions via a scatter-gather like pattern, so the
/// client is responsible for aggregating many of these replies.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub enum InventoryResponse {
/// A single host probe response
Host(HostProfile),
/// A list of all registered actors within a host
Actors {
host: String,
actors: Vec<Claims<Actor>>,
},
/// A list of configuration bindings of actors originating from the given host
Bindings {
host: String,
bindings: Vec<Binding>,
},
/// A list of capability providers currently running within the given host
Capabilities {
host: String,
capabilities: Vec<HostedCapability>,
},
}
/// An overview of host information
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct HostProfile {
/// The public key (subject) of the host
pub id: String,
/// The host's labels
pub labels: HashMap<String, String>,
/// Host uptime in milliseconds
pub uptime_ms: u128,
}
/// Represents an instance of a capability, which is a binding name and
/// the capability descriptor
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct HostedCapability {
pub binding_name: String,
pub descriptor: wascc_codec::capabilities::CapabilityDescriptor,
}
/// Represents a single configuration binding from an actor to a capability ID and binding
/// name, with the specified configuration values.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Binding {
pub actor: String,
pub capability_id: String,
pub binding_name: String,
pub configuration: HashMap<String, String>,
}
/// A client for interacting with the lattice
pub struct Client {
nc: nats::Connection,
namespace: Option<String>,
timeout: Duration,
}
impl Client {
/// Creates a new lattice client, connecting to the NATS server at the
/// given host with an optional set of credentials (JWT auth)
pub fn new(
host: &str,
credsfile: Option<PathBuf>,
call_timeout: Duration,
namespace: Option<String>,
) -> Self {
Client {
nc: get_connection(host, credsfile),
timeout: call_timeout,
namespace,
}
}
pub fn with_connection(
nc: nats::Connection,
call_timeout: Duration,
namespace: Option<String>,
) -> Self {
Client {
nc,
timeout: call_timeout,
namespace,
}
}
/// Retrieves the list of all hosts running within the lattice. If it takes a host longer
/// than the call timeout period to reply to the probe, it will not be included in the list
/// of hosts.
pub fn get_hosts(&self) -> std::result::Result<Vec<HostProfile>, Box<dyn std::error::Error>> {
let mut hosts = vec![];
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_HOSTS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Host(h) = ir {
hosts.push(h);
}
}
Ok(hosts)
}
/// Retrieves a list of all bindings from actors to capabilities within the lattice (provided
/// the host responds to the probe within the client timeout period)
pub fn get_bindings(
&self,
) -> std::result::Result<HashMap<String, Vec<Binding>>, Box<dyn std::error::Error>> {
let mut host_bindings = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_BINDINGS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Bindings { bindings: b, host } = ir {
host_bindings
.entry(host)
.and_modify(|e: &mut Vec<Binding>| e.extend_from_slice(&b))
.or_insert(b.clone());
}
}
Ok(host_bindings)
}
/// Retrieves the list of all actors currently running within the lattice (as discovered within
/// the client timeout period)
pub fn get_actors(
&self,
) -> std::result::Result<HashMap<String, Vec<Claims<Actor>>>, Box<dyn std::error::Error>> |
/// Retrieves the list of all capabilities within the lattice (discovery limited by the client timeout period)
pub fn get_capabilities(
&self,
) -> std::result::Result<HashMap<String, Vec<HostedCapability>>, Box<dyn std::error::Error>>
{
let mut host_caps = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_CAPABILITIES).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Capabilities { host, capabilities } = ir {
host_caps
.entry(host)
.and_modify(|e: &mut Vec<HostedCapability>| e.extend_from_slice(&capabilities))
.or_insert(capabilities.clone());
}
}
Ok(host_caps)
}
/// Watches the lattice for bus events. This will create a subscription in a background thread, so callers
    /// are responsible for ensuring their process remains alive for however long is appropriate. Pass the sender
    /// half of a channel to receive the events.
pub fn watch_events(&self, sender: Sender<BusEvent>) -> Result<(), Box<dyn std::error::Error>> {
let _sub = self
.nc
.subscribe(self.gen_subject(EVENTS).as_ref())?
.with_handler(move |msg| {
let ce: CloudEvent = serde_json::from_slice(&msg.data).unwrap();
let be: BusEvent = serde_json::from_str(&ce.data).unwrap();
let _ = sender.send(be);
Ok(())
});
Ok(())
}
/// Performs an auction among all hosts on the lattice, requesting that the given actor be launched (loaded+started)
/// on a suitable host as described by the set of constraints. Only hosts that believe they can launch the actor
/// will reply. In other words, there will be no negative responses in the result vector, only a list of suitable
/// hosts. The actor to be launched is identified by an OCI registry reference
pub fn perform_actor_launch_auction(
&self,
actor_id: &str,
constraints: HashMap<String, String>,
) -> Result<Vec<LaunchAuctionResponse>, Box<dyn std::error::Error>> {
let mut results = vec![];
let req = LaunchAuctionRequest::new(actor_id, constraints);
let sub = self.nc.request_multi(
self.gen_subject(&format!(
"{}.{}",
controlplane::CPLANE_PREFIX,
controlplane::AUCTION_REQ
))
.as_ref(),
&serde_json::to_vec(&req)?,
)?;
for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) {
let resp: LaunchAuctionResponse = serde_json::from_slice(&msg.data)?;
results.push(resp);
}
Ok(results)
}
/// Performs an auction among all hosts on the lattice, requesting that the given capability provider
/// (indicated by OCI image reference) be loaded/started. Hosts that believe they can host the
/// provider given the constraints will respond to the auction
pub fn perform_provider_launch_auction(
&self,
provider_ref: &str,
binding_name: &str,
constraints: HashMap<String, String>,
) -> Result<Vec<ProviderAuctionResponse>, Box<dyn std::error::Error>> {
let mut results = vec![];
let req = ProviderAuctionRequest::new(provider_ref, binding_name, constraints);
let sub = self.nc.request_multi(
self.gen_subject(&format!(
"{}.{}",
controlplane::CPLANE_PREFIX,
controlplane::PROVIDER_AUCTION_REQ
))
.as_ref(),
&serde_json::to_vec(&req)?,
)?;
for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) {
let resp: ProviderAuctionResponse = serde_json::from_slice(&msg.data)?;
results.push(resp);
}
Ok(results)
}
/// After collecting the results of a provider launch auction, a "winner" from among the hosts
/// can be selected and told to launch the given provider. The provider's bytes will be retrieved
/// from the OCI registry. This function does _not_ confirm successful launch, only receipt
/// of the launch request.
pub fn launch_provider_on_host(
&self,
provider_ref: &str,
host_id: &str,
binding_name: &str,
) -> Result<ProviderLaunchAck, Box<dyn std::error::Error>> {
let msg = LaunchProviderCommand {
provider_ref: provider_ref.to_string(),
binding_name: binding_name.to_string(),
};
let ack: ProviderLaunchAck = serde_json::from_slice(
&self
.nc
.request_timeout(
&self.gen_launch_provider_subject(host_id),
&serde_json::to_vec(&msg)?,
self.timeout,
)?
.data,
)?;
Ok(ack)
}
/// After collecting the results of a launch auction, a "winner" from among the hosts can be selected and
/// told to launch a given actor. Note that the actor's bytes will be retrieved from the OCI registry.
/// This function does _not_ confirm successful launch, only that the target host acknowledged the request
/// to launch.
pub fn launch_actor_on_host(
&self,
actor_id: &str,
host_id: &str,
) -> Result<LaunchAck, Box<dyn std::error::Error>> {
let msg = LaunchCommand {
actor_id: actor_id.to_string(),
};
let ack: LaunchAck = serde_json::from_slice(
&self
.nc
.request_timeout(
&self.gen_launch_actor_subject(host_id),
&serde_json::to_vec(&msg)?,
self.timeout,
)?
.data,
)?;
Ok(ack)
}
/// Sends a command to the specified host telling it to terminate an actor. The success of this command indicates
/// a successful publication, and not necessarily a successful remote actor termination. Monitor the lattice
/// events to see if the actor was successfully terminated
pub fn stop_actor_on_host(
&self,
actor_id: &str,
host_id: &str,
) -> Result<(), Box<dyn std::error::Error>> {
let msg = TerminateCommand {
actor_id: actor_id.to_string(),
};
self.nc.publish(
&self.gen_terminate_actor_subject(host_id),
&serde_json::to_vec(&msg)?,
)?;
let _ = self.nc.flush();
Ok(())
}
fn gen_subject(&self, subject: &str) -> String {
match self.namespace.as_ref() {
Some(s) => format!("{}.wasmbus.{}", s, subject),
None => format!("wasmbus.{}", subject),
}
}
}
fn get_connection(host: &str, credsfile: Option<PathBuf>) -> nats::Connection {
let mut opts = if let Some(creds) = credsfile {
nats::Options::with_credentials(creds)
} else {
nats::Options::new()
};
opts = opts.with_name("waSCC Lattice");
opts.connect(host).unwrap()
}
| {
let mut host_actors = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_ACTORS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Actors { host, actors } = ir {
host_actors
.entry(host)
.and_modify(|e: &mut Vec<Claims<Actor>>| e.extend_from_slice(&actors))
.or_insert(actors.clone());
}
}
Ok(host_actors)
} | identifier_body |
lib.rs | //! # Lattice Client
//!
//! This library provides a client that communicates with a waSCC lattice using
//! the lattice protocol over the NATS message broker. All waSCC hosts compiled
//! in lattice mode have the ability to automatically form self-healing, self-managing
//! infrastructure-agnostic clusters called [lattices](https://wascc.dev/docs/lattice/overview/)
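//!
//! Launching an actor usually follows an auction pattern: ask the lattice
//! which hosts can run it, then pick one of the responders. A rough sketch;
//! the OCI reference, the constraint, and the crate name `latticeclient` are
//! assumptions:
//!
//! ```rust,no_run
//! use std::{collections::HashMap, time::Duration};
//! use latticeclient::Client;
//!
//! let client = Client::new("nats://localhost:4222", None, Duration::from_secs(2), None);
//!
//! let mut constraints = HashMap::new();
//! constraints.insert("os".to_string(), "linux".to_string());
//!
//! // Collect offers from hosts that believe they can run the actor.
//! let offers = client
//!     .perform_actor_launch_auction("example.azurecr.io/echo:latest", constraints)
//!     .unwrap();
//! println!("{} host(s) offered to run the actor", offers.len());
//!
//! // Or skip the auction and target a known host from the inventory.
//! if let Some(host) = client.get_hosts().unwrap().first() {
//!     let _ack = client
//!         .launch_actor_on_host("example.azurecr.io/echo:latest", &host.id)
//!         .unwrap();
//! }
//! ```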
extern crate log;
#[macro_use]
extern crate serde;
use std::{collections::HashMap, path::PathBuf, time::Duration};
use crossbeam::Sender;
use wascap::prelude::*;
use controlplane::{
LaunchAck, LaunchAuctionRequest, LaunchAuctionResponse, LaunchCommand, TerminateCommand,
};
pub use events::{BusEvent, CloudEvent};
use crate::controlplane::{
LaunchProviderCommand, ProviderAuctionRequest, ProviderAuctionResponse, ProviderLaunchAck,
};
pub mod controlplane;
mod events;
pub const INVENTORY_ACTORS: &str = "inventory.actors";
pub const INVENTORY_HOSTS: &str = "inventory.hosts";
pub const INVENTORY_BINDINGS: &str = "inventory.bindings";
pub const INVENTORY_CAPABILITIES: &str = "inventory.capabilities";
pub const EVENTS: &str = "events";
const AUCTION_TIMEOUT_SECONDS: u64 = 5;
/// A response to a lattice probe for inventory. Note that these responses are returned
/// through regular (non-queue) subscriptions via a scatter-gather like pattern, so the
/// client is responsible for aggregating many of these replies.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub enum InventoryResponse {
/// A single host probe response
Host(HostProfile),
/// A list of all registered actors within a host
Actors {
host: String,
actors: Vec<Claims<Actor>>,
},
/// A list of configuration bindings of actors originating from the given host
Bindings {
host: String,
bindings: Vec<Binding>,
},
/// A list of capability providers currently running within the given host
Capabilities {
host: String,
capabilities: Vec<HostedCapability>,
},
}
/// An overview of host information
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct HostProfile {
/// The public key (subject) of the host
pub id: String,
/// The host's labels
pub labels: HashMap<String, String>,
/// Host uptime in milliseconds
pub uptime_ms: u128,
}
/// Represents an instance of a capability, which is a binding name and
/// the capability descriptor
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct HostedCapability {
pub binding_name: String,
pub descriptor: wascc_codec::capabilities::CapabilityDescriptor,
}
/// Represents a single configuration binding from an actor to a capability ID and binding
/// name, with the specified configuration values.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Binding {
pub actor: String,
pub capability_id: String,
pub binding_name: String,
pub configuration: HashMap<String, String>,
}
/// A client for interacting with the lattice
pub struct Client {
nc: nats::Connection,
namespace: Option<String>,
timeout: Duration,
}
impl Client {
/// Creates a new lattice client, connecting to the NATS server at the
/// given host with an optional set of credentials (JWT auth)
pub fn new(
host: &str,
credsfile: Option<PathBuf>,
call_timeout: Duration,
namespace: Option<String>,
) -> Self {
Client {
nc: get_connection(host, credsfile),
timeout: call_timeout,
namespace,
}
}
pub fn with_connection(
nc: nats::Connection,
call_timeout: Duration,
namespace: Option<String>,
) -> Self {
Client {
nc,
timeout: call_timeout,
namespace,
}
}
/// Retrieves the list of all hosts running within the lattice. If it takes a host longer
/// than the call timeout period to reply to the probe, it will not be included in the list
/// of hosts.
pub fn get_hosts(&self) -> std::result::Result<Vec<HostProfile>, Box<dyn std::error::Error>> {
let mut hosts = vec![];
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_HOSTS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Host(h) = ir {
hosts.push(h);
}
}
Ok(hosts)
}
/// Retrieves a list of all bindings from actors to capabilities within the lattice (provided
/// the host responds to the probe within the client timeout period)
pub fn get_bindings(
&self,
) -> std::result::Result<HashMap<String, Vec<Binding>>, Box<dyn std::error::Error>> {
let mut host_bindings = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_BINDINGS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Bindings { bindings: b, host } = ir {
host_bindings
.entry(host)
.and_modify(|e: &mut Vec<Binding>| e.extend_from_slice(&b))
.or_insert(b.clone());
}
}
Ok(host_bindings)
}
/// Retrieves the list of all actors currently running within the lattice (as discovered within
/// the client timeout period)
pub fn get_actors(
&self,
) -> std::result::Result<HashMap<String, Vec<Claims<Actor>>>, Box<dyn std::error::Error>> {
let mut host_actors = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_ACTORS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Actors { host, actors } = ir {
host_actors
.entry(host)
.and_modify(|e: &mut Vec<Claims<Actor>>| e.extend_from_slice(&actors))
.or_insert(actors.clone());
}
}
Ok(host_actors)
}
/// Retrieves the list of all capabilities within the lattice (discovery limited by the client timeout period)
pub fn get_capabilities(
&self,
) -> std::result::Result<HashMap<String, Vec<HostedCapability>>, Box<dyn std::error::Error>>
{
let mut host_caps = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_CAPABILITIES).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Capabilities { host, capabilities } = ir {
host_caps
.entry(host)
.and_modify(|e: &mut Vec<HostedCapability>| e.extend_from_slice(&capabilities))
.or_insert(capabilities.clone());
}
}
Ok(host_caps)
}
/// Watches the lattice for bus events. This will create a subscription in a background thread, so callers
    /// are responsible for ensuring their process remains alive for however long is appropriate. Pass the sender
    /// half of a channel to receive the events.
pub fn watch_events(&self, sender: Sender<BusEvent>) -> Result<(), Box<dyn std::error::Error>> {
let _sub = self
.nc
.subscribe(self.gen_subject(EVENTS).as_ref())?
.with_handler(move |msg| {
let ce: CloudEvent = serde_json::from_slice(&msg.data).unwrap();
let be: BusEvent = serde_json::from_str(&ce.data).unwrap();
let _ = sender.send(be);
Ok(())
});
Ok(())
}
/// Performs an auction among all hosts on the lattice, requesting that the given actor be launched (loaded+started)
/// on a suitable host as described by the set of constraints. Only hosts that believe they can launch the actor
/// will reply. In other words, there will be no negative responses in the result vector, only a list of suitable
/// hosts. The actor to be launched is identified by an OCI registry reference
pub fn perform_actor_launch_auction(
&self,
actor_id: &str,
constraints: HashMap<String, String>,
) -> Result<Vec<LaunchAuctionResponse>, Box<dyn std::error::Error>> {
let mut results = vec![];
let req = LaunchAuctionRequest::new(actor_id, constraints);
let sub = self.nc.request_multi(
self.gen_subject(&format!(
"{}.{}",
controlplane::CPLANE_PREFIX,
controlplane::AUCTION_REQ
))
.as_ref(),
&serde_json::to_vec(&req)?,
)?;
for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) {
let resp: LaunchAuctionResponse = serde_json::from_slice(&msg.data)?;
results.push(resp);
}
Ok(results)
}
/// Performs an auction among all hosts on the lattice, requesting that the given capability provider
/// (indicated by OCI image reference) be loaded/started. Hosts that believe they can host the
/// provider given the constraints will respond to the auction
pub fn perform_provider_launch_auction(
&self,
provider_ref: &str,
binding_name: &str,
constraints: HashMap<String, String>,
) -> Result<Vec<ProviderAuctionResponse>, Box<dyn std::error::Error>> {
let mut results = vec![];
let req = ProviderAuctionRequest::new(provider_ref, binding_name, constraints);
let sub = self.nc.request_multi(
self.gen_subject(&format!(
"{}.{}",
controlplane::CPLANE_PREFIX,
controlplane::PROVIDER_AUCTION_REQ
))
.as_ref(),
&serde_json::to_vec(&req)?,
)?;
for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) {
let resp: ProviderAuctionResponse = serde_json::from_slice(&msg.data)?;
results.push(resp);
}
Ok(results)
}
/// After collecting the results of a provider launch auction, a "winner" from among the hosts
/// can be selected and told to launch the given provider. The provider's bytes will be retrieved
/// from the OCI registry. This function does _not_ confirm successful launch, only receipt
/// of the launch request.
pub fn launch_provider_on_host(
&self,
provider_ref: &str,
host_id: &str,
binding_name: &str,
) -> Result<ProviderLaunchAck, Box<dyn std::error::Error>> {
let msg = LaunchProviderCommand {
provider_ref: provider_ref.to_string(),
binding_name: binding_name.to_string(),
};
let ack: ProviderLaunchAck = serde_json::from_slice(
&self
.nc
.request_timeout(
&self.gen_launch_provider_subject(host_id),
&serde_json::to_vec(&msg)?,
self.timeout,
)?
.data,
)?;
Ok(ack)
}
/// After collecting the results of a launch auction, a "winner" from among the hosts can be selected and
/// told to launch a given actor. Note that the actor's bytes will be retrieved from the OCI registry.
/// This function does _not_ confirm successful launch, only that the target host acknowledged the request
/// to launch.
pub fn launch_actor_on_host(
&self,
actor_id: &str,
host_id: &str,
) -> Result<LaunchAck, Box<dyn std::error::Error>> {
let msg = LaunchCommand {
actor_id: actor_id.to_string(),
};
let ack: LaunchAck = serde_json::from_slice(
&self
.nc
.request_timeout(
&self.gen_launch_actor_subject(host_id),
&serde_json::to_vec(&msg)?,
self.timeout,
)?
.data,
)?;
Ok(ack)
}
/// Sends a command to the specified host telling it to terminate an actor. The success of this command indicates
/// a successful publication, and not necessarily a successful remote actor termination. Monitor the lattice
/// events to see if the actor was successfully terminated
pub fn stop_actor_on_host(
&self,
actor_id: &str,
host_id: &str,
) -> Result<(), Box<dyn std::error::Error>> {
let msg = TerminateCommand {
actor_id: actor_id.to_string(),
};
self.nc.publish(
&self.gen_terminate_actor_subject(host_id),
&serde_json::to_vec(&msg)?,
)?;
let _ = self.nc.flush();
Ok(())
}
fn | (&self, subject: &str) -> String {
match self.namespace.as_ref() {
Some(s) => format!("{}.wasmbus.{}", s, subject),
None => format!("wasmbus.{}", subject),
}
}
}
fn get_connection(host: &str, credsfile: Option<PathBuf>) -> nats::Connection {
let mut opts = if let Some(creds) = credsfile {
nats::Options::with_credentials(creds)
} else {
nats::Options::new()
};
opts = opts.with_name("waSCC Lattice");
opts.connect(host).unwrap()
}
| gen_subject | identifier_name |
lib.rs | //! # Lattice Client
//!
//! This library provides a client that communicates with a waSCC lattice using
//! the lattice protocol over the NATS message broker. All waSCC hosts compiled
//! in lattice mode have the ability to automatically form self-healing, self-managing
//! infrastructure-agnostic clusters called [lattices](https://wascc.dev/docs/lattice/overview/)
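//!
//! A minimal usage sketch (the crate path below is a placeholder for however this
//! library is imported in your project):
//!
//! ```ignore
//! use std::time::Duration;
//! use lattice_client::Client;
//!
//! let client = Client::new("127.0.0.1", None, Duration::from_millis(500), None);
//! for host in client.get_hosts().unwrap() {
//!     println!("host {} has been up for {} ms", host.id, host.uptime_ms);
//! }
//! ```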
extern crate log;
#[macro_use]
extern crate serde;
use std::{collections::HashMap, path::PathBuf, time::Duration};
use crossbeam::Sender;
use wascap::prelude::*;
use controlplane::{
LaunchAck, LaunchAuctionRequest, LaunchAuctionResponse, LaunchCommand, TerminateCommand,
};
pub use events::{BusEvent, CloudEvent};
use crate::controlplane::{
LaunchProviderCommand, ProviderAuctionRequest, ProviderAuctionResponse, ProviderLaunchAck,
};
pub mod controlplane;
mod events; | pub const INVENTORY_CAPABILITIES: &str = "inventory.capabilities";
pub const EVENTS: &str = "events";
const AUCTION_TIMEOUT_SECONDS: u64 = 5;
/// A response to a lattice probe for inventory. Note that these responses are returned
/// through regular (non-queue) subscriptions via a scatter-gather like pattern, so the
/// client is responsible for aggregating many of these replies.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub enum InventoryResponse {
/// A single host probe response
Host(HostProfile),
/// A list of all registered actors within a host
Actors {
host: String,
actors: Vec<Claims<Actor>>,
},
/// A list of configuration bindings of actors originating from the given host
Bindings {
host: String,
bindings: Vec<Binding>,
},
/// A list of capability providers currently running within the given host
Capabilities {
host: String,
capabilities: Vec<HostedCapability>,
},
}
/// An overview of host information
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct HostProfile {
/// The public key (subject) of the host
pub id: String,
/// The host's labels
pub labels: HashMap<String, String>,
/// Host uptime in milliseconds
pub uptime_ms: u128,
}
/// Represents an instance of a capability, which is a binding name and
/// the capability descriptor
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct HostedCapability {
pub binding_name: String,
pub descriptor: wascc_codec::capabilities::CapabilityDescriptor,
}
/// Represents a single configuration binding from an actor to a capability ID and binding
/// name, with the specified configuration values.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct Binding {
pub actor: String,
pub capability_id: String,
pub binding_name: String,
pub configuration: HashMap<String, String>,
}
/// A client for interacting with the lattice
pub struct Client {
nc: nats::Connection,
namespace: Option<String>,
timeout: Duration,
}
impl Client {
/// Creates a new lattice client, connecting to the NATS server at the
/// given host with an optional set of credentials (JWT auth)
pub fn new(
host: &str,
credsfile: Option<PathBuf>,
call_timeout: Duration,
namespace: Option<String>,
) -> Self {
Client {
nc: get_connection(host, credsfile),
timeout: call_timeout,
namespace,
}
}
pub fn with_connection(
nc: nats::Connection,
call_timeout: Duration,
namespace: Option<String>,
) -> Self {
Client {
nc,
timeout: call_timeout,
namespace,
}
}
/// Retrieves the list of all hosts running within the lattice. If it takes a host longer
/// than the call timeout period to reply to the probe, it will not be included in the list
/// of hosts.
pub fn get_hosts(&self) -> std::result::Result<Vec<HostProfile>, Box<dyn std::error::Error>> {
let mut hosts = vec![];
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_HOSTS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Host(h) = ir {
hosts.push(h);
}
}
Ok(hosts)
}
/// Retrieves a list of all bindings from actors to capabilities within the lattice (provided
/// the host responds to the probe within the client timeout period)
pub fn get_bindings(
&self,
) -> std::result::Result<HashMap<String, Vec<Binding>>, Box<dyn std::error::Error>> {
let mut host_bindings = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_BINDINGS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Bindings { bindings: b, host } = ir {
host_bindings
.entry(host)
.and_modify(|e: &mut Vec<Binding>| e.extend_from_slice(&b))
.or_insert(b.clone());
}
}
Ok(host_bindings)
}
/// Retrieves the list of all actors currently running within the lattice (as discovered within
/// the client timeout period)
pub fn get_actors(
&self,
) -> std::result::Result<HashMap<String, Vec<Claims<Actor>>>, Box<dyn std::error::Error>> {
let mut host_actors = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_ACTORS).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Actors { host, actors } = ir {
host_actors
.entry(host)
.and_modify(|e: &mut Vec<Claims<Actor>>| e.extend_from_slice(&actors))
.or_insert(actors.clone());
}
}
Ok(host_actors)
}
/// Retrieves the list of all capabilities within the lattice (discovery limited by the client timeout period)
pub fn get_capabilities(
&self,
) -> std::result::Result<HashMap<String, Vec<HostedCapability>>, Box<dyn std::error::Error>>
{
let mut host_caps = HashMap::new();
let sub = self
.nc
.request_multi(self.gen_subject(INVENTORY_CAPABILITIES).as_ref(), &[])?;
for msg in sub.timeout_iter(self.timeout) {
let ir: InventoryResponse = serde_json::from_slice(&msg.data)?;
if let InventoryResponse::Capabilities { host, capabilities } = ir {
host_caps
.entry(host)
.and_modify(|e: &mut Vec<HostedCapability>| e.extend_from_slice(&capabilities))
.or_insert(capabilities.clone());
}
}
Ok(host_caps)
}
/// Watches the lattice for bus events. This will create a subscription in a background thread, so callers
/// are responsible for ensuring their process remains alive however long is appropriate. Pass the sender
/// half of a channel to receive the events
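///
/// Sketch of a typical consumer (the crossbeam constructor path is an assumption about
/// the channel crate version in use):
///
/// ```ignore
/// let (tx, rx) = crossbeam::channel::unbounded();
/// client.watch_events(tx)?;
/// std::thread::spawn(move || {
///     for _event in rx.iter() {
///         // update your own state, logs or metrics from the BusEvent here
///     }
/// });
/// ```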
pub fn watch_events(&self, sender: Sender<BusEvent>) -> Result<(), Box<dyn std::error::Error>> {
let _sub = self
.nc
.subscribe(self.gen_subject(EVENTS).as_ref())?
.with_handler(move |msg| {
let ce: CloudEvent = serde_json::from_slice(&msg.data).unwrap();
let be: BusEvent = serde_json::from_str(&ce.data).unwrap();
let _ = sender.send(be);
Ok(())
});
Ok(())
}
/// Performs an auction among all hosts on the lattice, requesting that the given actor be launched (loaded+started)
/// on a suitable host as described by the set of constraints. Only hosts that believe they can launch the actor
/// will reply. In other words, there will be no negative responses in the result vector, only a list of suitable
/// hosts. The actor to be launched is identified by an OCI registry reference
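///
/// Illustrative flow (the OCI reference below is a placeholder):
///
/// ```ignore
/// let offers = client.perform_actor_launch_auction(
///     "registry.example.com/echo:v1",
///     std::collections::HashMap::new(),
/// )?;
/// if let Some(_winner) = offers.first() {
///     // Take the winning host's ID from the response and call `launch_actor_on_host`.
/// }
/// ```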
pub fn perform_actor_launch_auction(
&self,
actor_id: &str,
constraints: HashMap<String, String>,
) -> Result<Vec<LaunchAuctionResponse>, Box<dyn std::error::Error>> {
let mut results = vec![];
let req = LaunchAuctionRequest::new(actor_id, constraints);
let sub = self.nc.request_multi(
self.gen_subject(&format!(
"{}.{}",
controlplane::CPLANE_PREFIX,
controlplane::AUCTION_REQ
))
.as_ref(),
&serde_json::to_vec(&req)?,
)?;
for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) {
let resp: LaunchAuctionResponse = serde_json::from_slice(&msg.data)?;
results.push(resp);
}
Ok(results)
}
/// Performs an auction among all hosts on the lattice, requesting that the given capability provider
/// (indicated by OCI image reference) be loaded/started. Hosts that believe they can host the
/// provider given the constraints will respond to the auction
pub fn perform_provider_launch_auction(
&self,
provider_ref: &str,
binding_name: &str,
constraints: HashMap<String, String>,
) -> Result<Vec<ProviderAuctionResponse>, Box<dyn std::error::Error>> {
let mut results = vec![];
let req = ProviderAuctionRequest::new(provider_ref, binding_name, constraints);
let sub = self.nc.request_multi(
self.gen_subject(&format!(
"{}.{}",
controlplane::CPLANE_PREFIX,
controlplane::PROVIDER_AUCTION_REQ
))
.as_ref(),
&serde_json::to_vec(&req)?,
)?;
for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) {
let resp: ProviderAuctionResponse = serde_json::from_slice(&msg.data)?;
results.push(resp);
}
Ok(results)
}
/// After collecting the results of a provider launch auction, a "winner" from among the hosts
/// can be selected and told to launch the given provider. The provider's bytes will be retrieved
/// from the OCI registry. This function does _not_ confirm successful launch, only receipt
/// of the launch request.
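///
/// Illustrative sequence (the OCI reference, host key and binding name are placeholders):
///
/// ```ignore
/// let offers = client.perform_provider_launch_auction(
///     "registry.example.com/httpserver:v1",
///     "default",
///     std::collections::HashMap::new(),
/// )?;
/// if !offers.is_empty() {
///     let _ack = client.launch_provider_on_host(
///         "registry.example.com/httpserver:v1",
///         "NXXX...HOSTKEY",
///         "default",
///     )?;
/// }
/// ```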
pub fn launch_provider_on_host(
&self,
provider_ref: &str,
host_id: &str,
binding_name: &str,
) -> Result<ProviderLaunchAck, Box<dyn std::error::Error>> {
let msg = LaunchProviderCommand {
provider_ref: provider_ref.to_string(),
binding_name: binding_name.to_string(),
};
let ack: ProviderLaunchAck = serde_json::from_slice(
&self
.nc
.request_timeout(
&self.gen_launch_provider_subject(host_id),
&serde_json::to_vec(&msg)?,
self.timeout,
)?
.data,
)?;
Ok(ack)
}
/// After collecting the results of a launch auction, a "winner" from among the hosts can be selected and
/// told to launch a given actor. Note that the actor's bytes will be retrieved from the OCI registry.
/// This function does _not_ confirm successful launch, only that the target host acknowledged the request
/// to launch.
pub fn launch_actor_on_host(
&self,
actor_id: &str,
host_id: &str,
) -> Result<LaunchAck, Box<dyn std::error::Error>> {
let msg = LaunchCommand {
actor_id: actor_id.to_string(),
};
let ack: LaunchAck = serde_json::from_slice(
&self
.nc
.request_timeout(
&self.gen_launch_actor_subject(host_id),
&serde_json::to_vec(&msg)?,
self.timeout,
)?
.data,
)?;
Ok(ack)
}
/// Sends a command to the specified host telling it to terminate an actor. The success of this command indicates
/// a successful publication, and not necessarily a successful remote actor termination. Monitor the lattice
/// events to see if the actor was successfully terminated
pub fn stop_actor_on_host(
&self,
actor_id: &str,
host_id: &str,
) -> Result<(), Box<dyn std::error::Error>> {
let msg = TerminateCommand {
actor_id: actor_id.to_string(),
};
self.nc.publish(
&self.gen_terminate_actor_subject(host_id),
&serde_json::to_vec(&msg)?,
)?;
let _ = self.nc.flush();
Ok(())
}
fn gen_subject(&self, subject: &str) -> String {
match self.namespace.as_ref() {
Some(s) => format!("{}.wasmbus.{}", s, subject),
None => format!("wasmbus.{}", subject),
}
}
}
fn get_connection(host: &str, credsfile: Option<PathBuf>) -> nats::Connection {
let mut opts = if let Some(creds) = credsfile {
nats::Options::with_credentials(creds)
} else {
nats::Options::new()
};
opts = opts.with_name("waSCC Lattice");
opts.connect(host).unwrap()
} |
pub const INVENTORY_ACTORS: &str = "inventory.actors";
pub const INVENTORY_HOSTS: &str = "inventory.hosts";
pub const INVENTORY_BINDINGS: &str = "inventory.bindings"; | random_line_split |
mod.rs | pub mod cut_detector;
pub mod ring;
pub mod view;
use crate::{
common::{ConfigId, Endpoint, NodeId, Scheduler, SchedulerEvents},
consensus::FastPaxos,
error::Result,
event::{Event, NodeStatusChange},
monitor::Monitor,
transport::{
proto::{
self, Alert, BatchedAlertMessage, EdgeStatus, JoinMessage, JoinResponse, JoinStatus,
Metadata, NodeStatus, PreJoinMessage,
},
Message, Request, Response,
},
};
use cut_detector::CutDetector;
use view::View;
use futures::FutureExt;
use std::{
collections::{HashMap, VecDeque},
time::{Duration, Instant},
};
use tokio::sync::{broadcast, mpsc, oneshot};
use tracing::info;
type OutboundResponse = oneshot::Sender<crate::Result<Response>>;
#[derive(Debug)]
pub struct Membership<M> {
host_addr: Endpoint,
view: View,
cut_detector: CutDetector,
monitor: M,
alerts: VecDeque<proto::Alert>,
last_enqueued_alert: Instant,
joiners_to_respond: Vec<Endpoint>,
// joiners_to_respond: HashMap<Endpoint, VecDeque<OutboundResponse>>,
batch_window: Duration,
paxos: FastPaxos,
announced_proposal: bool,
joiner_data: HashMap<Endpoint, (NodeId, Metadata)>,
event_tx: broadcast::Sender<Event>,
monitor_cancellers: Vec<oneshot::Sender<()>>,
messages: VecDeque<(Endpoint, Message)>,
}
impl<M: Monitor> Membership<M> {
#[allow(dead_code)]
pub fn new(
host_addr: Endpoint,
view: View,
cut_detector: CutDetector,
monitor: M,
event_tx: broadcast::Sender<Event>,
) -> Self {
// TODO: setup startup tasks
let paxos = FastPaxos::new(
host_addr.clone(),
view.get_membership_size(),
view.get_current_config_id(),
);
Self {
host_addr,
view,
cut_detector,
monitor,
paxos,
alerts: VecDeque::default(),
last_enqueued_alert: Instant::now(),
joiners_to_respond: Vec::new(),
batch_window: Duration::new(10, 0),
announced_proposal: false,
joiner_data: HashMap::default(),
monitor_cancellers: vec![],
event_tx,
messages: VecDeque::new(),
}
}
#[allow(dead_code)]
fn send_initial_notification(&self) {
self.event_tx
.send(Event::ViewChange(self.get_inititial_view_changes()))
.expect("Unable to send response");
}
fn get_inititial_view_changes(&self) -> Vec<NodeStatusChange> {
let nodes = self.view.get_ring(0);
nodes
.iter()
.map(|_| NodeStatusChange {
endpoint: self.host_addr.clone(),
status: EdgeStatus::Up,
metadata: Metadata::default(),
})
.collect()
}
pub fn view(&self) -> Vec<&Endpoint> {
self.view
.get_ring(0)
.expect("There is always a ring!")
.iter()
.collect()
}
pub fn step(&mut self, from: Endpoint, msg: proto::RequestKind) {
use proto::RequestKind::*;
match msg {
PreJoin(msg) => self.handle_pre_join(from, msg),
Join(msg) => self.handle_join(from, msg),
BatchedAlert(msg) => self.handle_batched_alert_message(msg),
Consensus(msg) => {
let view = self
.view
.get_ring(0)
.expect("Ring zero should always exist")
.iter()
.collect();
let msgs = self.paxos.step(msg, view);
self.messages.extend(msgs);
}
_ => todo!("request type not implemented yet"),
}
}
pub fn start_classic_round(&mut self) -> Result<()> {
// TODO: make paxos synchronous
// self.paxos.start_classic_round()
todo!()
}
pub fn handle_pre_join(&mut self, from: Endpoint, msg: PreJoinMessage) {
let PreJoinMessage {
sender, node_id,..
} = msg;
let status = self.view.is_safe_to_join(&sender, &node_id);
let config_id = self.view.get_config().config_id();
let endpoints =
if status == JoinStatus::SafeToJoin || status == JoinStatus::HostnameAlreadyInRing {
self.view.get_expected_observers(&sender)
} else {
Vec::new()
};
let join_res = JoinResponse {
sender,
status,
config_id,
endpoints,
identifiers: Vec::new(),
cluster_metadata: HashMap::new(),
};
info!(
message = "Join at seed.",
seed = %self.host_addr,
sender = %join_res.sender,
config = %join_res.config_id,
size = %self.view.get_membership_size()
);
self.messages
.push_back((from, proto::ResponseKind::Join(join_res).into()));
}
pub fn handle_join(&mut self, from: Endpoint, msg: JoinMessage) {
if msg.config_id == self.view.get_current_config_id() {
let config = self.view.get_config();
// TODO: do we still need to do this?
// self.joiners_to_respond
// .entry(msg.sender.clone())
// .or_insert_with(VecDeque::new)
// .push_back(from);
let alert = proto::Alert {
src: self.host_addr.clone(),
dst: msg.sender.clone(),
edge_status: proto::EdgeStatus::Up,
config_id: config.config_id(),
node_id: Some(msg.node_id.clone()),
ring_number: msg.ring_number,
metadata: None,
};
self.enqueue_alert(alert);
} else {
// This is the case where the config changed between phase 1
// and phase 2 of the join process.
let response = if self.view.is_host_present(&msg.sender)
&& self.view.is_node_id_present(&msg.node_id)
{
let config = self.view.get_config();
// Race condition where an observer already crossed H messages for the joiner and
// changed the configuration, but the JoinPhase2 message shows up at the observer
// after it has already added the joiner. In this case, simply tell the joiner it's
// safe to join
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::SafeToJoin,
config_id: config.config_id(),
endpoints: config.endpoints.clone(),
identifiers: config.node_ids.clone(),
cluster_metadata: HashMap::new(),
}
} else {
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::ConfigChanged,
config_id: self.view.get_current_config_id(),
endpoints: vec![],
identifiers: vec![],
cluster_metadata: HashMap::new(),
}
};
self.messages
.push_back((from, proto::ResponseKind::Join(response).into()));
}
}
// Invoked by observers of a node for failure detection
fn handle_probe_message(&self) -> Response {
Response::new_probe(NodeStatus::Up) // TODO: FIXME THIS IS WRONG
}
// Receives edge update events and delivers them to the cut detector to check if it will
// return a valid proposal.
//
// Edge update messages that do not affect the ongoing proposal need to be dropped.
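//
// Rough shape of the handling below:
//   batched alerts -> filter_alert_messages -> cut_detector.aggregate -> proposal
//   proposal += cut_detector.invalidate_failing_edges(&mut self.view)
//   a non-empty proposal is announced as a ViewChangeProposal event and will be
//   handed to paxos.propose once the consensus path is made synchronous (see TODO)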
fn handle_batched_alert_message(&mut self, msg_batch: BatchedAlertMessage) {
let current_config_id = self.view.get_current_config_id();
let size = self.view.get_membership_size();
let mut proposal: Vec<Endpoint> = msg_batch
.alerts
.iter()
// filter out messages which violate membership invariants
// And then run the cut detector to see if there is a new proposal
.filter_map(|message| {
if !self.filter_alert_messages(&msg_batch, message, size, current_config_id) {
return None;
}
Some(self.cut_detector.aggregate(message))
})
.flatten()
.collect();
proposal.extend(self.cut_detector.invalidate_failing_edges(&mut self.view));
if !proposal.is_empty() {
self.announced_proposal = true;
self.event_tx
.send(Event::ViewChangeProposal(
self.create_node_status_change_list(proposal.clone()),
))
.expect("Unable to send response");
// TODO: make paxos synchronous
// self.paxos.propose(proposal, scheduler).await?
}
}
fn create_node_status_change_list(&self, proposal: Vec<Endpoint>) -> Vec<NodeStatusChange> {
proposal
.iter()
.map(|node| NodeStatusChange {
endpoint: node.to_string(),
status: if self.view.is_host_present(node) {
EdgeStatus::Down
} else | ,
metadata: Metadata::default(),
})
.collect()
}
// Filter for removing invalid edge update messages. These include messages
// that were for a configuration that the current node is not a part of, and messages
// that violate the semantics of being a part of a configuration
fn filter_alert_messages(
&mut self,
_message_batch: &BatchedAlertMessage, // Might require this later for logging
message: &Alert,
_size: usize,
config_id: ConfigId,
) -> bool {
let dst = &message.dst;
if config_id != message.config_id {
return false;
}
// An invariant to maintain is that a node can only go into the membership set once
// and leave it once
if message.edge_status == EdgeStatus::Down && !self.view.is_host_present(&dst) {
return false;
}
if message.edge_status == EdgeStatus::Up {
// Add joiner data after the node is done being added to the set. Store in a
// temp location for now.
self.joiner_data.insert(
dst.clone(),
(
message.node_id.clone().take().unwrap(),
message.metadata.clone().take().unwrap(),
),
);
}
true
}
pub fn create_failure_detectors(
&mut self,
scheduler: &mut Scheduler,
) -> Result<mpsc::Receiver<(Endpoint, ConfigId)>> {
todo!()
// let (tx, rx) = mpsc::channel(1000);
// for subject in self.view.get_subjects(&self.host_addr)? {
// let (mon_tx, mon_rx) = oneshot::channel();
// let fut = self.monitor.monitor(
// subject.clone(),
// client.clone(),
// self.view.get_current_config_id(),
// tx.clone(),
// mon_rx,
// );
// scheduler.push(Box::pin(fut.map(|_| SchedulerEvents::None)));
// self.monitor_cancellers.push(mon_tx);
// }
// Ok(rx)
}
#[allow(dead_code)]
pub fn edge_failure_notification(&mut self, subject: Endpoint, config_id: ConfigId) {
if config_id != self.view.get_current_config_id() {
// TODO: Figure out why &String does not impl Value
// info!(
// target: "Failure notification from old config.",
// subject = subject,
// config = self.view.get_current_config_id(),
// old_config = config_id
// );
//
return;
}
let alert = proto::Alert {
src: self.host_addr.clone(),
dst: subject.clone(),
edge_status: proto::EdgeStatus::Down,
config_id,
node_id: None,
ring_number: self
.view
.get_ring_numbers(&self.host_addr, &subject)
.expect("Unable to get ring number"),
metadata: None,
};
self.enqueue_alert(alert);
}
pub fn get_batch_alerts(&mut self) -> Option<proto::BatchedAlertMessage> {
if !self.alerts.is_empty()
&& (Instant::now() - self.last_enqueued_alert) > self.batch_window
{
let alerts = self.alerts.drain(..).collect();
Some(proto::BatchedAlertMessage {
sender: self.host_addr.clone(),
alerts,
})
} else {
None
}
}
pub fn enqueue_alert(&mut self, alert: proto::Alert) {
self.last_enqueued_alert = Instant::now();
self.alerts.push_back(alert);
}
/// This is invoked when the consensus module decides on a proposal
///
/// Any node in the proposal that is not yet in the membership list will be added to
/// the cluster, and any node in the proposal that is currently in the membership list
/// will be removed.
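///
/// For example, if the decided proposal is `[A, D]` where `A` is already in the ring
/// and `D` has pending joiner metadata, then `A` is removed and `D` is added.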
pub fn on_decide(&mut self, proposal: Vec<Endpoint>) {
// TODO: Handle metadata updates
// TODO: Handle subscriptions
self.cancel_failure_detectors();
for node in &proposal {
if self.view.is_host_present(&node) {
self.view.ring_delete(&node);
} else if let Some((node_id, _metadata)) = self.joiner_data.remove(node) {
self.view.ring_add(node.clone(), node_id);
} else {
panic!("Node not present in pre-join metadata")
}
}
let _current_config_id = self.view.get_current_config_id();
// clear data structures
self.cut_detector.clear();
self.announced_proposal = false;
if self.view.is_host_present(&self.host_addr) {
// TODO: inform edge failure detector about config change
} else {
// We need to gracefully exit by calling a user handler and invalidating the current
// session
unimplemented!("How do you manage a callback again?");
}
// TODO: Instantiate new consensus instance
// self.paxos = FastPaxos::new(self.host_addr, self.view.get_membership_size(), )
self.respond_to_joiners(proposal);
}
fn cancel_failure_detectors(&mut self) {
for signal in self.monitor_cancellers.drain(..) {
let _ = signal.send(());
}
}
fn respond_to_joiners(&mut self, proposal: Vec<Endpoint>) {
let configuration = self.view.get_config();
let join_res = JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::SafeToJoin,
config_id: configuration.config_id(),
endpoints: configuration.endpoints.clone(),
identifiers: configuration.node_ids.clone(),
cluster_metadata: HashMap::new(), // TODO: metadata manager
};
for node in proposal {
self.messages
.push_back((node, proto::ResponseKind::Join(join_res.clone()).into()));
// self.joiners_to_respond.remove(&node).and_then(|joiners| {
// joiners.into_iter().for_each(|joiner| {
// joiner
// .send(Ok(Response::new_join(join_res.clone())))
// .expect("Unable to send response");
// });
// // This is so the compiler can infer the type of the closure to be Option<()>
// Some(())
// });
}
}
pub fn drain_messages(&mut self) -> Vec<(Endpoint, Message)> {
let mut msgs = Vec::new();
while let Some(msg) = self.messages.pop_front() {
msgs.push(msg);
}
msgs
}
}
| {
EdgeStatus::Up
} | conditional_block |
mod.rs | pub mod cut_detector;
pub mod ring;
pub mod view;
use crate::{
common::{ConfigId, Endpoint, NodeId, Scheduler, SchedulerEvents},
consensus::FastPaxos,
error::Result,
event::{Event, NodeStatusChange},
monitor::Monitor,
transport::{
proto::{
self, Alert, BatchedAlertMessage, EdgeStatus, JoinMessage, JoinResponse, JoinStatus,
Metadata, NodeStatus, PreJoinMessage,
},
Message, Request, Response,
},
};
use cut_detector::CutDetector;
use view::View;
use futures::FutureExt;
use std::{
collections::{HashMap, VecDeque},
time::{Duration, Instant},
};
use tokio::sync::{broadcast, mpsc, oneshot};
use tracing::info;
type OutboundResponse = oneshot::Sender<crate::Result<Response>>;
#[derive(Debug)]
pub struct Membership<M> {
host_addr: Endpoint,
view: View,
cut_detector: CutDetector,
monitor: M,
alerts: VecDeque<proto::Alert>,
last_enqueued_alert: Instant,
joiners_to_respond: Vec<Endpoint>,
// joiners_to_respond: HashMap<Endpoint, VecDeque<OutboundResponse>>,
batch_window: Duration,
paxos: FastPaxos,
announced_proposal: bool,
joiner_data: HashMap<Endpoint, (NodeId, Metadata)>,
event_tx: broadcast::Sender<Event>,
monitor_cancellers: Vec<oneshot::Sender<()>>,
messages: VecDeque<(Endpoint, Message)>,
}
impl<M: Monitor> Membership<M> {
#[allow(dead_code)]
pub fn new(
host_addr: Endpoint,
view: View,
cut_detector: CutDetector,
monitor: M,
event_tx: broadcast::Sender<Event>,
) -> Self {
// TODO: setup startup tasks
let paxos = FastPaxos::new(
host_addr.clone(),
view.get_membership_size(),
view.get_current_config_id(),
);
Self {
host_addr,
view,
cut_detector,
monitor,
paxos,
alerts: VecDeque::default(),
last_enqueued_alert: Instant::now(),
joiners_to_respond: Vec::new(),
batch_window: Duration::new(10, 0),
announced_proposal: false,
joiner_data: HashMap::default(),
monitor_cancellers: vec![],
event_tx,
messages: VecDeque::new(),
}
}
#[allow(dead_code)]
fn send_initial_notification(&self) {
self.event_tx
.send(Event::ViewChange(self.get_inititial_view_changes()))
.expect("Unable to send response");
}
fn get_inititial_view_changes(&self) -> Vec<NodeStatusChange> {
let nodes = self.view.get_ring(0);
nodes
.iter()
.map(|_| NodeStatusChange {
endpoint: self.host_addr.clone(),
status: EdgeStatus::Up,
metadata: Metadata::default(),
})
.collect()
}
pub fn view(&self) -> Vec<&Endpoint> {
self.view
.get_ring(0)
.expect("There is always a ring!")
.iter()
.collect()
}
pub fn step(&mut self, from: Endpoint, msg: proto::RequestKind) {
use proto::RequestKind::*;
match msg {
PreJoin(msg) => self.handle_pre_join(from, msg),
Join(msg) => self.handle_join(from, msg),
BatchedAlert(msg) => self.handle_batched_alert_message(msg),
Consensus(msg) => {
let view = self
.view
.get_ring(0)
.expect("Ring zero should always exist")
.iter()
.collect();
let msgs = self.paxos.step(msg, view);
self.messages.extend(msgs);
}
_ => todo!("request type not implemented yet"),
}
}
pub fn start_classic_round(&mut self) -> Result<()> {
// TODO: make paxos synchronous
// self.paxos.start_classic_round()
todo!()
}
pub fn handle_pre_join(&mut self, from: Endpoint, msg: PreJoinMessage) {
let PreJoinMessage {
sender, node_id,..
} = msg;
let status = self.view.is_safe_to_join(&sender, &node_id);
let config_id = self.view.get_config().config_id();
let endpoints =
if status == JoinStatus::SafeToJoin || status == JoinStatus::HostnameAlreadyInRing {
self.view.get_expected_observers(&sender)
} else {
Vec::new()
};
let join_res = JoinResponse {
sender,
status,
config_id,
endpoints,
identifiers: Vec::new(),
cluster_metadata: HashMap::new(),
};
info!(
message = "Join at seed.",
seed = %self.host_addr,
sender = %join_res.sender,
config = %join_res.config_id,
size = %self.view.get_membership_size()
);
self.messages
.push_back((from, proto::ResponseKind::Join(join_res).into()));
}
pub fn handle_join(&mut self, from: Endpoint, msg: JoinMessage) {
if msg.config_id == self.view.get_current_config_id() {
let config = self.view.get_config();
// TODO: do we still need to do this?
// self.joiners_to_respond
// .entry(msg.sender.clone())
// .or_insert_with(VecDeque::new)
// .push_back(from);
let alert = proto::Alert {
src: self.host_addr.clone(),
dst: msg.sender.clone(),
edge_status: proto::EdgeStatus::Up,
config_id: config.config_id(),
node_id: Some(msg.node_id.clone()),
ring_number: msg.ring_number,
metadata: None,
};
self.enqueue_alert(alert);
} else {
// This is the case where the config changed between phase 1
// and phase 2 of the join process.
let response = if self.view.is_host_present(&msg.sender)
&& self.view.is_node_id_present(&msg.node_id)
{
let config = self.view.get_config();
// Race condition where an observer already crossed H messages for the joiner and
// changed the configuration, but the JoinPhase2 message shows up at the observer
// after it has already added the joiner. In this case, simply tell the joiner it's
// safe to join
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::SafeToJoin,
config_id: config.config_id(),
endpoints: config.endpoints.clone(),
identifiers: config.node_ids.clone(),
cluster_metadata: HashMap::new(),
}
} else {
proto::JoinResponse { | cluster_metadata: HashMap::new(),
}
};
self.messages
.push_back((from, proto::ResponseKind::Join(response).into()));
}
}
// Invoked by observers of a node for failure detection
fn handle_probe_message(&self) -> Response {
Response::new_probe(NodeStatus::Up) // TODO: FIXME THIS IS WRONG
}
// Receives edge update events and delivers them to the cut detector to check if it will
// return a valid proposal.
//
// Edge update messages that do not affect the ongoing proposal need to be dropped.
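//
// Rough shape of the handling below:
//   batched alerts -> filter_alert_messages -> cut_detector.aggregate -> proposal
//   proposal += cut_detector.invalidate_failing_edges(&mut self.view)
//   a non-empty proposal is announced as a ViewChangeProposal event and will be
//   handed to paxos.propose once the consensus path is made synchronous (see TODO)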
fn handle_batched_alert_message(&mut self, msg_batch: BatchedAlertMessage) {
let current_config_id = self.view.get_current_config_id();
let size = self.view.get_membership_size();
let mut proposal: Vec<Endpoint> = msg_batch
.alerts
.iter()
// filter out messages which violate membership invariants
// And then run the cut detector to see if there is a new proposal
.filter_map(|message| {
if !self.filter_alert_messages(&msg_batch, message, size, current_config_id) {
return None;
}
Some(self.cut_detector.aggregate(message))
})
.flatten()
.collect();
proposal.extend(self.cut_detector.invalidate_failing_edges(&mut self.view));
if !proposal.is_empty() {
self.announced_proposal = true;
self.event_tx
.send(Event::ViewChangeProposal(
self.create_node_status_change_list(proposal.clone()),
))
.expect("Unable to send response");
// TODO: make paxos synchronous
// self.paxos.propose(proposal, scheduler).await?
}
}
fn create_node_status_change_list(&self, proposal: Vec<Endpoint>) -> Vec<NodeStatusChange> {
proposal
.iter()
.map(|node| NodeStatusChange {
endpoint: node.to_string(),
status: if self.view.is_host_present(node) {
EdgeStatus::Down
} else {
EdgeStatus::Up
},
metadata: Metadata::default(),
})
.collect()
}
// Filter for removing invalid edge update messages. These include messages
// that were for a configuration that the current node is not a part of, and messages
// that violate the semantics of being a part of a configuration
fn filter_alert_messages(
&mut self,
_message_batch: &BatchedAlertMessage, // Might require this later for logging
message: &Alert,
_size: usize,
config_id: ConfigId,
) -> bool {
let dst = &message.dst;
if config_id != message.config_id {
return false;
}
// An invariant to maintain is that a node can only go into the membership set once
// and leave it once
if message.edge_status == EdgeStatus::Down && !self.view.is_host_present(&dst) {
return false;
}
if message.edge_status == EdgeStatus::Up {
// Add joiner data after the node is done being added to the set. Store in a
// temp location for now.
self.joiner_data.insert(
dst.clone(),
(
message.node_id.clone().take().unwrap(),
message.metadata.clone().take().unwrap(),
),
);
}
true
}
pub fn create_failure_detectors(
&mut self,
scheduler: &mut Scheduler,
) -> Result<mpsc::Receiver<(Endpoint, ConfigId)>> {
todo!()
// let (tx, rx) = mpsc::channel(1000);
// for subject in self.view.get_subjects(&self.host_addr)? {
// let (mon_tx, mon_rx) = oneshot::channel();
// let fut = self.monitor.monitor(
// subject.clone(),
// client.clone(),
// self.view.get_current_config_id(),
// tx.clone(),
// mon_rx,
// );
// scheduler.push(Box::pin(fut.map(|_| SchedulerEvents::None)));
// self.monitor_cancellers.push(mon_tx);
// }
// Ok(rx)
}
#[allow(dead_code)]
pub fn edge_failure_notification(&mut self, subject: Endpoint, config_id: ConfigId) {
if config_id != self.view.get_current_config_id() {
// TODO: Figure out why &String does not impl Value
// info!(
// target: "Failure notification from old config.",
// subject = subject,
// config = self.view.get_current_config_id(),
// old_config = config_id
// );
//
return;
}
let alert = proto::Alert {
src: self.host_addr.clone(),
dst: subject.clone(),
edge_status: proto::EdgeStatus::Down,
config_id,
node_id: None,
ring_number: self
.view
.get_ring_numbers(&self.host_addr, &subject)
.expect("Unable to get ring number"),
metadata: None,
};
self.enqueue_alert(alert);
}
pub fn get_batch_alerts(&mut self) -> Option<proto::BatchedAlertMessage> {
if !self.alerts.is_empty()
&& (Instant::now() - self.last_enqueued_alert) > self.batch_window
{
let alerts = self.alerts.drain(..).collect();
Some(proto::BatchedAlertMessage {
sender: self.host_addr.clone(),
alerts,
})
} else {
None
}
}
pub fn enqueue_alert(&mut self, alert: proto::Alert) {
self.last_enqueued_alert = Instant::now();
self.alerts.push_back(alert);
}
/// This is invoked when the consensus module decides on a proposal
///
/// Any node in the proposal that is not yet in the membership list will be added to
/// the cluster, and any node in the proposal that is currently in the membership list
/// will be removed.
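///
/// For example, if the decided proposal is `[A, D]` where `A` is already in the ring
/// and `D` has pending joiner metadata, then `A` is removed and `D` is added.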
pub fn on_decide(&mut self, proposal: Vec<Endpoint>) {
// TODO: Handle metadata updates
// TODO: Handle subscriptions
self.cancel_failure_detectors();
for node in &proposal {
if self.view.is_host_present(&node) {
self.view.ring_delete(&node);
} else if let Some((node_id, _metadata)) = self.joiner_data.remove(node) {
self.view.ring_add(node.clone(), node_id);
} else {
panic!("Node not present in pre-join metadata")
}
}
let _current_config_id = self.view.get_current_config_id();
// clear data structures
self.cut_detector.clear();
self.announced_proposal = false;
if self.view.is_host_present(&self.host_addr) {
// TODO: inform edge failure detector about config change
} else {
// We need to gracefully exit by calling a user handler and invalidating the current
// session
unimplemented!("How do you manage a callback again?");
}
// TODO: Instantiate new consensus instance
// self.paxos = FastPaxos::new(self.host_addr, self.view.get_membership_size(), )
self.respond_to_joiners(proposal);
}
fn cancel_failure_detectors(&mut self) {
for signal in self.monitor_cancellers.drain(..) {
let _ = signal.send(());
}
}
fn respond_to_joiners(&mut self, proposal: Vec<Endpoint>) {
let configuration = self.view.get_config();
let join_res = JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::SafeToJoin,
config_id: configuration.config_id(),
endpoints: configuration.endpoints.clone(),
identifiers: configuration.node_ids.clone(),
cluster_metadata: HashMap::new(), // TODO: metadata manager
};
for node in proposal {
self.messages
.push_back((node, proto::ResponseKind::Join(join_res.clone()).into()));
// self.joiners_to_respond.remove(&node).and_then(|joiners| {
// joiners.into_iter().for_each(|joiner| {
// joiner
// .send(Ok(Response::new_join(join_res.clone())))
// .expect("Unable to send response");
// });
// // This is so the compiler can infer the type of the closure to be Option<()>
// Some(())
// });
}
}
pub fn drain_messages(&mut self) -> Vec<(Endpoint, Message)> {
let mut msgs = Vec::new();
while let Some(msg) = self.messages.pop_front() {
msgs.push(msg);
}
msgs
}
} | sender: self.host_addr.clone(),
status: JoinStatus::ConfigChanged,
config_id: self.view.get_current_config_id(),
endpoints: vec![],
identifiers: vec![], | random_line_split |
mod.rs | pub mod cut_detector;
pub mod ring;
pub mod view;
use crate::{
common::{ConfigId, Endpoint, NodeId, Scheduler, SchedulerEvents},
consensus::FastPaxos,
error::Result,
event::{Event, NodeStatusChange},
monitor::Monitor,
transport::{
proto::{
self, Alert, BatchedAlertMessage, EdgeStatus, JoinMessage, JoinResponse, JoinStatus,
Metadata, NodeStatus, PreJoinMessage,
},
Message, Request, Response,
},
};
use cut_detector::CutDetector;
use view::View;
use futures::FutureExt;
use std::{
collections::{HashMap, VecDeque},
time::{Duration, Instant},
};
use tokio::sync::{broadcast, mpsc, oneshot};
use tracing::info;
type OutboundResponse = oneshot::Sender<crate::Result<Response>>;
#[derive(Debug)]
pub struct Membership<M> {
host_addr: Endpoint,
view: View,
cut_detector: CutDetector,
monitor: M,
alerts: VecDeque<proto::Alert>,
last_enqueued_alert: Instant,
joiners_to_respond: Vec<Endpoint>,
// joiners_to_respond: HashMap<Endpoint, VecDeque<OutboundResponse>>,
batch_window: Duration,
paxos: FastPaxos,
announced_proposal: bool,
joiner_data: HashMap<Endpoint, (NodeId, Metadata)>,
event_tx: broadcast::Sender<Event>,
monitor_cancellers: Vec<oneshot::Sender<()>>,
messages: VecDeque<(Endpoint, Message)>,
}
impl<M: Monitor> Membership<M> {
#[allow(dead_code)]
pub fn new(
host_addr: Endpoint,
view: View,
cut_detector: CutDetector,
monitor: M,
event_tx: broadcast::Sender<Event>,
) -> Self {
// TODO: setup startup tasks
let paxos = FastPaxos::new(
host_addr.clone(),
view.get_membership_size(),
view.get_current_config_id(),
);
Self {
host_addr,
view,
cut_detector,
monitor,
paxos,
alerts: VecDeque::default(),
last_enqueued_alert: Instant::now(),
joiners_to_respond: Vec::new(),
batch_window: Duration::new(10, 0),
announced_proposal: false,
joiner_data: HashMap::default(),
monitor_cancellers: vec![],
event_tx,
messages: VecDeque::new(),
}
}
#[allow(dead_code)]
fn send_initial_notification(&self) {
self.event_tx
.send(Event::ViewChange(self.get_inititial_view_changes()))
.expect("Unable to send response");
}
fn get_inititial_view_changes(&self) -> Vec<NodeStatusChange> {
let nodes = self.view.get_ring(0);
nodes
.iter()
.map(|_| NodeStatusChange {
endpoint: self.host_addr.clone(),
status: EdgeStatus::Up,
metadata: Metadata::default(),
})
.collect()
}
pub fn view(&self) -> Vec<&Endpoint> {
self.view
.get_ring(0)
.expect("There is always a ring!")
.iter()
.collect()
}
pub fn step(&mut self, from: Endpoint, msg: proto::RequestKind) {
use proto::RequestKind::*;
match msg {
PreJoin(msg) => self.handle_pre_join(from, msg),
Join(msg) => self.handle_join(from, msg),
BatchedAlert(msg) => self.handle_batched_alert_message(msg),
Consensus(msg) => {
let view = self
.view
.get_ring(0)
.expect("Ring zero should always exist")
.iter()
.collect();
let msgs = self.paxos.step(msg, view);
self.messages.extend(msgs);
}
_ => todo!("request type not implemented yet"),
}
}
pub fn start_classic_round(&mut self) -> Result<()> {
// TODO: make paxos synchronous
// self.paxos.start_classic_round()
todo!()
}
pub fn handle_pre_join(&mut self, from: Endpoint, msg: PreJoinMessage) {
let PreJoinMessage {
sender, node_id,..
} = msg;
let status = self.view.is_safe_to_join(&sender, &node_id);
let config_id = self.view.get_config().config_id();
let endpoints =
if status == JoinStatus::SafeToJoin || status == JoinStatus::HostnameAlreadyInRing {
self.view.get_expected_observers(&sender)
} else {
Vec::new()
};
let join_res = JoinResponse {
sender,
status,
config_id,
endpoints,
identifiers: Vec::new(),
cluster_metadata: HashMap::new(),
};
info!(
message = "Join at seed.",
seed = %self.host_addr,
sender = %join_res.sender,
config = %join_res.config_id,
size = %self.view.get_membership_size()
);
self.messages
.push_back((from, proto::ResponseKind::Join(join_res).into()));
}
pub fn handle_join(&mut self, from: Endpoint, msg: JoinMessage) {
if msg.config_id == self.view.get_current_config_id() {
let config = self.view.get_config();
// TODO: do we still need to do this?
// self.joiners_to_respond
// .entry(msg.sender.clone())
// .or_insert_with(VecDeque::new)
// .push_back(from);
let alert = proto::Alert {
src: self.host_addr.clone(),
dst: msg.sender.clone(),
edge_status: proto::EdgeStatus::Up,
config_id: config.config_id(),
node_id: Some(msg.node_id.clone()),
ring_number: msg.ring_number,
metadata: None,
};
self.enqueue_alert(alert);
} else {
// This is the case where the config changed between phase 1
// and phase 2 of the join process.
let response = if self.view.is_host_present(&msg.sender)
&& self.view.is_node_id_present(&msg.node_id)
{
let config = self.view.get_config();
// Race condition where an observer already crossed H messages for the joiner and
// changed the configuration, but the JoinPhase2 message shows up at the observer
// after it has already added the joiner. In this case, simply tell the joiner it's
// safe to join
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::SafeToJoin,
config_id: config.config_id(),
endpoints: config.endpoints.clone(),
identifiers: config.node_ids.clone(),
cluster_metadata: HashMap::new(),
}
} else {
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::ConfigChanged,
config_id: self.view.get_current_config_id(),
endpoints: vec![],
identifiers: vec![],
cluster_metadata: HashMap::new(),
}
};
self.messages
.push_back((from, proto::ResponseKind::Join(response).into()));
}
}
// Invoked by observers of a node for failure detection
fn handle_probe_message(&self) -> Response {
Response::new_probe(NodeStatus::Up) // TODO: FIXME THIS IS WRONG
}
// Receives edge update events and delivers them to the cut detector to check if it will
// return a valid proposal.
//
// Edge update messages that do not affect the ongoing proposal need to be dropped.
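//
// Rough shape of the handling below:
//   batched alerts -> filter_alert_messages -> cut_detector.aggregate -> proposal
//   proposal += cut_detector.invalidate_failing_edges(&mut self.view)
//   a non-empty proposal is announced as a ViewChangeProposal event and will be
//   handed to paxos.propose once the consensus path is made synchronous (see TODO)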
fn handle_batched_alert_message(&mut self, msg_batch: BatchedAlertMessage) {
let current_config_id = self.view.get_current_config_id();
let size = self.view.get_membership_size();
let mut proposal: Vec<Endpoint> = msg_batch
.alerts
.iter()
// filter out messages which violate membership invariants
// And then run the cut detector to see if there is a new proposal
.filter_map(|message| {
if !self.filter_alert_messages(&msg_batch, message, size, current_config_id) {
return None;
}
Some(self.cut_detector.aggregate(message))
})
.flatten()
.collect();
proposal.extend(self.cut_detector.invalidate_failing_edges(&mut self.view));
if !proposal.is_empty() {
self.announced_proposal = true;
self.event_tx
.send(Event::ViewChangeProposal(
self.create_node_status_change_list(proposal.clone()),
))
.expect("Unable to send response");
// TODO: make paxos synchronous
// self.paxos.propose(proposal, scheduler).await?
}
}
fn create_node_status_change_list(&self, proposal: Vec<Endpoint>) -> Vec<NodeStatusChange> {
proposal
.iter()
.map(|node| NodeStatusChange {
endpoint: node.to_string(),
status: if self.view.is_host_present(node) {
EdgeStatus::Down
} else {
EdgeStatus::Up
},
metadata: Metadata::default(),
})
.collect()
}
// Filter for removing invalid edge update messages. These include messages
// that were for a configuration that the current node is not a part of, and messages
// that violate the semantics of being a part of a configuration
fn filter_alert_messages(
&mut self,
_message_batch: &BatchedAlertMessage, // Might require this later for logging
message: &Alert,
_size: usize,
config_id: ConfigId,
) -> bool {
let dst = &message.dst;
if config_id != message.config_id {
return false;
}
// An invariant to maintain is that a node can only go into the membership set once
// and leave it once
if message.edge_status == EdgeStatus::Down && !self.view.is_host_present(&dst) {
return false;
}
if message.edge_status == EdgeStatus::Up {
// Add joiner data after the node is done being added to the set. Store in a
// temp location for now.
self.joiner_data.insert(
dst.clone(),
(
message.node_id.clone().take().unwrap(),
message.metadata.clone().take().unwrap(),
),
);
}
true
}
pub fn create_failure_detectors(
&mut self,
scheduler: &mut Scheduler,
) -> Result<mpsc::Receiver<(Endpoint, ConfigId)>> {
todo!()
// let (tx, rx) = mpsc::channel(1000);
// for subject in self.view.get_subjects(&self.host_addr)? {
// let (mon_tx, mon_rx) = oneshot::channel();
// let fut = self.monitor.monitor(
// subject.clone(),
// client.clone(),
// self.view.get_current_config_id(),
// tx.clone(),
// mon_rx,
// );
// scheduler.push(Box::pin(fut.map(|_| SchedulerEvents::None)));
// self.monitor_cancellers.push(mon_tx);
// }
// Ok(rx)
}
#[allow(dead_code)]
pub fn edge_failure_notification(&mut self, subject: Endpoint, config_id: ConfigId) {
if config_id != self.view.get_current_config_id() {
// TODO: Figure out why &String does not impl Value
// info!(
// target: "Failure notification from old config.",
// subject = subject,
// config = self.view.get_current_config_id(),
// old_config = config_id
// );
//
return;
}
let alert = proto::Alert {
src: self.host_addr.clone(),
dst: subject.clone(),
edge_status: proto::EdgeStatus::Down,
config_id,
node_id: None,
ring_number: self
.view
.get_ring_numbers(&self.host_addr, &subject)
.expect("Unable to get ring number"),
metadata: None,
};
self.enqueue_alert(alert);
}
pub fn get_batch_alerts(&mut self) -> Option<proto::BatchedAlertMessage> {
if !self.alerts.is_empty()
&& (Instant::now() - self.last_enqueued_alert) > self.batch_window
{
let alerts = self.alerts.drain(..).collect();
Some(proto::BatchedAlertMessage {
sender: self.host_addr.clone(),
alerts,
})
} else {
None
}
}
pub fn enqueue_alert(&mut self, alert: proto::Alert) {
self.last_enqueued_alert = Instant::now();
self.alerts.push_back(alert);
}
/// This is invoked when the consensus module decides on a proposal
///
/// Any node in the proposal that is not yet in the membership list will be added to
/// the cluster, and any node in the proposal that is currently in the membership list
/// will be removed.
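///
/// For example, if the decided proposal is `[A, D]` where `A` is already in the ring
/// and `D` has pending joiner metadata, then `A` is removed and `D` is added.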
pub fn on_decide(&mut self, proposal: Vec<Endpoint>) {
// TODO: Handle metadata updates
// TODO: Handle subscriptions
self.cancel_failure_detectors();
for node in &proposal {
if self.view.is_host_present(&node) {
self.view.ring_delete(&node);
} else if let Some((node_id, _metadata)) = self.joiner_data.remove(node) {
self.view.ring_add(node.clone(), node_id);
} else {
panic!("Node not present in pre-join metadata")
}
}
let _current_config_id = self.view.get_current_config_id();
// clear data structures
self.cut_detector.clear();
self.announced_proposal = false;
if self.view.is_host_present(&self.host_addr) {
// TODO: inform edge failure detector about config change
} else {
// We need to gracefully exit by calling a user handler and invalidating the current
// session
unimplemented!("How do you manage a callback again?");
}
// TODO: Instantiate new consensus instance
// self.paxos = FastPaxos::new(self.host_addr, self.view.get_membership_size(), )
self.respond_to_joiners(proposal);
}
fn | (&mut self) {
for signal in self.monitor_cancellers.drain(..) {
let _ = signal.send(());
}
}
fn respond_to_joiners(&mut self, proposal: Vec<Endpoint>) {
let configuration = self.view.get_config();
let join_res = JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::SafeToJoin,
config_id: configuration.config_id(),
endpoints: configuration.endpoints.clone(),
identifiers: configuration.node_ids.clone(),
cluster_metadata: HashMap::new(), // TODO: metadata manager
};
for node in proposal {
self.messages
.push_back((node, proto::ResponseKind::Join(join_res.clone()).into()));
// self.joiners_to_respond.remove(&node).and_then(|joiners| {
// joiners.into_iter().for_each(|joiner| {
// joiner
// .send(Ok(Response::new_join(join_res.clone())))
// .expect("Unable to send response");
// });
// // This is so the compiler can infer the type of the closure to be Option<()>
// Some(())
// });
}
}
pub fn drain_messages(&mut self) -> Vec<(Endpoint, Message)> {
let mut msgs = Vec::new();
while let Some(msg) = self.messages.pop_front() {
msgs.push(msg);
}
msgs
}
}
| cancel_failure_detectors | identifier_name |
mod.rs | pub mod cut_detector;
pub mod ring;
pub mod view;
use crate::{
common::{ConfigId, Endpoint, NodeId, Scheduler, SchedulerEvents},
consensus::FastPaxos,
error::Result,
event::{Event, NodeStatusChange},
monitor::Monitor,
transport::{
proto::{
self, Alert, BatchedAlertMessage, EdgeStatus, JoinMessage, JoinResponse, JoinStatus,
Metadata, NodeStatus, PreJoinMessage,
},
Message, Request, Response,
},
};
use cut_detector::CutDetector;
use view::View;
use futures::FutureExt;
use std::{
collections::{HashMap, VecDeque},
time::{Duration, Instant},
};
use tokio::sync::{broadcast, mpsc, oneshot};
use tracing::info;
type OutboundResponse = oneshot::Sender<crate::Result<Response>>;
#[derive(Debug)]
pub struct Membership<M> {
host_addr: Endpoint,
view: View,
cut_detector: CutDetector,
monitor: M,
alerts: VecDeque<proto::Alert>,
last_enqueued_alert: Instant,
joiners_to_respond: Vec<Endpoint>,
// joiners_to_respond: HashMap<Endpoint, VecDeque<OutboundResponse>>,
batch_window: Duration,
paxos: FastPaxos,
announced_proposal: bool,
joiner_data: HashMap<Endpoint, (NodeId, Metadata)>,
event_tx: broadcast::Sender<Event>,
monitor_cancellers: Vec<oneshot::Sender<()>>,
messages: VecDeque<(Endpoint, Message)>,
}
impl<M: Monitor> Membership<M> {
#[allow(dead_code)]
pub fn new(
host_addr: Endpoint,
view: View,
cut_detector: CutDetector,
monitor: M,
event_tx: broadcast::Sender<Event>,
) -> Self {
// TODO: setup startup tasks
let paxos = FastPaxos::new(
host_addr.clone(),
view.get_membership_size(),
view.get_current_config_id(),
);
Self {
host_addr,
view,
cut_detector,
monitor,
paxos,
alerts: VecDeque::default(),
last_enqueued_alert: Instant::now(),
joiners_to_respond: Vec::new(),
batch_window: Duration::new(10, 0),
announced_proposal: false,
joiner_data: HashMap::default(),
monitor_cancellers: vec![],
event_tx,
messages: VecDeque::new(),
}
}
#[allow(dead_code)]
fn send_initial_notification(&self) {
self.event_tx
.send(Event::ViewChange(self.get_inititial_view_changes()))
.expect("Unable to send response");
}
fn get_inititial_view_changes(&self) -> Vec<NodeStatusChange> |
pub fn view(&self) -> Vec<&Endpoint> {
self.view
.get_ring(0)
.expect("There is always a ring!")
.iter()
.collect()
}
pub fn step(&mut self, from: Endpoint, msg: proto::RequestKind) {
use proto::RequestKind::*;
match msg {
PreJoin(msg) => self.handle_pre_join(from, msg),
Join(msg) => self.handle_join(from, msg),
BatchedAlert(msg) => self.handle_batched_alert_message(msg),
Consensus(msg) => {
let view = self
.view
.get_ring(0)
.expect("Ring zero should always exist")
.iter()
.collect();
let msgs = self.paxos.step(msg, view);
self.messages.extend(msgs);
}
_ => todo!("request type not implemented yet"),
}
}
pub fn start_classic_round(&mut self) -> Result<()> {
// TODO: make paxos synchronous
// self.paxos.start_classic_round()
todo!()
}
pub fn handle_pre_join(&mut self, from: Endpoint, msg: PreJoinMessage) {
let PreJoinMessage {
sender, node_id,..
} = msg;
let status = self.view.is_safe_to_join(&sender, &node_id);
let config_id = self.view.get_config().config_id();
let endpoints =
if status == JoinStatus::SafeToJoin || status == JoinStatus::HostnameAlreadyInRing {
self.view.get_expected_observers(&sender)
} else {
Vec::new()
};
let join_res = JoinResponse {
sender,
status,
config_id,
endpoints,
identifiers: Vec::new(),
cluster_metadata: HashMap::new(),
};
info!(
message = "Join at seed.",
seed = %self.host_addr,
sender = %join_res.sender,
config = %join_res.config_id,
size = %self.view.get_membership_size()
);
self.messages
.push_back((from, proto::ResponseKind::Join(join_res).into()));
}
pub fn handle_join(&mut self, from: Endpoint, msg: JoinMessage) {
if msg.config_id == self.view.get_current_config_id() {
let config = self.view.get_config();
// TODO: do we still need to do this?
// self.joiners_to_respond
// .entry(msg.sender.clone())
// .or_insert_with(VecDeque::new)
// .push_back(from);
let alert = proto::Alert {
src: self.host_addr.clone(),
dst: msg.sender.clone(),
edge_status: proto::EdgeStatus::Up,
config_id: config.config_id(),
node_id: Some(msg.node_id.clone()),
ring_number: msg.ring_number,
metadata: None,
};
self.enqueue_alert(alert);
} else {
// This is the case where the config changed between phase 1
// and phase 2 of the join process.
let response = if self.view.is_host_present(&msg.sender)
&& self.view.is_node_id_present(&msg.node_id)
{
let config = self.view.get_config();
// Race condition where an observer already crossed H messages for the joiner and
// changed the configuration, but the JoinPhase2 message shows up at the observer
// after it has already added the joiner. In this case, simply tell the joiner it's
// safe to join
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::SafeToJoin,
config_id: config.config_id(),
endpoints: config.endpoints.clone(),
identifiers: config.node_ids.clone(),
cluster_metadata: HashMap::new(),
}
} else {
proto::JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::ConfigChanged,
config_id: self.view.get_current_config_id(),
endpoints: vec![],
identifiers: vec![],
cluster_metadata: HashMap::new(),
}
};
self.messages
.push_back((from, proto::ResponseKind::Join(response).into()));
}
}
// Invoked by observers of a node for failure detection
fn handle_probe_message(&self) -> Response {
Response::new_probe(NodeStatus::Up) // TODO: FIXME THIS IS WRONG
}
// Receives edge update events and delivers them to the cut detector to check if it will
// return a valid proposal.
//
// Edge update messages that do not affect the ongoing proposal need to be dropped.
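//
// Rough shape of the handling below:
//   batched alerts -> filter_alert_messages -> cut_detector.aggregate -> proposal
//   proposal += cut_detector.invalidate_failing_edges(&mut self.view)
//   a non-empty proposal is announced as a ViewChangeProposal event and will be
//   handed to paxos.propose once the consensus path is made synchronous (see TODO)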
fn handle_batched_alert_message(&mut self, msg_batch: BatchedAlertMessage) {
let current_config_id = self.view.get_current_config_id();
let size = self.view.get_membership_size();
let mut proposal: Vec<Endpoint> = msg_batch
.alerts
.iter()
// filter out messages which violate membership invariants
// And then run the cut detector to see if there is a new proposal
.filter_map(|message| {
if !self.filter_alert_messages(&msg_batch, message, size, current_config_id) {
return None;
}
Some(self.cut_detector.aggregate(message))
})
.flatten()
.collect();
proposal.extend(self.cut_detector.invalidate_failing_edges(&mut self.view));
if !proposal.is_empty() {
self.announced_proposal = true;
self.event_tx
.send(Event::ViewChangeProposal(
self.create_node_status_change_list(proposal.clone()),
))
.expect("Unable to send response");
// TODO: make paxos synchronous
// self.paxos.propose(proposal, scheduler).await?
}
}
fn create_node_status_change_list(&self, proposal: Vec<Endpoint>) -> Vec<NodeStatusChange> {
proposal
.iter()
.map(|node| NodeStatusChange {
endpoint: node.to_string(),
status: if self.view.is_host_present(node) {
EdgeStatus::Down
} else {
EdgeStatus::Up
},
metadata: Metadata::default(),
})
.collect()
}
// Filter for removing invalid edge update messages. These include messages
// that were for a configuration that the current node is not a part of, and messages
// that violate the semantics of being a part of a configuration
fn filter_alert_messages(
&mut self,
_message_batch: &BatchedAlertMessage, // Might require this later for logging
message: &Alert,
_size: usize,
config_id: ConfigId,
) -> bool {
let dst = &message.dst;
if config_id != message.config_id {
return false;
}
// An invariant to maintain is that a node can only go into the membership set once
// and leave it once
if message.edge_status == EdgeStatus::Down && !self.view.is_host_present(&dst) {
return false;
}
if message.edge_status == EdgeStatus::Up {
// Add joiner data after the node is done being added to the set. Store in a
// temp location for now.
self.joiner_data.insert(
dst.clone(),
(
message.node_id.clone().take().unwrap(),
message.metadata.clone().take().unwrap(),
),
);
}
true
}
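// Illustrative sketch (added for clarity; not part of the original source): the two
// invariants that `filter_alert_messages` enforces, written out as a standalone
// predicate. It only uses items already defined in this file.
#[allow(dead_code)]
fn alert_passes_basic_invariants(&self, message: &Alert, config_id: ConfigId) -> bool {
// 1. The alert must refer to the configuration this node currently belongs to.
let same_config = message.config_id == config_id;
// 2. A Down alert is only meaningful for a host that is still in the view, since a
// node may enter and leave the membership set at most once.
let down_is_valid = message.edge_status != EdgeStatus::Down
|| self.view.is_host_present(&message.dst);
same_config && down_is_valid
}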
pub fn create_failure_detectors(
&mut self,
scheduler: &mut Scheduler,
) -> Result<mpsc::Receiver<(Endpoint, ConfigId)>> {
todo!()
// let (tx, rx) = mpsc::channel(1000);
// for subject in self.view.get_subjects(&self.host_addr)? {
// let (mon_tx, mon_rx) = oneshot::channel();
// let fut = self.monitor.monitor(
// subject.clone(),
// client.clone(),
// self.view.get_current_config_id(),
// tx.clone(),
// mon_rx,
// );
// scheduler.push(Box::pin(fut.map(|_| SchedulerEvents::None)));
// self.monitor_cancellers.push(mon_tx);
// }
// Ok(rx)
}
#[allow(dead_code)]
pub fn edge_failure_notification(&mut self, subject: Endpoint, config_id: ConfigId) {
if config_id != self.view.get_current_config_id() {
// TODO: Figure out why &String does not impl Value
// info!(
// target: "Failure notification from old config.",
// subject = subject,
// config = self.view.get_current_config_id(),
// old_config = config_id
// );
//
return;
}
let alert = proto::Alert {
src: self.host_addr.clone(),
dst: subject.clone(),
edge_status: proto::EdgeStatus::Down,
config_id,
node_id: None,
ring_number: self
.view
.get_ring_numbers(&self.host_addr, &subject)
.expect("Unable to get ring number"),
metadata: None,
};
self.enqueue_alert(alert);
}
pub fn get_batch_alerts(&mut self) -> Option<proto::BatchedAlertMessage> {
if !self.alerts.is_empty()
&& (Instant::now() - self.last_enqueued_alert) > self.batch_window
{
let alerts = self.alerts.drain(..).collect();
Some(proto::BatchedAlertMessage {
sender: self.host_addr.clone(),
alerts,
})
} else {
None
}
}
pub fn enqueue_alert(&mut self, alert: proto::Alert) {
self.last_enqueued_alert = Instant::now();
self.alerts.push_back(alert);
}
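// Hypothetical usage sketch (added; not from the original source): how alert batching
// is intended to behave. Assuming a non-zero `batch_window`, alerts enqueued close
// together are coalesced, and `get_batch_alerts` only yields a batch once the window
// has elapsed since the most recent enqueue.
#[allow(dead_code)]
fn example_alert_batching(&mut self, alert: proto::Alert) -> Option<proto::BatchedAlertMessage> {
// Enqueueing resets the window, so a batch is not available immediately...
self.enqueue_alert(alert);
debug_assert!(self.get_batch_alerts().is_none());
// ...the caller is expected to poll again after `batch_window` has passed, at which
// point all alerts accumulated in the meantime are drained into one message.
self.get_batch_alerts()
}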
/// This is invoked when the consensus module decides on a proposal
///
/// Any node that is not in the membership list will be added to the cluster,
/// and any node that is currently in the membership list, but not in the proposal
/// will be removed.
pub fn on_decide(&mut self, proposal: Vec<Endpoint>) {
// TODO: Handle metadata updates
// TODO: Handle subscriptions
self.cancel_failure_detectors();
for node in &proposal {
if self.view.is_host_present(&node) {
self.view.ring_delete(&node);
} else if let Some((node_id, _metadata)) = self.joiner_data.remove(node) {
self.view.ring_add(node.clone(), node_id);
} else {
panic!("Node not present in pre-join metadata")
}
}
let _current_config_id = self.view.get_current_config_id();
// clear data structures
self.cut_detector.clear();
self.announced_proposal = false;
if self.view.is_host_present(&self.host_addr) {
// TODO: inform edge failure detector about config change
} else {
// We need to gracefully exit by calling a user handler and invalidating the current
// session
unimplemented!("How do you manage a callback again?");
}
// TODO: Instantiate new consensus instance
// self.paxos = FastPaxos::new(self.host_addr, self.view.get_membership_size(), )
self.respond_to_joiners(proposal);
}
fn cancel_failure_detectors(&mut self) {
for signal in self.monitor_cancellers.drain(..) {
let _ = signal.send(());
}
}
fn respond_to_joiners(&mut self, proposal: Vec<Endpoint>) {
let configuration = self.view.get_config();
let join_res = JoinResponse {
sender: self.host_addr.clone(),
status: JoinStatus::SafeToJoin,
config_id: configuration.config_id(),
endpoints: configuration.endpoints.clone(),
identifiers: configuration.node_ids.clone(),
cluster_metadata: HashMap::new(), // TODO: metadata manager
};
for node in proposal {
self.messages
.push_back((node, proto::ResponseKind::Join(join_res.clone()).into()));
// self.joiners_to_respond.remove(&node).and_then(|joiners| {
// joiners.into_iter().for_each(|joiner| {
// joiner
// .send(Ok(Response::new_join(join_res.clone())))
// .expect("Unable to send response");
// });
// // This is so the compiler can infer the type of the closure to be Option<()>
// Some(())
// });
}
}
pub fn drain_messages(&mut self) -> Vec<(Endpoint, Message)> {
let mut msgs = Vec::new();
while let Some(msg) = self.messages.pop_front() {
msgs.push(msg);
}
msgs
}
}
| {
let nodes = self.view.get_ring(0);
nodes
.iter()
.map(|_| NodeStatusChange {
endpoint: self.host_addr.clone(),
status: EdgeStatus::Up,
metadata: Metadata::default(),
})
.collect()
} | identifier_body |
font.rs | use prelude::*;
use core::{self, Layer, Context, Color, Point2, Rect};
use core::builder::*;
use rusttype;
use backends::backend;
use font_loader::system_fonts;
static FONT_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
/// A font used for writing on a [`Layer`](struct.Layer.html).
///
/// Use [`Font::builder()`](#method.builder) to create a new font from a registered system font or
/// a local file. The [`Font::from_file()`](#method.from_file) is a shortcut to achieve the latter.
///
/// In addition to the usual properties of a font, radiant also assigns a fixed size
/// to each font object. Instead of modifying this value, you can clone a new font
/// with a different size using [`Font::clone_with_size()`](struct.Font.html#method.clone_with_size).
#[derive(Clone)]
pub struct Font {
data : Vec<u8>,
font_id : usize,
size : f32,
context : Context,
}
impl Debug for Font {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Font")
.field("data_len", &self.data.len())
.field("font_id", &self.font_id)
.field("size", &self.size)
.finish()
}
}
impl Font {
/// Returns a [font builder](support/struct.FontBuilder.html) for font construction.
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// # let display = Display::builder().hidden().build().unwrap();
/// # let renderer = Renderer::new(&display).unwrap();
/// # let context = display.context();
/// let my_font = Font::builder(&context).family("Arial").size(16.0).build().unwrap();
/// ```
pub fn builder(context: &Context) -> FontBuilder {
FontBuilder::new(context)
}
/// Creates a font instance from a file.
pub fn from_file(context: &Context, file: &str) -> core::Result<Font> {
use std::io::Read;
let mut f = File::open(Path::new(file))?;
let mut font_data = Vec::new();
f.read_to_end(&mut font_data)?;
Ok(Self::create(context, font_data, 12.0))
}
/// Returns the names of all available system fonts.
pub fn query_all() -> Vec<String> {
system_fonts::query_all()
}
/// Returns a query builder to retrieve the names of all available system fonts with the given properties (e.g. monospace).
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// let monospace_fonts = Font::query().monospace().italic().fetch();
/// ```
pub fn query() -> FontQueryBuilder {
FontQueryBuilder::new()
}
/// Returns a new font instance with given size.
pub fn clone_with_size(self: &Self, size: f32) -> Font {
let mut font = (*self).clone();
font.size = size;
font
}
/// Write to given layer.
pub fn write<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, 0.0, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels.
pub fn write_wrapped<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels and applies given rotation and scaling.
pub fn write_transformed<T, U>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32, rotation: f32, scale: U) -> &Font where Point2<f32>: From<T>+From<U> {
let position = Point2::from(position);
let scale = Point2::from(scale);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, rotation, scale.0, scale.1);
self
}
/// Returns the font wrapped in an std::Arc.
pub fn arc(self: Self) -> Arc<Self> {
Arc::new(self)
}
/// Returns the names of all available system fonts with the given properties (e.g. monospace).
pub(crate) fn query_specific(info: FontInfo) -> Vec<String> {
system_fonts::query_specific(&mut Self::build_property(&info))
}
/// Creates a new font instance from given FontInfo struct.
pub(crate) fn from_info(context: &Context, info: FontInfo) -> core::Result<Font> {
if let Some((font_data, _)) = system_fonts::get(&Self::build_property(&info)) {
Ok(Self::create(context, font_data, info.size))
} else {
Err(core::Error::FontError("Failed to get system font".to_string()))
}
}
/// Creates a new unique font
fn | (context: &Context, font_data: Vec<u8>, size: f32) -> Font {
Font {
data : font_data,
font_id : FONT_COUNTER.fetch_add(1, Ordering::Relaxed),
size : size,
context : context.clone(),
}
}
/// Write text to given layer using given font
fn write_paragraph(self: &Self, layer: &Layer, text: &str, x: f32, y: f32, max_width: f32, color: Color, rotation: f32, scale_x: f32, scale_y: f32) {
//!todo probably expensive, but rusttype is completely opaque. would be nice to be able to store Font::info outside of a "may or may not own" container
let rt_font = rusttype::FontCollection::from_bytes(&self.data[..]).unwrap().into_font().unwrap();
let bucket_id = 0;
let glyphs = Self::layout_paragraph(&rt_font, rusttype::Scale::uniform(self.size), max_width, &text);
let context = self.context.lock();
context.font_cache.queue(self.font_id, &glyphs);
let anchor = (0., 0.);
let scale = (scale_x, scale_y);
let cos_rot = rotation.cos();
let sin_rot = rotation.sin();
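// Added note: each glyph's cached position (pos) is scaled and then rotated about the
// text origin (x, y), i.e. offset = (x, y) + R(rotation) * (dist_x, dist_y) with the
// standard 2D rotation matrix R.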
for glyph in &glyphs {
if let Some((uv, pos, dim)) = context.font_cache.rect_for(self.font_id, glyph) {
let dist_x = pos.0 * scale_x;
let dist_y = pos.1 * scale_y;
let offset_x = x + dist_x * cos_rot - dist_y * sin_rot;
let offset_y = y + dist_x * sin_rot + dist_y * cos_rot;
layer.add_rect(None, bucket_id, 0, 1, uv, (offset_x, offset_y), anchor, dim, color, rotation, scale);
}
}
}
/// Layout a paragraph of glyphs
fn layout_paragraph<'a>(font: &'a rusttype::Font, scale: rusttype::Scale, width: f32, text: &str) -> Vec<rusttype::PositionedGlyph<'a>> {
use unicode_normalization::UnicodeNormalization;
let mut result = Vec::new();
let v_metrics = font.v_metrics(scale);
let advance_height = v_metrics.ascent - v_metrics.descent + v_metrics.line_gap;
let mut caret = rusttype::point(0.0, v_metrics.ascent);
let mut last_glyph_id = None;
for c in text.nfc() {
if c.is_control() {
match c {
'\n' => {
caret = rusttype::point(0.0, caret.y + advance_height);
},
_ => {}
}
continue;
}
let base_glyph = font.glyph(c);
if let Some(id) = last_glyph_id.take() {
caret.x += font.pair_kerning(scale, id, base_glyph.id());
}
last_glyph_id = Some(base_glyph.id());
let mut glyph = base_glyph.scaled(scale).positioned(caret);
if let Some(bb) = glyph.pixel_bounding_box() {
if width > 0.0 && bb.max.x > width as i32 {
caret = rusttype::point(0.0, caret.y + advance_height);
glyph = glyph.into_unpositioned().positioned(caret);
last_glyph_id = None;
}
}
caret.x += glyph.unpositioned().h_metrics().advance_width;
result.push(glyph);
}
result
}
/// Builds a FontProperty for the underlying system_fonts library
fn build_property(info: &FontInfo) -> system_fonts::FontProperty {
let mut property = system_fonts::FontPropertyBuilder::new();
if info.family != "" {
property = property.family(&info.family);
}
if info.italic {
property = property.italic();
}
if info.oblique {
property = property.oblique();
}
if info.bold {
property = property.bold();
}
if info.monospace {
property = property.monospace();
}
property.build()
}
}
/// A wrapper around rusttype's font cache.
pub struct FontCache {
cache : Mutex<rusttype::gpu_cache::Cache<'static>>,
queue : Mutex<Vec<(Rect<u32>, Vec<u8>)>>,
dirty : AtomicBool,
}
impl FontCache {
/// Creates a new fontcache instance.
pub fn new(width: u32, height: u32, scale_tolerance: f32, position_tolerance: f32) -> FontCache {
let cache = rusttype::gpu_cache::CacheBuilder {
width,
height,
scale_tolerance,
position_tolerance,
pad_glyphs: true,
}.build();
FontCache {
cache: Mutex::new(cache),
queue: Mutex::new(Vec::new()),
dirty: AtomicBool::new(false),
}
}
/// Queues a glyph for caching.
pub fn queue(self: &Self, font_id: usize, glyphs: &[rusttype::PositionedGlyph]) {
let mut cache = self.cache.lock().unwrap();
let mut queue = self.queue.lock().unwrap();
let mut dirties = false;
for glyph in glyphs {
cache.queue_glyph(font_id, glyph.standalone());
}
cache.cache_queued(|rect, data| {
queue.push( ( ((rect.min.x, rect.min.y), (rect.max.x, rect.max.y)), data.to_vec() ) );
dirties = true;
}).unwrap();
if dirties {
self.dirty.store(dirties, Ordering::Relaxed);
}
}
/// Updates the font cache texture.
pub fn update(self: &Self, texture: &backend::Texture2d) {
if self.dirty.load(Ordering::Relaxed) {
let mut queue = self.queue.lock().unwrap();
for &(ref rect, ref data) in queue.deref() {
texture.write(rect, data);
}
queue.clear();
self.dirty.store(false, Ordering::Relaxed);
}
}
/// Returns a rectangle of uv coordinates for the given glyph as well as its offset and dimensions.
pub fn rect_for(self: &Self, font_id: usize, glyph: &rusttype::PositionedGlyph) -> Option<(Rect, Point2, Point2)> {
let cache = self.cache.lock().unwrap();
if let Ok(Some((uv_rect, screen_rect))) = cache.rect_for(font_id, glyph) {
let uv = ((uv_rect.min.x, uv_rect.min.y), (uv_rect.max.x, uv_rect.max.y));
let pos = (screen_rect.min.x as f32, screen_rect.min.y as f32);
let dim = ((screen_rect.max.x - screen_rect.min.x) as f32, (screen_rect.max.y - screen_rect.min.y) as f32);
Some((uv, pos, dim))
} else {
None
}
}
}
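// Hypothetical usage sketch (added for illustration; not part of the original source):
// the intended per-frame call order for `FontCache`. The glyph slice is assumed to come
// from a prior layout pass and `texture` from the renderer backend.
#[allow(dead_code)]
fn font_cache_call_order(cache: &FontCache, font_id: usize, glyphs: &[rusttype::PositionedGlyph], texture: &backend::Texture2d) {
// 1. Queue the glyphs to be drawn; cache misses are rasterized into a CPU-side upload
// queue and the cache is marked dirty.
cache.queue(font_id, glyphs);
// 2. Flush the queued rectangles into the GPU texture (a cheap no-op when clean).
cache.update(texture);
// 3. Look up UV coordinates, screen offset and dimensions for each glyph while
// building sprite rectangles.
for glyph in glyphs {
let _ = cache.rect_for(font_id, glyph);
}
}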
/// A struct used to filter the result of [`Font::query_specific()`](struct.Font.html#method.query_specific)
/// or to describe a [`Font`](struct.Font.html) to be created from a system font
/// via [`Font::from_info()`](struct.Font.html#method.from_info).
#[derive(Clone)]
pub struct FontInfo {
pub italic : bool,
pub oblique : bool,
pub bold : bool,
pub monospace : bool,
pub family : String,
pub size : f32,
}
impl Default for FontInfo {
fn default() -> FontInfo {
FontInfo {
italic : false,
oblique : false,
bold : false,
monospace : false,
family : "".to_string(),
size : 10.0,
}
}
}
| create | identifier_name |
font.rs | use prelude::*;
use core::{self, Layer, Context, Color, Point2, Rect};
use core::builder::*;
use rusttype;
use backends::backend;
use font_loader::system_fonts;
static FONT_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
/// A font used for writing on a [`Layer`](struct.Layer.html).
///
/// Use [`Font::builder()`](#method.builder) to create a new font from a registered system font or
/// a local file. The [`Font::from_file()`](#method.from_file) is a shortcut to achieve the latter.
///
/// In addition to the usual properties of a font, radiant also assigns a fixed size
/// to each font object. Instead of modifying this value, you can clone a new font
/// with a different size using [`Font::clone_with_size()`](struct.Font.html#method.clone_with_size).
#[derive(Clone)]
pub struct Font {
data : Vec<u8>,
font_id : usize,
size : f32,
context : Context,
}
impl Debug for Font {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Font")
.field("data_len", &self.data.len())
.field("font_id", &self.font_id)
.field("size", &self.size)
.finish()
}
}
impl Font {
/// Returns a [font builder](support/struct.FontBuilder.html) for font construction.
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// # let display = Display::builder().hidden().build().unwrap();
/// # let renderer = Renderer::new(&display).unwrap();
/// # let context = display.context();
/// let my_font = Font::builder(&context).family("Arial").size(16.0).build().unwrap();
/// ```
pub fn builder(context: &Context) -> FontBuilder {
FontBuilder::new(context)
}
/// Creates a font instance from a file.
pub fn from_file(context: &Context, file: &str) -> core::Result<Font> {
use std::io::Read;
let mut f = File::open(Path::new(file))?;
let mut font_data = Vec::new();
f.read_to_end(&mut font_data)?;
Ok(Self::create(context, font_data, 12.0))
}
/// Returns the names of all available system fonts.
pub fn query_all() -> Vec<String> {
system_fonts::query_all()
}
/// Returns a query builder to retrieve the names of all available system fonts with the given properties (e.g. monospace).
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// let monospace_fonts = Font::query().monospace().italic().fetch();
/// ```
pub fn query() -> FontQueryBuilder {
FontQueryBuilder::new()
}
/// Returns a new font instance with given size.
pub fn clone_with_size(self: &Self, size: f32) -> Font {
let mut font = (*self).clone();
font.size = size;
font
}
/// Write to given layer.
pub fn write<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, 0.0, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels.
pub fn write_wrapped<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels and applies given rotation and scaling.
pub fn write_transformed<T, U>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32, rotation: f32, scale: U) -> &Font where Point2<f32>: From<T>+From<U> {
let position = Point2::from(position);
let scale = Point2::from(scale);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, rotation, scale.0, scale.1);
self
}
/// Returns the font wrapped in an std::Arc.
pub fn arc(self: Self) -> Arc<Self> {
Arc::new(self)
}
/// Returns the names of all available system fonts with the given properties (e.g. monospace).
pub(crate) fn query_specific(info: FontInfo) -> Vec<String> {
system_fonts::query_specific(&mut Self::build_property(&info))
}
/// Creates a new font instance from given FontInfo struct.
pub(crate) fn from_info(context: &Context, info: FontInfo) -> core::Result<Font> |
/// Creates a new unique font
fn create(context: &Context, font_data: Vec<u8>, size: f32) -> Font {
Font {
data : font_data,
font_id : FONT_COUNTER.fetch_add(1, Ordering::Relaxed),
size : size,
context : context.clone(),
}
}
/// Write text to given layer using given font
fn write_paragraph(self: &Self, layer: &Layer, text: &str, x: f32, y: f32, max_width: f32, color: Color, rotation: f32, scale_x: f32, scale_y: f32) {
//!todo probably expensive, but rusttype is completely opaque. would be nice to be able to store Font::info outside of a "may or may not own" container
let rt_font = rusttype::FontCollection::from_bytes(&self.data[..]).unwrap().into_font().unwrap();
let bucket_id = 0;
let glyphs = Self::layout_paragraph(&rt_font, rusttype::Scale::uniform(self.size), max_width, &text);
let context = self.context.lock();
context.font_cache.queue(self.font_id, &glyphs);
let anchor = (0., 0.);
let scale = (scale_x, scale_y);
let cos_rot = rotation.cos();
let sin_rot = rotation.sin();
for glyph in &glyphs {
if let Some((uv, pos, dim)) = context.font_cache.rect_for(self.font_id, glyph) {
let dist_x = pos.0 * scale_x;
let dist_y = pos.1 * scale_y;
let offset_x = x + dist_x * cos_rot - dist_y * sin_rot;
let offset_y = y + dist_x * sin_rot + dist_y * cos_rot;
layer.add_rect(None, bucket_id, 0, 1, uv, (offset_x, offset_y), anchor, dim, color, rotation, scale);
}
}
}
/// Layout a paragraph of glyphs
fn layout_paragraph<'a>(font: &'a rusttype::Font, scale: rusttype::Scale, width: f32, text: &str) -> Vec<rusttype::PositionedGlyph<'a>> {
use unicode_normalization::UnicodeNormalization;
let mut result = Vec::new();
let v_metrics = font.v_metrics(scale);
let advance_height = v_metrics.ascent - v_metrics.descent + v_metrics.line_gap;
let mut caret = rusttype::point(0.0, v_metrics.ascent);
let mut last_glyph_id = None;
for c in text.nfc() {
if c.is_control() {
match c {
'\n' => {
caret = rusttype::point(0.0, caret.y + advance_height);
},
_ => {}
}
continue;
}
let base_glyph = font.glyph(c);
if let Some(id) = last_glyph_id.take() {
caret.x += font.pair_kerning(scale, id, base_glyph.id());
}
last_glyph_id = Some(base_glyph.id());
let mut glyph = base_glyph.scaled(scale).positioned(caret);
if let Some(bb) = glyph.pixel_bounding_box() {
if width > 0.0 && bb.max.x > width as i32 {
caret = rusttype::point(0.0, caret.y + advance_height);
glyph = glyph.into_unpositioned().positioned(caret);
last_glyph_id = None;
}
}
caret.x += glyph.unpositioned().h_metrics().advance_width;
result.push(glyph);
}
result
}
/// Builds a FontProperty for the underlying system_fonts library
fn build_property(info: &FontInfo) -> system_fonts::FontProperty {
let mut property = system_fonts::FontPropertyBuilder::new();
if info.family != "" {
property = property.family(&info.family);
}
if info.italic {
property = property.italic();
}
if info.oblique {
property = property.oblique();
}
if info.bold {
property = property.bold();
}
if info.monospace {
property = property.monospace();
}
property.build()
}
}
/// A wrapper around rusttype's font cache.
pub struct FontCache {
cache : Mutex<rusttype::gpu_cache::Cache<'static>>,
queue : Mutex<Vec<(Rect<u32>, Vec<u8>)>>,
dirty : AtomicBool,
}
impl FontCache {
/// Creates a new fontcache instance.
pub fn new(width: u32, height: u32, scale_tolerance: f32, position_tolerance: f32) -> FontCache {
let cache = rusttype::gpu_cache::CacheBuilder {
width,
height,
scale_tolerance,
position_tolerance,
pad_glyphs: true,
}.build();
FontCache {
cache: Mutex::new(cache),
queue: Mutex::new(Vec::new()),
dirty: AtomicBool::new(false),
}
}
/// Queues a glyph for caching.
pub fn queue(self: &Self, font_id: usize, glyphs: &[rusttype::PositionedGlyph]) {
let mut cache = self.cache.lock().unwrap();
let mut queue = self.queue.lock().unwrap();
let mut dirties = false;
for glyph in glyphs {
cache.queue_glyph(font_id, glyph.standalone());
}
cache.cache_queued(|rect, data| {
queue.push( ( ((rect.min.x, rect.min.y), (rect.max.x, rect.max.y)), data.to_vec() ) );
dirties = true;
}).unwrap();
if dirties {
self.dirty.store(dirties, Ordering::Relaxed);
}
}
/// Updates the font cache texture.
pub fn update(self: &Self, texture: &backend::Texture2d) {
if self.dirty.load(Ordering::Relaxed) {
let mut queue = self.queue.lock().unwrap();
for &(ref rect, ref data) in queue.deref() {
texture.write(rect, data);
}
queue.clear();
self.dirty.store(false, Ordering::Relaxed);
}
}
/// Returns a rectangle of uv coordinates for the given glyph as well as its offset and dimensions.
pub fn rect_for(self: &Self, font_id: usize, glyph: &rusttype::PositionedGlyph) -> Option<(Rect, Point2, Point2)> {
let cache = self.cache.lock().unwrap();
if let Ok(Some((uv_rect, screen_rect))) = cache.rect_for(font_id, glyph) {
let uv = ((uv_rect.min.x, uv_rect.min.y), (uv_rect.max.x, uv_rect.max.y));
let pos = (screen_rect.min.x as f32, screen_rect.min.y as f32);
let dim = ((screen_rect.max.x - screen_rect.min.x) as f32, (screen_rect.max.y - screen_rect.min.y) as f32);
Some((uv, pos, dim))
} else {
None
}
}
}
/// A struct used to filter the result of [`Font::query_specific()`](struct.Font.html#method.query_specific)
/// or to describe a [`Font`](struct.Font.html) to be created from a system font
/// via [`Font::from_info()`](struct.Font.html#method.from_info).
#[derive(Clone)]
pub struct FontInfo {
pub italic : bool,
pub oblique : bool,
pub bold : bool,
pub monospace : bool,
pub family : String,
pub size : f32,
}
impl Default for FontInfo {
fn default() -> FontInfo {
FontInfo {
italic : false,
oblique : false,
bold : false,
monospace : false,
family : "".to_string(),
size : 10.0,
}
}
}
| {
if let Some((font_data, _)) = system_fonts::get(&Self::build_property(&info)) {
Ok(Self::create(context, font_data, info.size))
} else {
Err(core::Error::FontError("Failed to get system font".to_string()))
}
} | identifier_body |
font.rs | use prelude::*;
use core::{self, Layer, Context, Color, Point2, Rect};
use core::builder::*;
use rusttype;
use backends::backend;
use font_loader::system_fonts;
static FONT_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
/// A font used for writing on a [`Layer`](struct.Layer.html).
///
/// Use [`Font::builder()`](#method.builder) to create a new font from a registered system font or
/// a local file. The [`Font::from_file()`](#method.from_file) is a shortcut to achieve the latter.
///
/// In addition to the usual properties of a font, radiant also assigns a fixed size
/// to each font object. Instead of modifying this value, you can clone a new font
/// with a different size using [`Font::clone_with_size()`](struct.Font.html#method.clone_with_size).
#[derive(Clone)]
pub struct Font {
data : Vec<u8>,
font_id : usize,
size : f32,
context : Context,
}
impl Debug for Font {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Font")
.field("data_len", &self.data.len())
.field("font_id", &self.font_id) | .finish()
}
}
impl Font {
/// Returns a [font builder](support/struct.FontBuilder.html) for font construction.
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// # let display = Display::builder().hidden().build().unwrap();
/// # let renderer = Renderer::new(&display).unwrap();
/// # let context = display.context();
/// let my_font = Font::builder(&context).family("Arial").size(16.0).build().unwrap();
/// ```
pub fn builder(context: &Context) -> FontBuilder {
FontBuilder::new(context)
}
/// Creates a font instance from a file.
pub fn from_file(context: &Context, file: &str) -> core::Result<Font> {
use std::io::Read;
let mut f = File::open(Path::new(file))?;
let mut font_data = Vec::new();
f.read_to_end(&mut font_data)?;
Ok(Self::create(context, font_data, 12.0))
}
/// Returns the names of all available system fonts.
pub fn query_all() -> Vec<String> {
system_fonts::query_all()
}
/// Returns a query builder to retrieve the names of all available system fonts with the given properties (e.g. monospace).
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// let monospace_fonts = Font::query().monospace().italic().fetch();
/// ```
pub fn query() -> FontQueryBuilder {
FontQueryBuilder::new()
}
/// Returns a new font instance with given size.
pub fn clone_with_size(self: &Self, size: f32) -> Font {
let mut font = (*self).clone();
font.size = size;
font
}
/// Write to given layer.
pub fn write<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, 0.0, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels.
pub fn write_wrapped<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels and applies given rotation and scaling.
pub fn write_transformed<T, U>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32, rotation: f32, scale: U) -> &Font where Point2<f32>: From<T>+From<U> {
let position = Point2::from(position);
let scale = Point2::from(scale);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, rotation, scale.0, scale.1);
self
}
/// Returns the font wrapped in an std::Arc.
pub fn arc(self: Self) -> Arc<Self> {
Arc::new(self)
}
/// Returns the names of all available system fonts with the given properties (e.g. monospace).
pub(crate) fn query_specific(info: FontInfo) -> Vec<String> {
system_fonts::query_specific(&mut Self::build_property(&info))
}
/// Creates a new font instance from given FontInfo struct.
pub(crate) fn from_info(context: &Context, info: FontInfo) -> core::Result<Font> {
if let Some((font_data, _)) = system_fonts::get(&Self::build_property(&info)) {
Ok(Self::create(context, font_data, info.size))
} else {
Err(core::Error::FontError("Failed to get system font".to_string()))
}
}
/// Creates a new unique font
fn create(context: &Context, font_data: Vec<u8>, size: f32) -> Font {
Font {
data : font_data,
font_id : FONT_COUNTER.fetch_add(1, Ordering::Relaxed),
size : size,
context : context.clone(),
}
}
/// Write text to given layer using given font
fn write_paragraph(self: &Self, layer: &Layer, text: &str, x: f32, y: f32, max_width: f32, color: Color, rotation: f32, scale_x: f32, scale_y: f32) {
//!todo probably expensive, but rusttype is completely opaque. would be nice to be able to store Font::info outside of a "may or may not own" container
let rt_font = rusttype::FontCollection::from_bytes(&self.data[..]).unwrap().into_font().unwrap();
let bucket_id = 0;
let glyphs = Self::layout_paragraph(&rt_font, rusttype::Scale::uniform(self.size), max_width, &text);
let context = self.context.lock();
context.font_cache.queue(self.font_id, &glyphs);
let anchor = (0., 0.);
let scale = (scale_x, scale_y);
let cos_rot = rotation.cos();
let sin_rot = rotation.sin();
for glyph in &glyphs {
if let Some((uv, pos, dim)) = context.font_cache.rect_for(self.font_id, glyph) {
let dist_x = pos.0 * scale_x;
let dist_y = pos.1 * scale_y;
let offset_x = x + dist_x * cos_rot - dist_y * sin_rot;
let offset_y = y + dist_x * sin_rot + dist_y * cos_rot;
layer.add_rect(None, bucket_id, 0, 1, uv, (offset_x, offset_y), anchor, dim, color, rotation, scale);
}
}
}
/// Layout a paragraph of glyphs
fn layout_paragraph<'a>(font: &'a rusttype::Font, scale: rusttype::Scale, width: f32, text: &str) -> Vec<rusttype::PositionedGlyph<'a>> {
use unicode_normalization::UnicodeNormalization;
let mut result = Vec::new();
let v_metrics = font.v_metrics(scale);
let advance_height = v_metrics.ascent - v_metrics.descent + v_metrics.line_gap;
let mut caret = rusttype::point(0.0, v_metrics.ascent);
let mut last_glyph_id = None;
for c in text.nfc() {
if c.is_control() {
match c {
'\n' => {
caret = rusttype::point(0.0, caret.y + advance_height);
},
_ => {}
}
continue;
}
let base_glyph = font.glyph(c);
if let Some(id) = last_glyph_id.take() {
caret.x += font.pair_kerning(scale, id, base_glyph.id());
}
last_glyph_id = Some(base_glyph.id());
let mut glyph = base_glyph.scaled(scale).positioned(caret);
if let Some(bb) = glyph.pixel_bounding_box() {
if width > 0.0 && bb.max.x > width as i32 {
caret = rusttype::point(0.0, caret.y + advance_height);
glyph = glyph.into_unpositioned().positioned(caret);
last_glyph_id = None;
}
}
caret.x += glyph.unpositioned().h_metrics().advance_width;
result.push(glyph);
}
result
}
/// Builds a FontProperty for the underlying system_fonts library
fn build_property(info: &FontInfo) -> system_fonts::FontProperty {
let mut property = system_fonts::FontPropertyBuilder::new();
if info.family != "" {
property = property.family(&info.family);
}
if info.italic {
property = property.italic();
}
if info.oblique {
property = property.oblique();
}
if info.bold {
property = property.bold();
}
if info.monospace {
property = property.monospace();
}
property.build()
}
}
/// A wrapper around rusttype's font cache.
pub struct FontCache {
cache : Mutex<rusttype::gpu_cache::Cache<'static>>,
queue : Mutex<Vec<(Rect<u32>, Vec<u8>)>>,
dirty : AtomicBool,
}
impl FontCache {
/// Creates a new fontcache instance.
pub fn new(width: u32, height: u32, scale_tolerance: f32, position_tolerance: f32) -> FontCache {
let cache = rusttype::gpu_cache::CacheBuilder {
width,
height,
scale_tolerance,
position_tolerance,
pad_glyphs: true,
}.build();
FontCache {
cache: Mutex::new(cache),
queue: Mutex::new(Vec::new()),
dirty: AtomicBool::new(false),
}
}
/// Queues a glyph for caching.
pub fn queue(self: &Self, font_id: usize, glyphs: &[rusttype::PositionedGlyph]) {
let mut cache = self.cache.lock().unwrap();
let mut queue = self.queue.lock().unwrap();
let mut dirties = false;
for glyph in glyphs {
cache.queue_glyph(font_id, glyph.standalone());
}
cache.cache_queued(|rect, data| {
queue.push( ( ((rect.min.x, rect.min.y), (rect.max.x, rect.max.y)), data.to_vec() ) );
dirties = true;
}).unwrap();
if dirties {
self.dirty.store(dirties, Ordering::Relaxed);
}
}
/// Updates the font cache texture.
pub fn update(self: &Self, texture: &backend::Texture2d) {
if self.dirty.load(Ordering::Relaxed) {
let mut queue = self.queue.lock().unwrap();
for &(ref rect, ref data) in queue.deref() {
texture.write(rect, data);
}
queue.clear();
self.dirty.store(false, Ordering::Relaxed);
}
}
/// Returns a rectangle of uv coordinates for the given glyph as well as its offset and dimensions.
pub fn rect_for(self: &Self, font_id: usize, glyph: &rusttype::PositionedGlyph) -> Option<(Rect, Point2, Point2)> {
let cache = self.cache.lock().unwrap();
if let Ok(Some((uv_rect, screen_rect))) = cache.rect_for(font_id, glyph) {
let uv = ((uv_rect.min.x, uv_rect.min.y), (uv_rect.max.x, uv_rect.max.y));
let pos = (screen_rect.min.x as f32, screen_rect.min.y as f32);
let dim = ((screen_rect.max.x - screen_rect.min.x) as f32, (screen_rect.max.y - screen_rect.min.y) as f32);
Some((uv, pos, dim))
} else {
None
}
}
}
/// A struct used to filter the result of [`Font::query_specific()`](struct.Font.html#method.query_specific)
/// or to describe a [`Font`](struct.Font.html) to be created from a system font
/// via [`Font::from_info()`](struct.Font.html#method.from_info).
#[derive(Clone)]
pub struct FontInfo {
pub italic : bool,
pub oblique : bool,
pub bold : bool,
pub monospace : bool,
pub family : String,
pub size : f32,
}
impl Default for FontInfo {
fn default() -> FontInfo {
FontInfo {
italic : false,
oblique : false,
bold : false,
monospace : false,
family : "".to_string(),
size : 10.0,
}
}
} | .field("size", &self.size) | random_line_split |
font.rs | use prelude::*;
use core::{self, Layer, Context, Color, Point2, Rect};
use core::builder::*;
use rusttype;
use backends::backend;
use font_loader::system_fonts;
static FONT_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
/// A font used for writing on a [`Layer`](struct.Layer.html).
///
/// Use [`Font::builder()`](#method.builder) to create a new font from a registered system font or
/// a local file. The [`Font::from_file()`](#method.from_file) is a shortcut to achieve the latter.
///
/// In addition to the usual properties of a font, radiant also assigns a fixed size
/// to each font object. Instead of modifying this value, you can clone a new font
/// with a different size using [`Font::clone_with_size()`](struct.Font.html#method.clone_with_size).
#[derive(Clone)]
pub struct Font {
data : Vec<u8>,
font_id : usize,
size : f32,
context : Context,
}
impl Debug for Font {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Font")
.field("data_len", &self.data.len())
.field("font_id", &self.font_id)
.field("size", &self.size)
.finish()
}
}
impl Font {
/// Returns a [font builder](support/struct.FontBuilder.html) for font construction.
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// # let display = Display::builder().hidden().build().unwrap();
/// # let renderer = Renderer::new(&display).unwrap();
/// # let context = display.context();
/// let my_font = Font::builder(&context).family("Arial").size(16.0).build().unwrap();
/// ```
pub fn builder(context: &Context) -> FontBuilder {
FontBuilder::new(context)
}
/// Creates a font instance from a file.
pub fn from_file(context: &Context, file: &str) -> core::Result<Font> {
use std::io::Read;
let mut f = File::open(Path::new(file))?;
let mut font_data = Vec::new();
f.read_to_end(&mut font_data)?;
Ok(Self::create(context, font_data, 12.0))
}
/// Returns the names of all available system fonts.
pub fn query_all() -> Vec<String> {
system_fonts::query_all()
}
/// Returns a query builder to retrieve the names of all available system fonts with the given properties (e.g. monospace).
///
/// # Examples
///
/// ```rust
/// # use radiant_rs::*;
/// let monospace_fonts = Font::query().monospace().italic().fetch();
/// ```
pub fn query() -> FontQueryBuilder {
FontQueryBuilder::new()
}
/// Returns a new font instance with given size.
pub fn clone_with_size(self: &Self, size: f32) -> Font {
let mut font = (*self).clone();
font.size = size;
font
}
/// Write to given layer.
pub fn write<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, 0.0, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels.
pub fn write_wrapped<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32) -> &Font where Point2<f32>: From<T> {
let position = Point2::from(position);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, 0.0, 1.0, 1.0);
self
}
/// Write to given layer. Breaks lines after max_width pixels and applies given rotation and scaling.
pub fn write_transformed<T, U>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32, rotation: f32, scale: U) -> &Font where Point2<f32>: From<T>+From<U> {
let position = Point2::from(position);
let scale = Point2::from(scale);
self.write_paragraph(layer, text, position.0, position.1, max_width, color, rotation, scale.0, scale.1);
self
}
/// Returns the font wrapped in an std::Arc.
pub fn arc(self: Self) -> Arc<Self> {
Arc::new(self)
}
/// Returns the names of all available system fonts with the given properties (e.g. monospace).
pub(crate) fn query_specific(info: FontInfo) -> Vec<String> {
system_fonts::query_specific(&mut Self::build_property(&info))
}
/// Creates a new font instance from given FontInfo struct.
pub(crate) fn from_info(context: &Context, info: FontInfo) -> core::Result<Font> {
if let Some((font_data, _)) = system_fonts::get(&Self::build_property(&info)) {
Ok(Self::create(context, font_data, info.size))
} else {
Err(core::Error::FontError("Failed to get system font".to_string()))
}
}
/// Creates a new unique font
fn create(context: &Context, font_data: Vec<u8>, size: f32) -> Font {
Font {
data : font_data,
font_id : FONT_COUNTER.fetch_add(1, Ordering::Relaxed),
size : size,
context : context.clone(),
}
}
/// Write text to given layer using given font
fn write_paragraph(self: &Self, layer: &Layer, text: &str, x: f32, y: f32, max_width: f32, color: Color, rotation: f32, scale_x: f32, scale_y: f32) {
//!todo probably expensive, but rusttype is completely opaque. would be nice to be able to store Font::info outside of a "may or may not own" container
let rt_font = rusttype::FontCollection::from_bytes(&self.data[..]).unwrap().into_font().unwrap();
let bucket_id = 0;
let glyphs = Self::layout_paragraph(&rt_font, rusttype::Scale::uniform(self.size), max_width, &text);
let context = self.context.lock();
context.font_cache.queue(self.font_id, &glyphs);
let anchor = (0., 0.);
let scale = (scale_x, scale_y);
let cos_rot = rotation.cos();
let sin_rot = rotation.sin();
for glyph in &glyphs {
if let Some((uv, pos, dim)) = context.font_cache.rect_for(self.font_id, glyph) {
let dist_x = pos.0 * scale_x;
let dist_y = pos.1 * scale_y;
let offset_x = x + dist_x * cos_rot - dist_y * sin_rot;
let offset_y = y + dist_x * sin_rot + dist_y * cos_rot;
layer.add_rect(None, bucket_id, 0, 1, uv, (offset_x, offset_y), anchor, dim, color, rotation, scale);
}
}
}
/// Layout a paragraph of glyphs
fn layout_paragraph<'a>(font: &'a rusttype::Font, scale: rusttype::Scale, width: f32, text: &str) -> Vec<rusttype::PositionedGlyph<'a>> {
use unicode_normalization::UnicodeNormalization;
let mut result = Vec::new();
let v_metrics = font.v_metrics(scale);
let advance_height = v_metrics.ascent - v_metrics.descent + v_metrics.line_gap;
let mut caret = rusttype::point(0.0, v_metrics.ascent);
let mut last_glyph_id = None;
for c in text.nfc() {
if c.is_control() {
match c {
'\n' => {
caret = rusttype::point(0.0, caret.y + advance_height);
},
_ => {}
}
continue;
}
let base_glyph = font.glyph(c);
if let Some(id) = last_glyph_id.take() {
caret.x += font.pair_kerning(scale, id, base_glyph.id());
}
last_glyph_id = Some(base_glyph.id());
let mut glyph = base_glyph.scaled(scale).positioned(caret);
if let Some(bb) = glyph.pixel_bounding_box() {
if width > 0.0 && bb.max.x > width as i32 {
caret = rusttype::point(0.0, caret.y + advance_height);
glyph = glyph.into_unpositioned().positioned(caret);
last_glyph_id = None;
}
}
caret.x += glyph.unpositioned().h_metrics().advance_width;
result.push(glyph);
}
result
}
/// Builds a FontProperty for the underlying system_fonts library
fn build_property(info: &FontInfo) -> system_fonts::FontProperty {
let mut property = system_fonts::FontPropertyBuilder::new();
if info.family != "" {
property = property.family(&info.family);
}
if info.italic {
property = property.italic();
}
if info.oblique {
property = property.oblique();
}
if info.bold |
if info.monospace {
property = property.monospace();
}
property.build()
}
}
/// A wrapper around rusttype's font cache.
pub struct FontCache {
cache : Mutex<rusttype::gpu_cache::Cache<'static>>,
queue : Mutex<Vec<(Rect<u32>, Vec<u8>)>>,
dirty : AtomicBool,
}
impl FontCache {
/// Creates a new fontcache instance.
pub fn new(width: u32, height: u32, scale_tolerance: f32, position_tolerance: f32) -> FontCache {
let cache = rusttype::gpu_cache::CacheBuilder {
width,
height,
scale_tolerance,
position_tolerance,
pad_glyphs: true,
}.build();
FontCache {
cache: Mutex::new(cache),
queue: Mutex::new(Vec::new()),
dirty: AtomicBool::new(false),
}
}
/// Queues a glyph for caching.
pub fn queue(self: &Self, font_id: usize, glyphs: &[rusttype::PositionedGlyph]) {
let mut cache = self.cache.lock().unwrap();
let mut queue = self.queue.lock().unwrap();
let mut dirties = false;
for glyph in glyphs {
cache.queue_glyph(font_id, glyph.standalone());
}
cache.cache_queued(|rect, data| {
queue.push( ( ((rect.min.x, rect.min.y), (rect.max.x, rect.max.y)), data.to_vec() ) );
dirties = true;
}).unwrap();
if dirties {
self.dirty.store(dirties, Ordering::Relaxed);
}
}
/// Updates the font cache texture.
pub fn update(self: &Self, texture: &backend::Texture2d) {
if self.dirty.load(Ordering::Relaxed) {
let mut queue = self.queue.lock().unwrap();
for &(ref rect, ref data) in queue.deref() {
texture.write(rect, data);
}
queue.clear();
self.dirty.store(false, Ordering::Relaxed);
}
}
/// Returns a rectangle of uv coordinates for the given glyph as well as its offset and dimensions.
pub fn rect_for(self: &Self, font_id: usize, glyph: &rusttype::PositionedGlyph) -> Option<(Rect, Point2, Point2)> {
let cache = self.cache.lock().unwrap();
if let Ok(Some((uv_rect, screen_rect))) = cache.rect_for(font_id, glyph) {
let uv = ((uv_rect.min.x, uv_rect.min.y), (uv_rect.max.x, uv_rect.max.y));
let pos = (screen_rect.min.x as f32, screen_rect.min.y as f32);
let dim = ((screen_rect.max.x - screen_rect.min.x) as f32, (screen_rect.max.y - screen_rect.min.y) as f32);
Some((uv, pos, dim))
} else {
None
}
}
}
/// A struct used to filter the result of [`Font::query_specific()`](struct.Font.html#method.query_specific)
/// or to describe a [`Font`](struct.Font.html) to be created from a system font
/// via [`Font::from_info()`](struct.Font.html#method.from_info).
#[derive(Clone)]
pub struct FontInfo {
pub italic : bool,
pub oblique : bool,
pub bold : bool,
pub monospace : bool,
pub family : String,
pub size : f32,
}
impl Default for FontInfo {
fn default() -> FontInfo {
FontInfo {
italic : false,
oblique : false,
bold : false,
monospace : false,
family : "".to_string(),
size : 10.0,
}
}
}
| {
property = property.bold();
} | conditional_block |
type_checker.rs | use scopeguard::{guard, ScopeGuard};
use super::model::Model;
use super::cwf::*;
use super::lang::ast::*;
pub struct TypeChecker<T: Model> {
model: T,
ctxs : Vec<CtxInfo>,
}
struct CtxInfo {
syntax: Ctx,
// morphism from previous (if any) context to current
weakening: Option<Morph>,
defs: Vec<(String, Tm, Ty)>,
}
impl<TModel: Model> TypeChecker<TModel> {
pub fn new(mut model: TModel) -> TypeChecker<TModel> {
let empty = model.empty_ctx();
TypeChecker {
model: model,
ctxs: vec![CtxInfo {
syntax: empty,
weakening: None,
defs: vec![]
}],
}
}
// Saves the current number of context extensions and definitions
// in the current context and returns a scopeguard that will restore
// to this state when it is dropped. The scope guard takes ownership
// of the TC.
fn save_ctx<'a>(&'a mut self) ->
ScopeGuard<&mut TypeChecker<TModel>, impl FnOnce(&'a mut TypeChecker<TModel>)>
{
let depth = self.ctxs.len();
assert!(depth > 0); // always have empty context
let num_defs = self.ctxs.last().unwrap().defs.len();
guard(self, move |s| {
s.ctxs.truncate(depth);
s.ctxs.last_mut().unwrap().defs.truncate(num_defs)
})
}
fn extend(&mut self, ext: &CtxExt) -> Result<Ty, String> {
let ty = self.check_ty(&ext.1)?;
let new_ctx = self.model.comprehension(&ty);
let weakening = self.model.weakening(&ty);
let mut defs = vec![];
if let Some(ref name) = ext.0 {
let var_ty = Self::subst_ty(&mut self.model, &weakening, &ty);
defs.push((name.clone(), self.model.var(&ty), var_ty))
}
let new_ctx_info = CtxInfo {
syntax: new_ctx,
weakening: Some(weakening),
defs: defs
};
self.ctxs.push(new_ctx_info);
Ok(ty)
}
pub fn check_def(&mut self, def: &Def) -> Result<Tm, String> {
let mut s = self.save_ctx();
for ext in def.ctx.iter() {
s.extend(ext)?;
}
let ret_ty = s.check_ty(&def.ret_ty)?;
s.check_tm_ty(&def.body, &ret_ty)
}
fn check_let<T, F>(
&mut self, check_body: F,
name: &DefId, ty: &Expr, val: &Expr, body: &Expr) -> Result<T, String>
where F : FnOnce(&mut Self, &Expr) -> Result<T, String>
{
let mut s = self.save_ctx();
let ty = s.check_ty(ty)?;
let val = s.check_tm_ty(val, &ty)?;
if let Some(name) = name {
s.ctxs.last_mut().unwrap().defs.push((name.clone(), val, ty));
};
check_body(&mut s, body)
}
pub fn check_ty(&mut self, expr: &Expr) -> Result<Ty, String> {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
match expr {
Expr::App(id, v) =>
match (id.as_str(), &v[..]) {
("bool", []) => Ok(self.model.bool_ty(cur_ctx_syn)),
("eq", [a, b]) => self.check_eq(a, b),
(s, v) => Err(format!("Unexpected {} with {} args", s, v.len()))
},
Expr::Let { name, ty, val, body } =>
self.check_let(|s, body| s.check_ty(body), name, &*ty, &*val, &*body),
_ => Err(format!("Unhandled type {:?}", expr))
}
}
pub fn check_tm(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> {
match expr {
Expr::App(id, v) =>
match (id.as_str(), &v[..]) {
("refl", [a]) => self.refl(&*a),
("true", []) => Ok(self.true_tm()),
("false", []) => Ok(self.false_tm()),
(v, []) => self.access_var(v),
(s, v) => Err(format!("Unexpected {} with {} args", s, v.len()))
},
Expr::Let { name, ty, val, body } =>
self.check_let(|s, body| s.check_tm(body), name, &*ty, &*val, &*body),
Expr::Elim { val, into_ctx, into_ty, cases } =>
self.check_elim(&*val, into_ctx, &*into_ty, cases),
}
}
fn | (&mut self, expr: &Expr) -> Result<(Tm, Ty), String> {
let (tm, _) = self.check_tm(expr)?;
let eq_ty = self.model.eq_ty(&tm, &tm);
let refl_tm = self.model.refl(&tm);
Ok((refl_tm, eq_ty))
}
fn true_tm(&mut self) -> (Tm, Ty) {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bool_ty = self.model.bool_ty(cur_ctx_syn);
let tm = self.model.true_tm(cur_ctx_syn);
(tm, bool_ty)
}
fn false_tm(&mut self) -> (Tm, Ty) {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bool_ty = self.model.bool_ty(cur_ctx_syn);
let tm = self.model.false_tm(cur_ctx_syn);
(tm, bool_ty)
}
// Given G |- a : A, construct the morphism <1(G), A, a> : G.A -> G
// substituting the last A for a in any term in G.A.
fn bar_tm(model: &mut TModel, ctx: &Ctx, ty: &Ty, tm: &Tm) -> Morph {
let id = model.id_morph(ctx);
model.extension(&id, ty, tm)
}
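// Illustrative sketch (added; not in the original source): how `bar_tm` is meant to be
// used. For `true` in some context `ctx`, bar(true) = <1(ctx), bool, true> maps syntax
// formed over ctx.bool back to syntax over ctx, replacing the comprehension variable
// with `true` (this file's arrows point in the direction of substitution).
#[allow(dead_code)]
fn bar_tm_example(model: &mut TModel, ctx: &Ctx) -> Ty {
let bool_ty = model.bool_ty(ctx);
let tru = model.true_tm(ctx);
let bar = Self::bar_tm(model, ctx, &bool_ty, &tru);
// A type formed in the extended context ctx.bool...
let ext_ctx = model.comprehension(&bool_ty);
let ty_over_ext = model.bool_ty(&ext_ctx);
// ...is brought back down to ctx by substituting along bar(true), exactly as
// `check_elim` does with the scrutinee term.
Self::subst_ty(model, &bar, &ty_over_ext)
}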
fn check_elim(
&mut self,
val: &Expr, into_ctx: &Vec<CtxExt>, into_ty: &Expr,
cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String>
{
let (val_tm, val_ty) = self.check_tm(val)?;
let bool_ty = self.model.bool_ty(&self.ctxs.last().unwrap().syntax);
let (elim_tm, elim_ty) =
if self.model.ty_eq(&val_ty, &bool_ty) {
self.elim_bool(into_ctx, into_ty, cases)?
} else {
return Err(format!("Cannot eliminate {:?} of type {:?}", val, val_ty))
};
// Substitute bar(val_tm) into the elimination term and type, which
// live in an extended context.
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bar = Self::bar_tm(&mut self.model, cur_ctx_syn, &val_ty, &val_tm);
let tm = self.model.subst_tm(&bar, &elim_tm);
let ty = self.model.subst_ty(&bar, &elim_ty);
Ok((tm, ty))
}
fn elim_bool(
&mut self,
into_ctx: &Vec<CtxExt>, into_ty: &Expr,
cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String>
{
if into_ctx.len() != 1 || cases.len() != 2 ||
cases[0].0.len() != 0 || cases[1].0.len() != 0
{
return Err("Invalid bool elimination".to_owned())
}
let cur_ctx_syn = self.ctxs.last().unwrap().syntax.clone();
let bool_ty = self.model.bool_ty(&cur_ctx_syn);
let into_ty = {
let mut s = self.save_ctx();
let ext_ty = s.extend(&into_ctx[0])?;
if !s.model.ty_eq(&ext_ty, &bool_ty) {
return Err("Invalid extension for into-type: expected bool".to_owned());
}
s.check_ty(into_ty)?
};
let true_tm = self.model.true_tm(&cur_ctx_syn);
let true_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &true_tm);
let expected_ty_true_case = Self::subst_ty(&mut self.model, &true_bar, &into_ty);
let false_tm = self.model.false_tm(&cur_ctx_syn);
let false_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &false_tm);
let expected_ty_false_case = Self::subst_ty(&mut self.model, &false_bar, &into_ty);
let true_case_tm = self.check_tm_ty(&cases[0].1, &expected_ty_true_case)?;
let false_case_tm = self.check_tm_ty(&cases[1].1, &expected_ty_false_case)?;
let tm = self.model.elim_bool(&cur_ctx_syn, &into_ty, &true_case_tm, &false_case_tm);
// Define substitutions by true and false
Self::subst_tm(&mut self.model, &true_bar, &tm);
Self::subst_tm(&mut self.model, &false_bar, &tm);
Ok((tm, into_ty))
}
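// Added note: the typing rule implemented by `elim_bool` above, in informal notation:
//
//   G, b : bool |- C ty      G |- t : C[true/b]      G |- f : C[false/b]
//   ----------------------------------------------------------------------
//   G, b : bool |- elim_bool(C, t, f) : C
//
// `check_elim` then instantiates b with the scrutinee by substituting the result along
// bar(val_tm).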
fn check_tm_ty(&mut self, expr: &Expr, expected_ty: &Ty) -> Result<Tm, String> {
let (tm, ty) = self.check_tm(expr)?;
if self.model.ty_eq(&ty, expected_ty) {
Ok(tm)
} else {
Err(format!("expected:\n{:?}\ngot:\n{:?}", expected_ty, ty))
}
}
fn access_var(&mut self, name: &str) -> Result<(Tm, Ty), String> {
let mut ctx_index = self.ctxs.len();
for ctx in self.ctxs.iter().rev() {
ctx_index -= 1;
for (ref ctx_var_name, ref tm, ref ty) in ctx.defs.iter().rev() {
if ctx_var_name != name {
continue
}
let mut tm = tm.clone();
let mut ty = ty.clone();
// Found term, inject it into current context.
for ctx in &self.ctxs[ctx_index+1..] {
let weakening = match ctx.weakening {
Some(ref w) => w,
None => panic!("expected weakening to be available")
};
tm = Self::subst_tm(&mut self.model, &weakening, &tm);
ty = Self::subst_ty(&mut self.model, &weakening, &ty);
}
return Ok((tm, ty))
}
}
Err(format!("unknown definition {}", name))
}
fn check_eq(&mut self, a: &Expr, b: &Expr) -> Result<Ty, String> {
let (tma, tya) = self.check_tm(a)?;
let tmb = self.check_tm_ty(b, &tya)?;
Ok(self.model.eq_ty(&tma, &tmb))
}
fn subst_ty(model: &mut TModel, g: &Morph, ty: &Ty) -> Ty {
model.subst_ty(g, ty);
match ty {
Ty::Subst(f, s) => {
// g (f s) = (g. f) s
let gf = Self::comp_morphs(model, g, &*f);
Self::subst_ty(model, &gf, &*s)
},
Ty::Bool(_) => {
let codomain = Self::morph_codomain(model, g);
model.bool_ty(&codomain)
},
Ty::Eq(a, b) => {
let ga = Self::subst_tm(model, g, &*a);
let gb = Self::subst_tm(model, g, &*b);
model.eq_ty(&ga, &gb)
},
}
}
fn subst_tm(model: &mut TModel, g: &Morph, tm: &Tm) -> Tm {
let gtm = model.subst_tm(g, tm);
match tm {
Tm::Subst(f, tm) => {
// g (f tm) = (g. f) tm
let gf = Self::comp_morphs(model, g, &*f);
Self::subst_tm(model, &gf, &*tm)
},
Tm::Refl(a) => {
let ga = Self::subst_tm(model, g, &*a);
model.refl(&ga)
},
Tm::True(_) => {
let codomain = Self::morph_codomain(model, g);
model.true_tm(&codomain)
},
Tm::False(_) => {
let codomain = Self::morph_codomain(model, g);
model.false_tm(&codomain)
},
Tm::ElimBool(ctx, into_ty, true_case, false_case) => {
let ctx_bool = model.bool_ty(ctx);
let w = model.weakening(&ctx_bool);
// w : ctx -> ctx.bool (where into_ty, true_case and false_case live)
let gw = Self::comp_morphs(model, g, &w);
Self::subst_ty(model, &gw, &*into_ty);
Self::subst_tm(model, &gw, &*true_case);
Self::subst_tm(model, &gw, &*false_case);
gtm
},
_ => gtm
}
}
fn comp_morphs(model: &mut TModel, g: &Morph, f: &Morph) -> Morph {
let gf = model.compose(g, f);
match f {
Morph::Identity(_) => g.clone(),
Morph::Composition(f, e) => {
// g. (f. e) = (g. f). e
let gf = model.compose(g, &*f);
Self::comp_morphs(model, &gf, e)
},
Morph::Extension(f, s, tm) => {
// g. <f, s, tm> = <g. f, s, gtm>
let gf = Self::comp_morphs(model, g, f);
let gtm = model.subst_tm(g, tm);
model.extension(&gf, &*s, >m)
}
_ => gf
}
}
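// Added summary of the equations pushed through the syntax by `subst_ty`, `subst_tm`
// and `comp_morphs` above (g, f, e are substitutions; s is a type; a, b are terms):
//
//   g (f s)        = (g . f) s            g (f a)       = (g . f) a
//   g bool         = bool                 g true = true, g false = false
//   g (eq a b)     = eq (g a) (g b)       g (refl a)    = refl (g a)
//   g . id         = g
//   g . (f . e)    = (g . f) . e
//   g . <f, s, a>  = <g . f, s, g a>
//
// These mirror the match arms above; note that the raw `model.subst_ty`,
// `model.subst_tm` and `model.compose` calls are still made first, so the model sees
// both the original form and the pushed-in form.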
fn morph_codomain(model: &mut TModel, morph: &Morph) -> Ctx {
match morph {
Morph::Identity(ctx) => (**ctx).clone(),
Morph::Weakening(ty) => model.comprehension(&*ty),
Morph::Composition(g, _) => Self::morph_codomain(model, g),
Morph::Extension(f, _, _) => Self::morph_codomain(model, f),
}
}
}
#[cfg(test)]
mod tests {
use crate::cwf_model;
use crate::lang::parser::DefParser;
fn verify_def(code: &str) {
let p = DefParser::new().parse(code).unwrap();
let model = cwf_model::Cwf::new();
super::TypeChecker::new(model).check_def(&p).unwrap();
}
#[test]
fn id() {
verify_def("def id (b : bool) : bool := b.");
}
#[test]
fn negb() {
verify_def("
def negb (b : bool) : bool :=
elim b into (_ : bool) : bool
| => false
| => true
end.");
}
#[test]
fn transitive() {
verify_def("
def trans (a b c d e : bool)
(p1 : a = b)
(p2 : b = c)
(p3 : c = d)
(p4 : d = e) : a = e :=
refl a.")
}
#[test]
fn uip() {
verify_def("
def uip (a b : bool) (p : a = b) (q : b = a) : p = q :=
refl p.")
}
#[test]
fn eta() {
verify_def("
def eta (a : bool) : a = elim a into (_ : bool) : bool | => true | => false end :=
elim a into (b : bool) : b = elim b into (_ : bool) : bool | => true | => false end
| => refl true
| => refl false
end.")
}
} | refl | identifier_name |
type_checker.rs | use scopeguard::{guard, ScopeGuard};
use super::model::Model;
use super::cwf::*;
use super::lang::ast::*;
pub struct TypeChecker<T: Model> {
model: T,
ctxs : Vec<CtxInfo>,
}
struct CtxInfo {
syntax: Ctx,
// morphism from previous (if any) context to current
weakening: Option<Morph>,
defs: Vec<(String, Tm, Ty)>,
}
impl<TModel: Model> TypeChecker<TModel> {
pub fn new(mut model: TModel) -> TypeChecker<TModel> {
let empty = model.empty_ctx();
TypeChecker {
model: model,
ctxs: vec![CtxInfo {
syntax: empty,
weakening: None,
defs: vec![]
}],
}
}
// Saves the current number of context extensions and definitions
// in the current context and returns a scopeguard that will restore
// to this state when it is dropped. The scope guard takes ownership
// of the TC.
fn save_ctx<'a>(&'a mut self) ->
ScopeGuard<&mut TypeChecker<TModel>, impl FnOnce(&'a mut TypeChecker<TModel>)>
{
let depth = self.ctxs.len();
assert!(depth > 0); // always have empty context
let num_defs = self.ctxs.last().unwrap().defs.len();
guard(self, move |s| {
s.ctxs.truncate(depth);
s.ctxs.last_mut().unwrap().defs.truncate(num_defs)
})
}
fn extend(&mut self, ext: &CtxExt) -> Result<Ty, String> {
let ty = self.check_ty(&ext.1)?;
let new_ctx = self.model.comprehension(&ty);
let weakening = self.model.weakening(&ty);
let mut defs = vec![];
if let Some(ref name) = ext.0 {
let var_ty = Self::subst_ty(&mut self.model, &weakening, &ty);
defs.push((name.clone(), self.model.var(&ty), var_ty))
}
let new_ctx_info = CtxInfo {
syntax: new_ctx,
weakening: Some(weakening),
defs: defs
};
self.ctxs.push(new_ctx_info);
Ok(ty)
}
pub fn check_def(&mut self, def: &Def) -> Result<Tm, String> {
let mut s = self.save_ctx();
for ext in def.ctx.iter() {
s.extend(ext)?;
}
let ret_ty = s.check_ty(&def.ret_ty)?;
s.check_tm_ty(&def.body, &ret_ty)
}
fn check_let<T, F>(
&mut self, check_body: F,
name: &DefId, ty: &Expr, val: &Expr, body: &Expr) -> Result<T, String>
where F : FnOnce(&mut Self, &Expr) -> Result<T, String>
{
let mut s = self.save_ctx();
let ty = s.check_ty(ty)?;
let val = s.check_tm_ty(val, &ty)?; | if let Some(name) = name {
s.ctxs.last_mut().unwrap().defs.push((name.clone(), val, ty));
};
check_body(&mut s, body)
}
pub fn check_ty(&mut self, expr: &Expr) -> Result<Ty, String> {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
match expr {
Expr::App(id, v) =>
match (id.as_str(), &v[..]) {
("bool", []) => Ok(self.model.bool_ty(cur_ctx_syn)),
("eq", [a, b]) => self.check_eq(a, b),
(s, v) => Err(format!("Unexpected {} with {} args", s, v.len()))
},
Expr::Let { name, ty, val, body } =>
self.check_let(|s, body| s.check_ty(body), name, &*ty, &*val, &*body),
_ => Err(format!("Unhandled type {:?}", expr))
}
}
pub fn check_tm(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> {
match expr {
Expr::App(id, v) =>
match (id.as_str(), &v[..]) {
("refl", [a]) => self.refl(&*a),
("true", []) => Ok(self.true_tm()),
("false", []) => Ok(self.false_tm()),
(v, []) => self.access_var(v),
(s, v) => Err(format!("Unexpected {} with {} args", s, v.len()))
},
Expr::Let { name, ty, val, body } =>
self.check_let(|s, body| s.check_tm(body), name, &*ty, &*val, &*body),
Expr::Elim { val, into_ctx, into_ty, cases } =>
self.check_elim(&*val, into_ctx, &*into_ty, cases),
}
}
fn refl(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> {
let (tm, _) = self.check_tm(expr)?;
let eq_ty = self.model.eq_ty(&tm, &tm);
let refl_tm = self.model.refl(&tm);
Ok((refl_tm, eq_ty))
}
fn true_tm(&mut self) -> (Tm, Ty) {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bool_ty = self.model.bool_ty(cur_ctx_syn);
let tm = self.model.true_tm(cur_ctx_syn);
(tm, bool_ty)
}
fn false_tm(&mut self) -> (Tm, Ty) {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bool_ty = self.model.bool_ty(cur_ctx_syn);
let tm = self.model.false_tm(cur_ctx_syn);
(tm, bool_ty)
}
// Given G |- a : A, construct the morphism <1(G), A, a> : G.A -> G
// substituting the last A for a in any term in G.A.
fn bar_tm(model: &mut TModel, ctx: &Ctx, ty: &Ty, tm: &Tm) -> Morph {
let id = model.id_morph(ctx);
model.extension(&id, ty, tm)
}
fn check_elim(
&mut self,
val: &Expr, into_ctx: &Vec<CtxExt>, into_ty: &Expr,
cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String>
{
let (val_tm, val_ty) = self.check_tm(val)?;
let bool_ty = self.model.bool_ty(&self.ctxs.last().unwrap().syntax);
let (elim_tm, elim_ty) =
if self.model.ty_eq(&val_ty, &bool_ty) {
self.elim_bool(into_ctx, into_ty, cases)?
} else {
return Err(format!("Cannot eliminate {:?} of type {:?}", val, val_ty))
};
// Substitute bar(val_tm) into the elimination term and type, which
// live in an extended context.
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bar = Self::bar_tm(&mut self.model, cur_ctx_syn, &val_ty, &val_tm);
let tm = self.model.subst_tm(&bar, &elim_tm);
let ty = self.model.subst_ty(&bar, &elim_ty);
Ok((tm, ty))
}
fn elim_bool(
&mut self,
into_ctx: &Vec<CtxExt>, into_ty: &Expr,
cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String>
{
if into_ctx.len() != 1 || cases.len() != 2 ||
cases[0].0.len() != 0 || cases[1].0.len() != 0
{
return Err("Invalid bool elimination".to_owned())
}
let cur_ctx_syn = self.ctxs.last().unwrap().syntax.clone();
let bool_ty = self.model.bool_ty(&cur_ctx_syn);
let into_ty = {
let mut s = self.save_ctx();
let ext_ty = s.extend(&into_ctx[0])?;
if !s.model.ty_eq(&ext_ty, &bool_ty) {
return Err("Invalid extension for into-type: expected bool".to_owned());
}
s.check_ty(into_ty)?
};
let true_tm = self.model.true_tm(&cur_ctx_syn);
let true_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &true_tm);
let expected_ty_true_case = Self::subst_ty(&mut self.model, &true_bar, &into_ty);
let false_tm = self.model.false_tm(&cur_ctx_syn);
let false_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &false_tm);
let expected_ty_false_case = Self::subst_ty(&mut self.model, &false_bar, &into_ty);
let true_case_tm = self.check_tm_ty(&cases[0].1, &expected_ty_true_case)?;
let false_case_tm = self.check_tm_ty(&cases[1].1, &expected_ty_false_case)?;
let tm = self.model.elim_bool(&cur_ctx_syn, &into_ty, &true_case_tm, &false_case_tm);
// Define substitutions by true and false
Self::subst_tm(&mut self.model, &true_bar, &tm);
Self::subst_tm(&mut self.model, &false_bar, &tm);
Ok((tm, into_ty))
}
fn check_tm_ty(&mut self, expr: &Expr, expected_ty: &Ty) -> Result<Tm, String> {
let (tm, ty) = self.check_tm(expr)?;
if self.model.ty_eq(&ty, expected_ty) {
Ok(tm)
} else {
Err(format!("expected:\n{:?}\ngot:\n{:?}", expected_ty, ty))
}
}
fn access_var(&mut self, name: &str) -> Result<(Tm, Ty), String> {
let mut ctx_index = self.ctxs.len();
for ctx in self.ctxs.iter().rev() {
ctx_index -= 1;
for (ref ctx_var_name, ref tm, ref ty) in ctx.defs.iter().rev() {
if ctx_var_name != name {
continue
}
let mut tm = tm.clone();
let mut ty = ty.clone();
// Found term, inject it into current context.
for ctx in &self.ctxs[ctx_index+1..] {
let weakening = match ctx.weakening {
Some(ref w) => w,
None => panic!("expected weakening to be available")
};
tm = Self::subst_tm(&mut self.model, &weakening, &tm);
ty = Self::subst_ty(&mut self.model, &weakening, &ty);
}
return Ok((tm, ty))
}
}
Err(format!("unknown definition {}", name))
}
fn check_eq(&mut self, a: &Expr, b: &Expr) -> Result<Ty, String> {
let (tma, tya) = self.check_tm(a)?;
let tmb = self.check_tm_ty(b, &tya)?;
Ok(self.model.eq_ty(&tma, &tmb))
}
fn subst_ty(model: &mut TModel, g: &Morph, ty: &Ty) -> Ty {
model.subst_ty(g, ty);
match ty {
Ty::Subst(f, s) => {
// g (f s) = (g. f) s
let gf = Self::comp_morphs(model, g, &*f);
Self::subst_ty(model, &gf, &*s)
},
Ty::Bool(_) => {
let codomain = Self::morph_codomain(model, g);
model.bool_ty(&codomain)
},
Ty::Eq(a, b) => {
let ga = Self::subst_tm(model, g, &*a);
let gb = Self::subst_tm(model, g, &*b);
model.eq_ty(&ga, &gb)
},
}
}
fn subst_tm(model: &mut TModel, g: &Morph, tm: &Tm) -> Tm {
let gtm = model.subst_tm(g, tm);
match tm {
Tm::Subst(f, tm) => {
// g (f tm) = (g. f) tm
let gf = Self::comp_morphs(model, g, &*f);
Self::subst_tm(model, &gf, &*tm)
},
Tm::Refl(a) => {
let ga = Self::subst_tm(model, g, &*a);
model.refl(&ga)
},
Tm::True(_) => {
let codomain = Self::morph_codomain(model, g);
model.true_tm(&codomain)
},
Tm::False(_) => {
let codomain = Self::morph_codomain(model, g);
model.false_tm(&codomain)
},
Tm::ElimBool(ctx, into_ty, true_case, false_case) => {
let ctx_bool = model.bool_ty(ctx);
let w = model.weakening(&ctx_bool);
// w : ctx -> ctx.bool (where into_ty, true_case and false_case live)
let gw = Self::comp_morphs(model, g, &w);
Self::subst_ty(model, &gw, &*into_ty);
Self::subst_tm(model, &gw, &*true_case);
Self::subst_tm(model, &gw, &*false_case);
gtm
},
_ => gtm
}
}
fn comp_morphs(model: &mut TModel, g: &Morph, f: &Morph) -> Morph {
let gf = model.compose(g, f);
match f {
Morph::Identity(_) => g.clone(),
Morph::Composition(f, e) => {
// g. (f. e) = (g. f). e
let gf = model.compose(g, &*f);
Self::comp_morphs(model, &gf, e)
},
Morph::Extension(f, s, tm) => {
// g. <f, s, tm> = <g. f, s, gtm>
let gf = Self::comp_morphs(model, g, f);
let gtm = model.subst_tm(g, tm);
model.extension(&gf, &*s, >m)
}
_ => gf
}
}
fn morph_codomain(model: &mut TModel, morph: &Morph) -> Ctx {
match morph {
Morph::Identity(ctx) => (**ctx).clone(),
Morph::Weakening(ty) => model.comprehension(&*ty),
Morph::Composition(g, _) => Self::morph_codomain(model, g),
Morph::Extension(f, _, _) => Self::morph_codomain(model, f),
}
}
}
#[cfg(test)]
mod tests {
use crate::cwf_model;
use crate::lang::parser::DefParser;
fn verify_def(code: &str) {
let p = DefParser::new().parse(code).unwrap();
let model = cwf_model::Cwf::new();
super::TypeChecker::new(model).check_def(&p).unwrap();
}
#[test]
fn id() {
verify_def("def id (b : bool) : bool := b.");
}
#[test]
fn negb() {
verify_def("
def negb (b : bool) : bool :=
elim b into (_ : bool) : bool
| => false
| => true
end.");
}
#[test]
fn transitive() {
verify_def("
def trans (a b c d e : bool)
(p1 : a = b)
(p2 : b = c)
(p3 : c = d)
(p4 : d = e) : a = e :=
refl a.")
}
#[test]
fn uip() {
verify_def("
def uip (a b : bool) (p : a = b) (q : b = a) : p = q :=
refl p.")
}
#[test]
fn eta() {
verify_def("
def eta (a : bool) : a = elim a into (_ : bool) : bool | => true | => false end :=
elim a into (b : bool) : b = elim b into (_ : bool) : bool | => true | => false end
| => refl true
| => refl false
end.")
}
} | random_line_split |
|
type_checker.rs | use scopeguard::{guard, ScopeGuard};
use super::model::Model;
use super::cwf::*;
use super::lang::ast::*;
pub struct TypeChecker<T: Model> {
model: T,
ctxs : Vec<CtxInfo>,
}
struct CtxInfo {
syntax: Ctx,
// morphism from previous (if any) context to current
weakening: Option<Morph>,
defs: Vec<(String, Tm, Ty)>,
}
impl<TModel: Model> TypeChecker<TModel> {
pub fn new(mut model: TModel) -> TypeChecker<TModel> {
let empty = model.empty_ctx();
TypeChecker {
model: model,
ctxs: vec![CtxInfo {
syntax: empty,
weakening: None,
defs: vec![]
}],
}
}
// Saves the current number of context extensions and definitions
// in the current context and returns a scopeguard that will restore
// to this state when it is dropped. The scope guard takes ownership
// of the TC.
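// A rough usage sketch (mirroring `check_def` below): the returned guard
// restores the saved state when it is dropped, e.g.
//
// let mut s = self.save_ctx();
// s.extend(&ext)?; // push a new context
// let ty = s.check_ty(&expr)?; // work in the extended context
// // dropping `s` truncates `ctxs` (and the last `defs`) back to the saved state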
fn save_ctx<'a>(&'a mut self) ->
ScopeGuard<&mut TypeChecker<TModel>, impl FnOnce(&'a mut TypeChecker<TModel>)>
{
let depth = self.ctxs.len();
assert!(depth > 0); // always have empty context
let num_defs = self.ctxs.last().unwrap().defs.len();
guard(self, move |s| {
s.ctxs.truncate(depth);
s.ctxs.last_mut().unwrap().defs.truncate(num_defs)
})
}
fn extend(&mut self, ext: &CtxExt) -> Result<Ty, String> {
let ty = self.check_ty(&ext.1)?;
let new_ctx = self.model.comprehension(&ty);
let weakening = self.model.weakening(&ty);
let mut defs = vec![];
if let Some(ref name) = ext.0 {
let var_ty = Self::subst_ty(&mut self.model, &weakening, &ty);
defs.push((name.clone(), self.model.var(&ty), var_ty))
}
let new_ctx_info = CtxInfo {
syntax: new_ctx,
weakening: Some(weakening),
defs: defs
};
self.ctxs.push(new_ctx_info);
Ok(ty)
}
pub fn check_def(&mut self, def: &Def) -> Result<Tm, String> {
let mut s = self.save_ctx();
for ext in def.ctx.iter() {
s.extend(ext)?;
}
let ret_ty = s.check_ty(&def.ret_ty)?;
s.check_tm_ty(&def.body, &ret_ty)
}
fn check_let<T, F>(
&mut self, check_body: F,
name: &DefId, ty: &Expr, val: &Expr, body: &Expr) -> Result<T, String>
where F : FnOnce(&mut Self, &Expr) -> Result<T, String>
{
let mut s = self.save_ctx();
let ty = s.check_ty(ty)?;
let val = s.check_tm_ty(val, &ty)?;
if let Some(name) = name {
s.ctxs.last_mut().unwrap().defs.push((name.clone(), val, ty));
};
check_body(&mut s, body)
}
pub fn check_ty(&mut self, expr: &Expr) -> Result<Ty, String> {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
match expr {
Expr::App(id, v) =>
match (id.as_str(), &v[..]) {
("bool", []) => Ok(self.model.bool_ty(cur_ctx_syn)),
("eq", [a, b]) => self.check_eq(a, b),
(s, v) => Err(format!("Unexpected {} with {} args", s, v.len()))
},
Expr::Let { name, ty, val, body } =>
self.check_let(|s, body| s.check_ty(body), name, &*ty, &*val, &*body),
_ => Err(format!("Unhandled type {:?}", expr))
}
}
pub fn check_tm(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> {
match expr {
Expr::App(id, v) =>
match (id.as_str(), &v[..]) {
("refl", [a]) => self.refl(&*a),
("true", []) => Ok(self.true_tm()),
("false", []) => Ok(self.false_tm()),
(v, []) => self.access_var(v),
(s, v) => Err(format!("Unexpected {} with {} args", s, v.len()))
},
Expr::Let { name, ty, val, body } =>
self.check_let(|s, body| s.check_tm(body), name, &*ty, &*val, &*body),
Expr::Elim { val, into_ctx, into_ty, cases } =>
self.check_elim(&*val, into_ctx, &*into_ty, cases),
}
}
fn refl(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> |
fn true_tm(&mut self) -> (Tm, Ty) {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bool_ty = self.model.bool_ty(cur_ctx_syn);
let tm = self.model.true_tm(cur_ctx_syn);
(tm, bool_ty)
}
fn false_tm(&mut self) -> (Tm, Ty) {
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bool_ty = self.model.bool_ty(cur_ctx_syn);
let tm = self.model.false_tm(cur_ctx_syn);
(tm, bool_ty)
}
// Given G |- a : A, construct the morphism <1(G), A, a> : G.A -> G
// substituting the last A for a in any term in G.A.
fn bar_tm(model: &mut TModel, ctx: &Ctx, ty: &Ty, tm: &Tm) -> Morph {
let id = model.id_morph(ctx);
model.extension(&id, ty, tm)
}
fn check_elim(
&mut self,
val: &Expr, into_ctx: &Vec<CtxExt>, into_ty: &Expr,
cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String>
{
let (val_tm, val_ty) = self.check_tm(val)?;
let bool_ty = self.model.bool_ty(&self.ctxs.last().unwrap().syntax);
let (elim_tm, elim_ty) =
if self.model.ty_eq(&val_ty, &bool_ty) {
self.elim_bool(into_ctx, into_ty, cases)?
} else {
return Err(format!("Cannot eliminate {:?} of type {:?}", val, val_ty))
};
// Substitute bar(val_tm) into the elimination term and type, which
// live in an extended context.
let cur_ctx_syn = &self.ctxs.last().unwrap().syntax;
let bar = Self::bar_tm(&mut self.model, cur_ctx_syn, &val_ty, &val_tm);
let tm = self.model.subst_tm(&bar, &elim_tm);
let ty = self.model.subst_ty(&bar, &elim_ty);
Ok((tm, ty))
}
fn elim_bool(
&mut self,
into_ctx: &Vec<CtxExt>, into_ty: &Expr,
cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String>
{
if into_ctx.len() != 1 || cases.len() != 2 ||
cases[0].0.len() != 0 || cases[1].0.len() != 0
{
return Err("Invalid bool elimination".to_owned())
}
let cur_ctx_syn = self.ctxs.last().unwrap().syntax.clone();
let bool_ty = self.model.bool_ty(&cur_ctx_syn);
let into_ty = {
let mut s = self.save_ctx();
let ext_ty = s.extend(&into_ctx[0])?;
if !s.model.ty_eq(&ext_ty, &bool_ty) {
return Err("Invalid extension for into-type: expected bool".to_owned());
}
s.check_ty(into_ty)?
};
let true_tm = self.model.true_tm(&cur_ctx_syn);
let true_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &true_tm);
let expected_ty_true_case = Self::subst_ty(&mut self.model, &true_bar, &into_ty);
let false_tm = self.model.false_tm(&cur_ctx_syn);
let false_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &false_tm);
let expected_ty_false_case = Self::subst_ty(&mut self.model, &false_bar, &into_ty);
let true_case_tm = self.check_tm_ty(&cases[0].1, &expected_ty_true_case)?;
let false_case_tm = self.check_tm_ty(&cases[1].1, &expected_ty_false_case)?;
let tm = self.model.elim_bool(&cur_ctx_syn, &into_ty, &true_case_tm, &false_case_tm);
// Define substitutions by true and false
Self::subst_tm(&mut self.model, &true_bar, &tm);
Self::subst_tm(&mut self.model, &false_bar, &tm);
Ok((tm, into_ty))
}
fn check_tm_ty(&mut self, expr: &Expr, expected_ty: &Ty) -> Result<Tm, String> {
let (tm, ty) = self.check_tm(expr)?;
if self.model.ty_eq(&ty, expected_ty) {
Ok(tm)
} else {
Err(format!("expected:\n{:?}\ngot:\n{:?}", expected_ty, ty))
}
}
fn access_var(&mut self, name: &str) -> Result<(Tm, Ty), String> {
let mut ctx_index = self.ctxs.len();
for ctx in self.ctxs.iter().rev() {
ctx_index -= 1;
for (ref ctx_var_name, ref tm, ref ty) in ctx.defs.iter().rev() {
if ctx_var_name != name {
continue
}
let mut tm = tm.clone();
let mut ty = ty.clone();
// Found term, inject it into current context.
for ctx in &self.ctxs[ctx_index+1..] {
let weakening = match ctx.weakening {
Some(ref w) => w,
None => panic!("expected weakening to be available")
};
tm = Self::subst_tm(&mut self.model, &weakening, &tm);
ty = Self::subst_ty(&mut self.model, &weakening, &ty);
}
return Ok((tm, ty))
}
}
Err(format!("unknown definition {}", name))
}
fn check_eq(&mut self, a: &Expr, b: &Expr) -> Result<Ty, String> {
let (tma, tya) = self.check_tm(a)?;
let tmb = self.check_tm_ty(b, &tya)?;
Ok(self.model.eq_ty(&tma, &tmb))
}
fn subst_ty(model: &mut TModel, g: &Morph, ty: &Ty) -> Ty {
model.subst_ty(g, ty);
match ty {
Ty::Subst(f, s) => {
// g (f s) = (g. f) s
let gf = Self::comp_morphs(model, g, &*f);
Self::subst_ty(model, &gf, &*s)
},
Ty::Bool(_) => {
let codomain = Self::morph_codomain(model, g);
model.bool_ty(&codomain)
},
Ty::Eq(a, b) => {
let ga = Self::subst_tm(model, g, &*a);
let gb = Self::subst_tm(model, g, &*b);
model.eq_ty(&ga, &gb)
},
}
}
fn subst_tm(model: &mut TModel, g: &Morph, tm: &Tm) -> Tm {
let gtm = model.subst_tm(g, tm);
match tm {
Tm::Subst(f, tm) => {
// g (f tm) = (g. f) tm
let gf = Self::comp_morphs(model, g, &*f);
Self::subst_tm(model, &gf, &*tm)
},
Tm::Refl(a) => {
let ga = Self::subst_tm(model, g, &*a);
model.refl(&ga)
},
Tm::True(_) => {
let codomain = Self::morph_codomain(model, g);
model.true_tm(&codomain)
},
Tm::False(_) => {
let codomain = Self::morph_codomain(model, g);
model.false_tm(&codomain)
},
Tm::ElimBool(ctx, into_ty, true_case, false_case) => {
let ctx_bool = model.bool_ty(ctx);
let w = model.weakening(&ctx_bool);
// w : ctx -> ctx.bool (where into_ty, true_case and false_case live)
let gw = Self::comp_morphs(model, g, &w);
Self::subst_ty(model, &gw, &*into_ty);
Self::subst_tm(model, &gw, &*true_case);
Self::subst_tm(model, &gw, &*false_case);
gtm
},
_ => gtm
}
}
fn comp_morphs(model: &mut TModel, g: &Morph, f: &Morph) -> Morph {
let gf = model.compose(g, f);
match f {
Morph::Identity(_) => g.clone(),
Morph::Composition(f, e) => {
// g. (f. e) = (g. f). e
let gf = model.compose(g, &*f);
Self::comp_morphs(model, &gf, e)
},
Morph::Extension(f, s, tm) => {
// g. <f, s, tm> = <g. f, s, gtm>
let gf = Self::comp_morphs(model, g, f);
let gtm = model.subst_tm(g, tm);
model.extension(&gf, &*s, >m)
}
_ => gf
}
}
fn morph_codomain(model: &mut TModel, morph: &Morph) -> Ctx {
match morph {
Morph::Identity(ctx) => (**ctx).clone(),
Morph::Weakening(ty) => model.comprehension(&*ty),
Morph::Composition(g, _) => Self::morph_codomain(model, g),
Morph::Extension(f, _, _) => Self::morph_codomain(model, f),
}
}
}
#[cfg(test)]
mod tests {
use crate::cwf_model;
use crate::lang::parser::DefParser;
fn verify_def(code: &str) {
let p = DefParser::new().parse(code).unwrap();
let model = cwf_model::Cwf::new();
super::TypeChecker::new(model).check_def(&p).unwrap();
}
#[test]
fn id() {
verify_def("def id (b : bool) : bool := b.");
}
#[test]
fn negb() {
verify_def("
def negb (b : bool) : bool :=
elim b into (_ : bool) : bool
| => false
| => true
end.");
}
#[test]
fn transitive() {
verify_def("
def trans (a b c d e : bool)
(p1 : a = b)
(p2 : b = c)
(p3 : c = d)
(p4 : d = e) : a = e :=
refl a.")
}
#[test]
fn uip() {
verify_def("
def uip (a b : bool) (p : a = b) (q : b = a) : p = q :=
refl p.")
}
#[test]
fn eta() {
verify_def("
def eta (a : bool) : a = elim a into (_ : bool) : bool | => true | => false end :=
elim a into (b : bool) : b = elim b into (_ : bool) : bool | => true | => false end
| => refl true
| => refl false
end.")
}
} | {
let (tm, _) = self.check_tm(expr)?;
let eq_ty = self.model.eq_ty(&tm, &tm);
let refl_tm = self.model.refl(&tm);
Ok((refl_tm, eq_ty))
} | identifier_body |
secp256k1_recover.rs | //! Public key recovery from [secp256k1] ECDSA signatures.
//!
//! [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1
//!
//! _This module provides low-level cryptographic building blocks that must be
//! used carefully to ensure proper security. Read this documentation and
//! accompanying links thoroughly._
//!
//! The [`secp256k1_recover`] syscall allows a secp256k1 public key that has
//! previously signed a message to be recovered from the combination of the
//! message, the signature, and a recovery ID. The recovery ID is generated
//! during signing.
//!
//! Use cases for `secp256k1_recover` include:
//!
//! - Implementing the Ethereum [`ecrecover`] builtin contract.
//! - Performing secp256k1 public key recovery generally.
//! - Verifying a single secp256k1 signature.
//!
//! While `secp256k1_recover` can be used to verify secp256k1 signatures, Solana
//! also provides the [secp256k1 program][sp], which is more flexible, has lower CPU
//! cost, and can validate many signatures at once.
//!
//! [sp]: crate::secp256k1_program
//! [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions
use {
borsh::{BorshDeserialize, BorshSchema, BorshSerialize},
core::convert::TryFrom,
thiserror::Error,
};
#[derive(Debug, Clone, PartialEq, Eq, Error)]
pub enum Secp256k1RecoverError {
#[error("The hash provided to a secp256k1_recover is invalid")]
InvalidHash,
#[error("The recovery_id provided to a secp256k1_recover is invalid")]
InvalidRecoveryId,
#[error("The signature provided to a secp256k1_recover is invalid")]
InvalidSignature,
}
impl From<u64> for Secp256k1RecoverError {
fn from(v: u64) -> Secp256k1RecoverError {
match v {
1 => Secp256k1RecoverError::InvalidHash,
2 => Secp256k1RecoverError::InvalidRecoveryId,
3 => Secp256k1RecoverError::InvalidSignature,
_ => panic!("Unsupported Secp256k1RecoverError"),
}
}
}
impl From<Secp256k1RecoverError> for u64 {
fn from(v: Secp256k1RecoverError) -> u64 {
match v {
Secp256k1RecoverError::InvalidHash => 1,
Secp256k1RecoverError::InvalidRecoveryId => 2,
Secp256k1RecoverError::InvalidSignature => 3,
}
}
}
pub const SECP256K1_SIGNATURE_LENGTH: usize = 64;
pub const SECP256K1_PUBLIC_KEY_LENGTH: usize = 64;
#[repr(transparent)]
#[derive(
BorshSerialize,
BorshDeserialize,
BorshSchema,
Clone,
Copy,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
AbiExample,
)]
pub struct Secp256k1Pubkey(pub [u8; SECP256K1_PUBLIC_KEY_LENGTH]);
impl Secp256k1Pubkey {
pub fn new(pubkey_vec: &[u8]) -> Self {
Self(
<[u8; SECP256K1_PUBLIC_KEY_LENGTH]>::try_from(<&[u8]>::clone(&pubkey_vec))
.expect("Slice must be the same length as a Pubkey"),
)
}
pub fn to_bytes(self) -> [u8; 64] {
self.0
}
}
/// Recover the public key from a [secp256k1] ECDSA signature and
/// cryptographically-hashed message.
///
/// [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1
///
/// This function is specifically intended for efficiently implementing
/// Ethereum's [`ecrecover`] builtin contract, for use by Ethereum integrators.
/// It may be useful for other purposes.
///
/// [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions
///
/// `hash` is the 32-byte cryptographic hash (typically [`keccak`]) of an
/// arbitrary message, signed by some public key.
///
/// The recovery ID is a value in the range [0, 3] that is generated during
/// signing, and allows the recovery process to be more efficient. Note that the
/// `recovery_id` here does not directly correspond to an Ethereum recovery ID
/// as used in `ecrecover`. This function accepts recovery IDs in the range of
/// [0, 3], while Ethereum's recovery IDs have a value of 27 or 28. To convert
/// an Ethereum recovery ID to a value this function will accept, subtract 27
/// from it, checking for underflow. In practice this function will not succeed
/// if given a recovery ID of 2 or 3, as these values represent an
/// "overflowing" signature, and this function returns an error when parsing
/// overflowing signatures.
///
/// [`keccak`]: crate::keccak
/// [`wrapping_sub`]: https://doc.rust-lang.org/std/primitive.u8.html#method.wrapping_sub
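///
/// A minimal sketch of that conversion (the helper name here is hypothetical,
/// not part of this crate):
///
/// ```
/// fn eth_to_solana_recovery_id(eth_recovery_id: u8) -> Option<u8> {
/// // Ethereum encodes the recovery ID as 27 or 28; this syscall expects 0..=3.
/// eth_recovery_id.checked_sub(27).filter(|id| *id < 4)
/// }
///
/// assert_eq!(eth_to_solana_recovery_id(27), Some(0));
/// assert_eq!(eth_to_solana_recovery_id(28), Some(1));
/// assert_eq!(eth_to_solana_recovery_id(26), None);
/// ```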
///
/// On success this function returns a [`Secp256k1Pubkey`], a wrapper around a
/// 64-byte secp256k1 public key. This public key corresponds to the secret key
/// that previously signed the message `hash` to produce the provided
/// `signature`.
///
/// While `secp256k1_recover` can be used to verify secp256k1 signatures by
/// comparing the recovered key against an expected key, Solana also provides
/// the [secp256k1 program][sp], which is more flexible, has lower CPU cost, and
/// can validate many signatures at once.
///
/// [sp]: crate::secp256k1_program
///
/// The `secp256k1_recover` syscall is implemented with the [`libsecp256k1`]
/// crate, which clients may also want to use.
///
/// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1
///
/// # Hashing messages
///
/// In ECDSA signing and key recovery the signed "message" is always a
/// cryptographic hash, not the original message itself. If not a cryptographic
/// hash, then an adversary can craft signatures that recover to arbitrary
/// public keys. This means the caller of this function generally must hash the
/// original message themselves and not rely on another party to provide the
/// hash.
///
/// Ethereum uses the [`keccak`] hash.
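///
/// A minimal sketch of producing that hash (the fuller examples below do the
/// same thing as part of a program and a client):
///
/// ```
/// use solana_program::keccak;
///
/// let message = b"hello world";
/// // `keccak::hash` returns a `keccak::Hash`; its `.0` field is the 32-byte
/// // array that would be passed to `secp256k1_recover` as `hash`.
/// let message_hash = keccak::hash(message);
/// assert_eq!(message_hash.0.len(), 32);
/// ```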
///
/// # Signature malleability
///
/// With the ECDSA signature algorithm it is possible for any party, given a
/// valid signature of some message, to create a second signature that is
/// equally valid. This is known as _signature malleability_. In many cases this
/// is not a concern, but in cases where applications rely on signatures to have
/// a unique representation this can be the source of bugs, potentially with
/// security implications.
///
/// **The Solana `secp256k1_recover` function does not prevent signature
/// malleability**. This is in contrast to the Bitcoin secp256k1 library, which
/// does prevent malleability by default. Solana accepts signatures with `S`
/// values that are either in the _high order_ or in the _low order_, and it
/// is trivial to produce one from the other.
///
/// To prevent signature malleability, it is common for secp256k1 signature
/// validators to only accept signatures with low-order `S` values, and reject
/// signatures with high-order `S` values. The following code will accomplish
/// this:
///
/// ```rust
/// # use solana_program::program_error::ProgramError;
/// # let signature_bytes = [
/// # 0x83, 0x55, 0x81, 0xDF, 0xB1, 0x02, 0xA7, 0xD2,
/// # 0x2D, 0x33, 0xA4, 0x07, 0xDD, 0x7E, 0xFA, 0x9A,
/// # 0xE8, 0x5F, 0x42, 0x6B, 0x2A, 0x05, 0xBB, 0xFB,
/// # 0xA1, 0xAE, 0x93, 0x84, 0x46, 0x48, 0xE3, 0x35,
/// # 0x74, 0xE1, 0x6D, 0xB4, 0xD0, 0x2D, 0xB2, 0x0B,
/// # 0x3C, 0x89, 0x8D, 0x0A, 0x44, 0xDF, 0x73, 0x9C,
/// # 0x1E, 0xBF, 0x06, 0x8E, 0x8A, 0x9F, 0xA9, 0xC3,
/// # 0xA5, 0xEA, 0x21, 0xAC, 0xED, 0x5B, 0x22, 0x13,
/// # ];
/// let signature = libsecp256k1::Signature::parse_standard_slice(&signature_bytes)
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// if signature.s.is_high() {
/// return Err(ProgramError::InvalidArgument);
/// }
/// # Ok::<_, ProgramError>(())
/// ```
///
/// This has the downside that the program must link to the [`libsecp256k1`]
/// crate and parse the signature just for this check. Note that `libsecp256k1`
/// version 0.7.0 or greater is required for running on the Solana SBF target.
///
/// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1
///
/// For the most accurate description of signature malleability, and its
/// prevention in secp256k1, refer to comments in [`secp256k1.h`] in the Bitcoin
/// Core secp256k1 library, the documentation of the [OpenZeppelin `recover`
/// method for Solidity][ozr], and [this description of the problem on
/// StackExchange][sxr].
///
/// [`secp256k1.h`]: https://github.com/bitcoin-core/secp256k1/blob/44c2452fd387f7ca604ab42d73746e7d3a44d8a2/include/secp256k1.h
/// [ozr]: https://docs.openzeppelin.com/contracts/2.x/api/cryptography#ECDSA-recover-bytes32-bytes-
/// [sxr]: https://bitcoin.stackexchange.com/questions/81115/if-someone-wanted-to-pretend-to-be-satoshi-by-posting-a-fake-signature-to-defrau/81116#81116
///
/// # Errors
///
/// If `hash` is not 32 bytes in length this function returns
/// [`Secp256k1RecoverError::InvalidHash`], though see notes
/// on SBF-specific behavior below.
///
/// If `recovery_id` is not in the range [0, 3] this function returns
/// [`Secp256k1RecoverError::InvalidRecoveryId`].
///
/// If `signature` is not 64 bytes in length this function returns
/// [`Secp256k1RecoverError::InvalidSignature`], though see notes
/// on SBF-specific behavior below.
///
/// If `signature` represents an "overflowing" signature this function returns
/// [`Secp256k1RecoverError::InvalidSignature`]. Overflowing signatures are
/// non-standard and should not be encountered in practice.
///
/// If `signature` is otherwise invalid this function returns
/// [`Secp256k1RecoverError::InvalidSignature`].
///
/// # SBF-specific behavior
///
/// When calling this function on-chain the caller must verify the correct
/// lengths of `hash` and `signature` beforehand.
///
/// When run on-chain this function will not directly validate the lengths of
/// `hash` and `signature`. It will assume they are the correct lengths and
/// pass their pointers to the runtime, which will interpret them as 32-byte and
/// 64-byte buffers. If the provided slices are too short, the runtime will read
/// invalid data and attempt to interpret it, most likely returning an error,
/// though in some scenarios it may be possible to incorrectly return
/// successfully, or the transaction will abort if the syscall reads data
/// outside of the program's memory space. If the provided slices are too long
/// then they may be used to "smuggle" uninterpreted data.
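///
/// A caller-side sketch of that length check (illustrative only; the helper is
/// not part of this module):
///
/// ```
/// use solana_program::program_error::ProgramError;
///
/// fn check_recover_arg_lengths(hash: &[u8], signature: &[u8]) -> Result<(), ProgramError> {
/// // Enforce the exact sizes the syscall assumes before invoking it on-chain.
/// if hash.len() != 32 || signature.len() != 64 {
/// return Err(ProgramError::InvalidArgument);
/// }
/// Ok(())
/// }
///
/// assert!(check_recover_arg_lengths(&[0u8; 32], &[0u8; 64]).is_ok());
/// assert!(check_recover_arg_lengths(&[0u8; 31], &[0u8; 64]).is_err());
/// ```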
///
/// # Examples
///
/// This example demonstrates recovering a public key and using it to verify a
/// signature with the `secp256k1_recover` syscall. It has three parts: a Solana
/// program, an RPC client to call the program, and common definitions shared
/// between the two.
///
/// Common definitions:
///
/// ```
/// use borsh::{BorshDeserialize, BorshSerialize};
///
/// #[derive(BorshSerialize, BorshDeserialize, Debug)]
/// pub struct DemoSecp256k1RecoverInstruction {
/// pub message: Vec<u8>,
/// pub signature: [u8; 64],
/// pub recovery_id: u8,
/// }
/// ```
///
/// The Solana program. Note that it uses `libsecp256k1` version 0.7.0 to parse
/// the secp256k1 signature to prevent malleability.
///
/// ```no_run
/// use solana_program::{
/// entrypoint::ProgramResult,
/// keccak, msg,
/// program_error::ProgramError,
/// secp256k1_recover::secp256k1_recover,
/// };
///
/// /// The key we expect to sign secp256k1 messages,
/// /// as serialized by `libsecp256k1::PublicKey::serialize`.
/// const AUTHORIZED_PUBLIC_KEY: [u8; 64] = [
/// 0x8C, 0xD6, 0x47, 0xF8, 0xA5, 0xBF, 0x59, 0xA0, 0x4F, 0x77, 0xFA, 0xFA, 0x6C, 0xA0, 0xE6, 0x4D,
/// 0x94, 0x5B, 0x46, 0x55, 0xA6, 0x2B, 0xB0, 0x6F, 0x10, 0x4C, 0x9E, 0x2C, 0x6F, 0x42, 0x0A, 0xBE,
/// 0x18, 0xDF, 0x0B, 0xF0, 0x87, 0x42, 0xBA, 0x88, 0xB4, 0xCF, 0x87, 0x5A, 0x35, 0x27, 0xBE, 0x0F,
/// 0x45, 0xAE, 0xFC, 0x66, 0x9C, 0x2C, 0x6B, 0xF3, 0xEF, 0xCA, 0x5C, 0x32, 0x11, 0xF7, 0x2A, 0xC7,
/// ];
/// # pub struct DemoSecp256k1RecoverInstruction {
/// # pub message: Vec<u8>,
/// # pub signature: [u8; 64],
/// # pub recovery_id: u8,
/// # }
///
/// pub fn process_secp256k1_recover(
/// instruction: DemoSecp256k1RecoverInstruction,
/// ) -> ProgramResult {
/// // The secp256k1 recovery operation accepts a cryptographically-hashed
/// // message only. Passing it anything else is insecure and allows signatures
/// // to be forged.
/// //
/// // This means that the code calling `secp256k1_recover` must perform the hash
/// // itself, and not assume that data passed to it has been properly hashed.
/// let message_hash = {
/// let mut hasher = keccak::Hasher::default();
/// hasher.hash(&instruction.message);
/// hasher.result()
/// };
///
/// // Reject high-s value signatures to prevent malleability.
/// // Solana does not do this itself.
/// // This may or may not be necessary depending on use case.
/// {
/// let signature = libsecp256k1::Signature::parse_standard_slice(&instruction.signature)
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// if signature.s.is_high() {
/// msg!("signature with high-s value");
/// return Err(ProgramError::InvalidArgument);
/// }
/// }
///
/// let recovered_pubkey = secp256k1_recover(
/// &message_hash.0,
/// instruction.recovery_id,
/// &instruction.signature,
/// )
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// // If we're using this function for signature verification then we
/// // need to check the pubkey is an expected value.
/// // Here we are checking the secp256k1 pubkey against a known authorized pubkey.
/// if recovered_pubkey.0 != AUTHORIZED_PUBLIC_KEY {
/// return Err(ProgramError::InvalidArgument);
/// }
///
/// Ok(())
/// }
/// ```
///
/// The RPC client program:
///
/// ```no_run
/// # use solana_program::example_mocks::solana_rpc_client;
/// # use solana_program::example_mocks::solana_sdk;
/// use anyhow::Result;
/// use solana_rpc_client::rpc_client::RpcClient;
/// use solana_sdk::{
/// instruction::Instruction,
/// keccak,
/// pubkey::Pubkey,
/// signature::{Keypair, Signer},
/// transaction::Transaction,
/// };
/// # use borsh::{BorshDeserialize, BorshSerialize};
/// # #[derive(BorshSerialize, BorshDeserialize, Debug)]
/// # pub struct DemoSecp256k1RecoverInstruction {
/// # pub message: Vec<u8>,
/// # pub signature: [u8; 64],
/// # pub recovery_id: u8,
/// # }
///
/// pub fn demo_secp256k1_recover(
/// payer_keypair: &Keypair,
/// secp256k1_secret_key: &libsecp256k1::SecretKey,
/// client: &RpcClient,
/// program_keypair: &Keypair,
/// ) -> Result<()> {
/// let message = b"hello world";
/// let message_hash = {
/// let mut hasher = keccak::Hasher::default();
/// hasher.hash(message);
/// hasher.result()
/// };
///
/// let secp_message = libsecp256k1::Message::parse(&message_hash.0);
/// let (signature, recovery_id) = libsecp256k1::sign(&secp_message, &secp256k1_secret_key);
///
/// let signature = signature.serialize();
///
/// let instr = DemoSecp256k1RecoverInstruction {
/// message: message.to_vec(),
/// signature,
/// recovery_id: recovery_id.serialize(),
/// };
/// let instr = Instruction::new_with_borsh(
/// program_keypair.pubkey(),
/// &instr,
/// vec![],
/// );
///
/// let blockhash = client.get_latest_blockhash()?;
/// let tx = Transaction::new_signed_with_payer(
/// &[instr],
/// Some(&payer_keypair.pubkey()),
/// &[payer_keypair],
/// blockhash,
/// ); | /// client.send_and_confirm_transaction(&tx)?;
///
/// Ok(())
/// }
/// ```
pub fn secp256k1_recover(
hash: &[u8],
recovery_id: u8,
signature: &[u8],
) -> Result<Secp256k1Pubkey, Secp256k1RecoverError> {
#[cfg(target_os = "solana")]
{
let mut pubkey_buffer = [0u8; SECP256K1_PUBLIC_KEY_LENGTH];
let result = unsafe {
crate::syscalls::sol_secp256k1_recover(
hash.as_ptr(),
recovery_id as u64,
signature.as_ptr(),
pubkey_buffer.as_mut_ptr(),
)
};
match result {
0 => Ok(Secp256k1Pubkey::new(&pubkey_buffer)),
error => Err(Secp256k1RecoverError::from(error)),
}
}
#[cfg(not(target_os = "solana"))]
{
let message = libsecp256k1::Message::parse_slice(hash)
.map_err(|_| Secp256k1RecoverError::InvalidHash)?;
let recovery_id = libsecp256k1::RecoveryId::parse(recovery_id)
.map_err(|_| Secp256k1RecoverError::InvalidRecoveryId)?;
let signature = libsecp256k1::Signature::parse_standard_slice(signature)
.map_err(|_| Secp256k1RecoverError::InvalidSignature)?;
let secp256k1_key = libsecp256k1::recover(&message, &signature, &recovery_id)
.map_err(|_| Secp256k1RecoverError::InvalidSignature)?;
Ok(Secp256k1Pubkey::new(&secp256k1_key.serialize()[1..65]))
}
} | /// | random_line_split |
secp256k1_recover.rs | //! Public key recovery from [secp256k1] ECDSA signatures.
//!
//! [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1
//!
//! _This module provides low-level cryptographic building blocks that must be
//! used carefully to ensure proper security. Read this documentation and
//! accompanying links thoroughly._
//!
//! The [`secp256k1_recover`] syscall allows a secp256k1 public key that has
//! previously signed a message to be recovered from the combination of the
//! message, the signature, and a recovery ID. The recovery ID is generated
//! during signing.
//!
//! Use cases for `secp256k1_recover` include:
//!
//! - Implementing the Ethereum [`ecrecover`] builtin contract.
//! - Performing secp256k1 public key recovery generally.
//! - Verifying a single secp256k1 signature.
//!
//! While `secp256k1_recover` can be used to verify secp256k1 signatures, Solana
//! also provides the [secp256k1 program][sp], which is more flexible, has lower CPU
//! cost, and can validate many signatures at once.
//!
//! [sp]: crate::secp256k1_program
//! [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions
use {
borsh::{BorshDeserialize, BorshSchema, BorshSerialize},
core::convert::TryFrom,
thiserror::Error,
};
#[derive(Debug, Clone, PartialEq, Eq, Error)]
pub enum Secp256k1RecoverError {
#[error("The hash provided to a secp256k1_recover is invalid")]
InvalidHash,
#[error("The recovery_id provided to a secp256k1_recover is invalid")]
InvalidRecoveryId,
#[error("The signature provided to a secp256k1_recover is invalid")]
InvalidSignature,
}
impl From<u64> for Secp256k1RecoverError {
fn from(v: u64) -> Secp256k1RecoverError {
match v {
1 => Secp256k1RecoverError::InvalidHash,
2 => Secp256k1RecoverError::InvalidRecoveryId,
3 => Secp256k1RecoverError::InvalidSignature,
_ => panic!("Unsupported Secp256k1RecoverError"),
}
}
}
impl From<Secp256k1RecoverError> for u64 {
fn from(v: Secp256k1RecoverError) -> u64 {
match v {
Secp256k1RecoverError::InvalidHash => 1,
Secp256k1RecoverError::InvalidRecoveryId => 2,
Secp256k1RecoverError::InvalidSignature => 3,
}
}
}
pub const SECP256K1_SIGNATURE_LENGTH: usize = 64;
pub const SECP256K1_PUBLIC_KEY_LENGTH: usize = 64;
#[repr(transparent)]
#[derive(
BorshSerialize,
BorshDeserialize,
BorshSchema,
Clone,
Copy,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
AbiExample,
)]
pub struct Secp256k1Pubkey(pub [u8; SECP256K1_PUBLIC_KEY_LENGTH]);
impl Secp256k1Pubkey {
pub fn new(pubkey_vec: &[u8]) -> Self {
Self(
<[u8; SECP256K1_PUBLIC_KEY_LENGTH]>::try_from(<&[u8]>::clone(&pubkey_vec))
.expect("Slice must be the same length as a Pubkey"),
)
}
pub fn to_bytes(self) -> [u8; 64] {
self.0
}
}
/// Recover the public key from a [secp256k1] ECDSA signature and
/// cryptographically-hashed message.
///
/// [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1
///
/// This function is specifically intended for efficiently implementing
/// Ethereum's [`ecrecover`] builtin contract, for use by Ethereum integrators.
/// It may be useful for other purposes.
///
/// [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions
///
/// `hash` is the 32-byte cryptographic hash (typically [`keccak`]) of an
/// arbitrary message, signed by some public key.
///
/// The recovery ID is a value in the range [0, 3] that is generated during
/// signing, and allows the recovery process to be more efficient. Note that the
/// `recovery_id` here does not directly correspond to an Ethereum recovery ID
/// as used in `ecrecover`. This function accepts recovery IDs in the range of
/// [0, 3], while Ethereum's recovery IDs have a value of 27 or 28. To convert
/// an Ethereum recovery ID to a value this function will accept, subtract 27
/// from it, checking for underflow. In practice this function will not succeed
/// if given a recovery ID of 2 or 3, as these values represent an
/// "overflowing" signature, and this function returns an error when parsing
/// overflowing signatures.
///
/// [`keccak`]: crate::keccak
/// [`wrapping_sub`]: https://doc.rust-lang.org/std/primitive.u8.html#method.wrapping_sub
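///
/// A minimal sketch of that conversion (the helper name here is hypothetical,
/// not part of this crate):
///
/// ```
/// fn eth_to_solana_recovery_id(eth_recovery_id: u8) -> Option<u8> {
/// // Ethereum encodes the recovery ID as 27 or 28; this syscall expects 0..=3.
/// eth_recovery_id.checked_sub(27).filter(|id| *id < 4)
/// }
///
/// assert_eq!(eth_to_solana_recovery_id(27), Some(0));
/// assert_eq!(eth_to_solana_recovery_id(28), Some(1));
/// assert_eq!(eth_to_solana_recovery_id(26), None);
/// ```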
///
/// On success this function returns a [`Secp256k1Pubkey`], a wrapper around a
/// 64-byte secp256k1 public key. This public key corresponds to the secret key
/// that previously signed the message `hash` to produce the provided
/// `signature`.
///
/// While `secp256k1_recover` can be used to verify secp256k1 signatures by
/// comparing the recovered key against an expected key, Solana also provides
/// the [secp256k1 program][sp], which is more flexible, has lower CPU cost, and
/// can validate many signatures at once.
///
/// [sp]: crate::secp256k1_program
///
/// The `secp256k1_recover` syscall is implemented with the [`libsecp256k1`]
/// crate, which clients may also want to use.
///
/// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1
///
/// # Hashing messages
///
/// In ECDSA signing and key recovery the signed "message" is always a
/// cryptographic hash, not the original message itself. If not a cryptographic
/// hash, then an adversary can craft signatures that recover to arbitrary
/// public keys. This means the caller of this function generally must hash the
/// original message themselves and not rely on another party to provide the
/// hash.
///
/// Ethereum uses the [`keccak`] hash.
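///
/// A minimal sketch of producing that hash (the fuller examples below do the
/// same thing as part of a program and a client):
///
/// ```
/// use solana_program::keccak;
///
/// let message = b"hello world";
/// // `keccak::hash` returns a `keccak::Hash`; its `.0` field is the 32-byte
/// // array that would be passed to `secp256k1_recover` as `hash`.
/// let message_hash = keccak::hash(message);
/// assert_eq!(message_hash.0.len(), 32);
/// ```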
///
/// # Signature malleability
///
/// With the ECDSA signature algorithm it is possible for any party, given a
/// valid signature of some message, to create a second signature that is
/// equally valid. This is known as _signature malleability_. In many cases this
/// is not a concern, but in cases where applications rely on signatures to have
/// a unique representation this can be the source of bugs, potentially with
/// security implications.
///
/// **The Solana `secp256k1_recover` function does not prevent signature
/// malleability**. This is in contrast to the Bitcoin secp256k1 library, which
/// does prevent malleability by default. Solana accepts signatures with `S`
/// values that are either in the _high order_ or in the _low order_, and it
/// is trivial to produce one from the other.
///
/// To prevent signature malleability, it is common for secp256k1 signature
/// validators to only accept signatures with low-order `S` values, and reject
/// signatures with high-order `S` values. The following code will accomplish
/// this:
///
/// ```rust
/// # use solana_program::program_error::ProgramError;
/// # let signature_bytes = [
/// # 0x83, 0x55, 0x81, 0xDF, 0xB1, 0x02, 0xA7, 0xD2,
/// # 0x2D, 0x33, 0xA4, 0x07, 0xDD, 0x7E, 0xFA, 0x9A,
/// # 0xE8, 0x5F, 0x42, 0x6B, 0x2A, 0x05, 0xBB, 0xFB,
/// # 0xA1, 0xAE, 0x93, 0x84, 0x46, 0x48, 0xE3, 0x35,
/// # 0x74, 0xE1, 0x6D, 0xB4, 0xD0, 0x2D, 0xB2, 0x0B,
/// # 0x3C, 0x89, 0x8D, 0x0A, 0x44, 0xDF, 0x73, 0x9C,
/// # 0x1E, 0xBF, 0x06, 0x8E, 0x8A, 0x9F, 0xA9, 0xC3,
/// # 0xA5, 0xEA, 0x21, 0xAC, 0xED, 0x5B, 0x22, 0x13,
/// # ];
/// let signature = libsecp256k1::Signature::parse_standard_slice(&signature_bytes)
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// if signature.s.is_high() {
/// return Err(ProgramError::InvalidArgument);
/// }
/// # Ok::<_, ProgramError>(())
/// ```
///
/// This has the downside that the program must link to the [`libsecp256k1`]
/// crate and parse the signature just for this check. Note that `libsecp256k1`
/// version 0.7.0 or greater is required for running on the Solana SBF target.
///
/// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1
///
/// For the most accurate description of signature malleability, and its
/// prevention in secp256k1, refer to comments in [`secp256k1.h`] in the Bitcoin
/// Core secp256k1 library, the documentation of the [OpenZeppelin `recover`
/// method for Solidity][ozr], and [this description of the problem on
/// StackExchange][sxr].
///
/// [`secp256k1.h`]: https://github.com/bitcoin-core/secp256k1/blob/44c2452fd387f7ca604ab42d73746e7d3a44d8a2/include/secp256k1.h
/// [ozr]: https://docs.openzeppelin.com/contracts/2.x/api/cryptography#ECDSA-recover-bytes32-bytes-
/// [sxr]: https://bitcoin.stackexchange.com/questions/81115/if-someone-wanted-to-pretend-to-be-satoshi-by-posting-a-fake-signature-to-defrau/81116#81116
///
/// # Errors
///
/// If `hash` is not 32 bytes in length this function returns
/// [`Secp256k1RecoverError::InvalidHash`], though see notes
/// on SBF-specific behavior below.
///
/// If `recovery_id` is not in the range [0, 3] this function returns
/// [`Secp256k1RecoverError::InvalidRecoveryId`].
///
/// If `signature` is not 64 bytes in length this function returns
/// [`Secp256k1RecoverError::InvalidSignature`], though see notes
/// on SBF-specific behavior below.
///
/// If `signature` represents an "overflowing" signature this function returns
/// [`Secp256k1RecoverError::InvalidSignature`]. Overflowing signatures are
/// non-standard and should not be encountered in practice.
///
/// If `signature` is otherwise invalid this function returns
/// [`Secp256k1RecoverError::InvalidSignature`].
///
/// # SBF-specific behavior
///
/// When calling this function on-chain the caller must verify the correct
/// lengths of `hash` and `signature` beforehand.
///
/// When run on-chain this function will not directly validate the lengths of
/// `hash` and `signature`. It will assume they are the correct lengths and
/// pass their pointers to the runtime, which will interpret them as 32-byte and
/// 64-byte buffers. If the provided slices are too short, the runtime will read
/// invalid data and attempt to interpret it, most likely returning an error,
/// though in some scenarios it may be possible to incorrectly return
/// successfully, or the transaction will abort if the syscall reads data
/// outside of the program's memory space. If the provided slices are too long
/// then they may be used to "smuggle" uninterpreted data.
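///
/// A caller-side sketch of that length check (illustrative only; the helper is
/// not part of this module):
///
/// ```
/// use solana_program::program_error::ProgramError;
///
/// fn check_recover_arg_lengths(hash: &[u8], signature: &[u8]) -> Result<(), ProgramError> {
/// // Enforce the exact sizes the syscall assumes before invoking it on-chain.
/// if hash.len() != 32 || signature.len() != 64 {
/// return Err(ProgramError::InvalidArgument);
/// }
/// Ok(())
/// }
///
/// assert!(check_recover_arg_lengths(&[0u8; 32], &[0u8; 64]).is_ok());
/// assert!(check_recover_arg_lengths(&[0u8; 31], &[0u8; 64]).is_err());
/// ```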
///
/// # Examples
///
/// This example demonstrates recovering a public key and using it to verify a
/// signature with the `secp256k1_recover` syscall. It has three parts: a Solana
/// program, an RPC client to call the program, and common definitions shared
/// between the two.
///
/// Common definitions:
///
/// ```
/// use borsh::{BorshDeserialize, BorshSerialize};
///
/// #[derive(BorshSerialize, BorshDeserialize, Debug)]
/// pub struct DemoSecp256k1RecoverInstruction {
/// pub message: Vec<u8>,
/// pub signature: [u8; 64],
/// pub recovery_id: u8,
/// }
/// ```
///
/// The Solana program. Note that it uses `libsecp256k1` version 0.7.0 to parse
/// the secp256k1 signature to prevent malleability.
///
/// ```no_run
/// use solana_program::{
/// entrypoint::ProgramResult,
/// keccak, msg,
/// program_error::ProgramError,
/// secp256k1_recover::secp256k1_recover,
/// };
///
/// /// The key we expect to sign secp256k1 messages,
/// /// as serialized by `libsecp256k1::PublicKey::serialize`.
/// const AUTHORIZED_PUBLIC_KEY: [u8; 64] = [
/// 0x8C, 0xD6, 0x47, 0xF8, 0xA5, 0xBF, 0x59, 0xA0, 0x4F, 0x77, 0xFA, 0xFA, 0x6C, 0xA0, 0xE6, 0x4D,
/// 0x94, 0x5B, 0x46, 0x55, 0xA6, 0x2B, 0xB0, 0x6F, 0x10, 0x4C, 0x9E, 0x2C, 0x6F, 0x42, 0x0A, 0xBE,
/// 0x18, 0xDF, 0x0B, 0xF0, 0x87, 0x42, 0xBA, 0x88, 0xB4, 0xCF, 0x87, 0x5A, 0x35, 0x27, 0xBE, 0x0F,
/// 0x45, 0xAE, 0xFC, 0x66, 0x9C, 0x2C, 0x6B, 0xF3, 0xEF, 0xCA, 0x5C, 0x32, 0x11, 0xF7, 0x2A, 0xC7,
/// ];
/// # pub struct DemoSecp256k1RecoverInstruction {
/// # pub message: Vec<u8>,
/// # pub signature: [u8; 64],
/// # pub recovery_id: u8,
/// # }
///
/// pub fn process_secp256k1_recover(
/// instruction: DemoSecp256k1RecoverInstruction,
/// ) -> ProgramResult {
/// // The secp256k1 recovery operation accepts a cryptographically-hashed
/// // message only. Passing it anything else is insecure and allows signatures
/// // to be forged.
/// //
/// // This means that the code calling `secp256k1_recover` must perform the hash
/// // itself, and not assume that data passed to it has been properly hashed.
/// let message_hash = {
/// let mut hasher = keccak::Hasher::default();
/// hasher.hash(&instruction.message);
/// hasher.result()
/// };
///
/// // Reject high-s value signatures to prevent malleability.
/// // Solana does not do this itself.
/// // This may or may not be necessary depending on use case.
/// {
/// let signature = libsecp256k1::Signature::parse_standard_slice(&instruction.signature)
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// if signature.s.is_high() {
/// msg!("signature with high-s value");
/// return Err(ProgramError::InvalidArgument);
/// }
/// }
///
/// let recovered_pubkey = secp256k1_recover(
/// &message_hash.0,
/// instruction.recovery_id,
/// &instruction.signature,
/// )
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// // If we're using this function for signature verification then we
/// // need to check the pubkey is an expected value.
/// // Here we are checking the secp256k1 pubkey against a known authorized pubkey.
/// if recovered_pubkey.0 != AUTHORIZED_PUBLIC_KEY {
/// return Err(ProgramError::InvalidArgument);
/// }
///
/// Ok(())
/// }
/// ```
///
/// The RPC client program:
///
/// ```no_run
/// # use solana_program::example_mocks::solana_rpc_client;
/// # use solana_program::example_mocks::solana_sdk;
/// use anyhow::Result;
/// use solana_rpc_client::rpc_client::RpcClient;
/// use solana_sdk::{
/// instruction::Instruction,
/// keccak,
/// pubkey::Pubkey,
/// signature::{Keypair, Signer},
/// transaction::Transaction,
/// };
/// # use borsh::{BorshDeserialize, BorshSerialize};
/// # #[derive(BorshSerialize, BorshDeserialize, Debug)]
/// # pub struct DemoSecp256k1RecoverInstruction {
/// # pub message: Vec<u8>,
/// # pub signature: [u8; 64],
/// # pub recovery_id: u8,
/// # }
///
/// pub fn demo_secp256k1_recover(
/// payer_keypair: &Keypair,
/// secp256k1_secret_key: &libsecp256k1::SecretKey,
/// client: &RpcClient,
/// program_keypair: &Keypair,
/// ) -> Result<()> {
/// let message = b"hello world";
/// let message_hash = {
/// let mut hasher = keccak::Hasher::default();
/// hasher.hash(message);
/// hasher.result()
/// };
///
/// let secp_message = libsecp256k1::Message::parse(&message_hash.0);
/// let (signature, recovery_id) = libsecp256k1::sign(&secp_message, &secp256k1_secret_key);
///
/// let signature = signature.serialize();
///
/// let instr = DemoSecp256k1RecoverInstruction {
/// message: message.to_vec(),
/// signature,
/// recovery_id: recovery_id.serialize(),
/// };
/// let instr = Instruction::new_with_borsh(
/// program_keypair.pubkey(),
/// &instr,
/// vec![],
/// );
///
/// let blockhash = client.get_latest_blockhash()?;
/// let tx = Transaction::new_signed_with_payer(
/// &[instr],
/// Some(&payer_keypair.pubkey()),
/// &[payer_keypair],
/// blockhash,
/// );
///
/// client.send_and_confirm_transaction(&tx)?;
///
/// Ok(())
/// }
/// ```
pub fn | (
hash: &[u8],
recovery_id: u8,
signature: &[u8],
) -> Result<Secp256k1Pubkey, Secp256k1RecoverError> {
#[cfg(target_os = "solana")]
{
let mut pubkey_buffer = [0u8; SECP256K1_PUBLIC_KEY_LENGTH];
let result = unsafe {
crate::syscalls::sol_secp256k1_recover(
hash.as_ptr(),
recovery_id as u64,
signature.as_ptr(),
pubkey_buffer.as_mut_ptr(),
)
};
match result {
0 => Ok(Secp256k1Pubkey::new(&pubkey_buffer)),
error => Err(Secp256k1RecoverError::from(error)),
}
}
#[cfg(not(target_os = "solana"))]
{
let message = libsecp256k1::Message::parse_slice(hash)
.map_err(|_| Secp256k1RecoverError::InvalidHash)?;
let recovery_id = libsecp256k1::RecoveryId::parse(recovery_id)
.map_err(|_| Secp256k1RecoverError::InvalidRecoveryId)?;
let signature = libsecp256k1::Signature::parse_standard_slice(signature)
.map_err(|_| Secp256k1RecoverError::InvalidSignature)?;
let secp256k1_key = libsecp256k1::recover(&message, &signature, &recovery_id)
.map_err(|_| Secp256k1RecoverError::InvalidSignature)?;
Ok(Secp256k1Pubkey::new(&secp256k1_key.serialize()[1..65]))
}
}
| secp256k1_recover | identifier_name |
secp256k1_recover.rs | //! Public key recovery from [secp256k1] ECDSA signatures.
//!
//! [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1
//!
//! _This module provides low-level cryptographic building blocks that must be
//! used carefully to ensure proper security. Read this documentation and
//! accompanying links thoroughly._
//!
//! The [`secp256k1_recover`] syscall allows a secp256k1 public key that has
//! previously signed a message to be recovered from the combination of the
//! message, the signature, and a recovery ID. The recovery ID is generated
//! during signing.
//!
//! Use cases for `secp256k1_recover` include:
//!
//! - Implementing the Ethereum [`ecrecover`] builtin contract.
//! - Performing secp256k1 public key recovery generally.
//! - Verifying a single secp256k1 signature.
//!
//! While `secp256k1_recover` can be used to verify secp256k1 signatures, Solana
//! also provides the [secp256k1 program][sp], which is more flexible, has lower CPU
//! cost, and can validate many signatures at once.
//!
//! [sp]: crate::secp256k1_program
//! [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions
use {
borsh::{BorshDeserialize, BorshSchema, BorshSerialize},
core::convert::TryFrom,
thiserror::Error,
};
#[derive(Debug, Clone, PartialEq, Eq, Error)]
pub enum Secp256k1RecoverError {
#[error("The hash provided to a secp256k1_recover is invalid")]
InvalidHash,
#[error("The recovery_id provided to a secp256k1_recover is invalid")]
InvalidRecoveryId,
#[error("The signature provided to a secp256k1_recover is invalid")]
InvalidSignature,
}
impl From<u64> for Secp256k1RecoverError {
fn from(v: u64) -> Secp256k1RecoverError {
match v {
1 => Secp256k1RecoverError::InvalidHash,
2 => Secp256k1RecoverError::InvalidRecoveryId,
3 => Secp256k1RecoverError::InvalidSignature,
_ => panic!("Unsupported Secp256k1RecoverError"),
}
}
}
impl From<Secp256k1RecoverError> for u64 {
fn from(v: Secp256k1RecoverError) -> u64 {
match v {
Secp256k1RecoverError::InvalidHash => 1,
Secp256k1RecoverError::InvalidRecoveryId => 2,
Secp256k1RecoverError::InvalidSignature => 3,
}
}
}
pub const SECP256K1_SIGNATURE_LENGTH: usize = 64;
pub const SECP256K1_PUBLIC_KEY_LENGTH: usize = 64;
#[repr(transparent)]
#[derive(
BorshSerialize,
BorshDeserialize,
BorshSchema,
Clone,
Copy,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
AbiExample,
)]
pub struct Secp256k1Pubkey(pub [u8; SECP256K1_PUBLIC_KEY_LENGTH]);
impl Secp256k1Pubkey {
pub fn new(pubkey_vec: &[u8]) -> Self |
pub fn to_bytes(self) -> [u8; 64] {
self.0
}
}
/// Recover the public key from a [secp256k1] ECDSA signature and
/// cryptographically-hashed message.
///
/// [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1
///
/// This function is specifically intended for efficiently implementing
/// Ethereum's [`ecrecover`] builtin contract, for use by Ethereum integrators.
/// It may be useful for other purposes.
///
/// [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions
///
/// `hash` is the 32-byte cryptographic hash (typically [`keccak`]) of an
/// arbitrary message, signed by some public key.
///
/// The recovery ID is a value in the range [0, 3] that is generated during
/// signing, and allows the recovery process to be more efficient. Note that the
/// `recovery_id` here does not directly correspond to an Ethereum recovery ID
/// as used in `ecrecover`. This function accepts recovery IDs in the range of
/// [0, 3], while Ethereum's recovery IDs have a value of 27 or 28. To convert
/// an Ethereum recovery ID to a value this function will accept, subtract 27
/// from it, checking for underflow. In practice this function will not succeed
/// if given a recovery ID of 2 or 3, as these values represent an
/// "overflowing" signature, and this function returns an error when parsing
/// overflowing signatures.
///
/// [`keccak`]: crate::keccak
/// [`wrapping_sub`]: https://doc.rust-lang.org/std/primitive.u8.html#method.wrapping_sub
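///
/// A minimal sketch of that conversion (the helper name here is hypothetical,
/// not part of this crate):
///
/// ```
/// fn eth_to_solana_recovery_id(eth_recovery_id: u8) -> Option<u8> {
/// // Ethereum encodes the recovery ID as 27 or 28; this syscall expects 0..=3.
/// eth_recovery_id.checked_sub(27).filter(|id| *id < 4)
/// }
///
/// assert_eq!(eth_to_solana_recovery_id(27), Some(0));
/// assert_eq!(eth_to_solana_recovery_id(28), Some(1));
/// assert_eq!(eth_to_solana_recovery_id(26), None);
/// ```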
///
/// On success this function returns a [`Secp256k1Pubkey`], a wrapper around a
/// 64-byte secp256k1 public key. This public key corresponds to the secret key
/// that previously signed the message `hash` to produce the provided
/// `signature`.
///
/// While `secp256k1_recover` can be used to verify secp256k1 signatures by
/// comparing the recovered key against an expected key, Solana also provides
/// the [secp256k1 program][sp], which is more flexible, has lower CPU cost, and
/// can validate many signatures at once.
///
/// [sp]: crate::secp256k1_program
///
/// The `secp256k1_recover` syscall is implemented with the [`libsecp256k1`]
/// crate, which clients may also want to use.
///
/// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1
///
/// # Hashing messages
///
/// In ECDSA signing and key recovery the signed "message" is always a
/// cryptographic hash, not the original message itself. If not a cryptographic
/// hash, then an adversary can craft signatures that recover to arbitrary
/// public keys. This means the caller of this function generally must hash the
/// original message themselves and not rely on another party to provide the
/// hash.
///
/// Ethereum uses the [`keccak`] hash.
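///
/// A minimal sketch of producing that hash (the fuller examples below do the
/// same thing as part of a program and a client):
///
/// ```
/// use solana_program::keccak;
///
/// let message = b"hello world";
/// // `keccak::hash` returns a `keccak::Hash`; its `.0` field is the 32-byte
/// // array that would be passed to `secp256k1_recover` as `hash`.
/// let message_hash = keccak::hash(message);
/// assert_eq!(message_hash.0.len(), 32);
/// ```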
///
/// # Signature malleability
///
/// With the ECDSA signature algorithm it is possible for any party, given a
/// valid signature of some message, to create a second signature that is
/// equally valid. This is known as _signature malleability_. In many cases this
/// is not a concern, but in cases where applications rely on signatures to have
/// a unique representation this can be the source of bugs, potentially with
/// security implications.
///
/// **The Solana `secp256k1_recover` function does not prevent signature
/// malleability**. This is in contrast to the Bitcoin secp256k1 library, which
/// does prevent malleability by default. Solana accepts signatures with `S`
/// values that are either in the _high order_ or in the _low order_, and it
/// is trivial to produce one from the other.
///
/// To prevent signature malleability, it is common for secp256k1 signature
/// validators to only accept signatures with low-order `S` values, and reject
/// signatures with high-order `S` values. The following code will accomplish
/// this:
///
/// ```rust
/// # use solana_program::program_error::ProgramError;
/// # let signature_bytes = [
/// # 0x83, 0x55, 0x81, 0xDF, 0xB1, 0x02, 0xA7, 0xD2,
/// # 0x2D, 0x33, 0xA4, 0x07, 0xDD, 0x7E, 0xFA, 0x9A,
/// # 0xE8, 0x5F, 0x42, 0x6B, 0x2A, 0x05, 0xBB, 0xFB,
/// # 0xA1, 0xAE, 0x93, 0x84, 0x46, 0x48, 0xE3, 0x35,
/// # 0x74, 0xE1, 0x6D, 0xB4, 0xD0, 0x2D, 0xB2, 0x0B,
/// # 0x3C, 0x89, 0x8D, 0x0A, 0x44, 0xDF, 0x73, 0x9C,
/// # 0x1E, 0xBF, 0x06, 0x8E, 0x8A, 0x9F, 0xA9, 0xC3,
/// # 0xA5, 0xEA, 0x21, 0xAC, 0xED, 0x5B, 0x22, 0x13,
/// # ];
/// let signature = libsecp256k1::Signature::parse_standard_slice(&signature_bytes)
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// if signature.s.is_high() {
/// return Err(ProgramError::InvalidArgument);
/// }
/// # Ok::<_, ProgramError>(())
/// ```
///
/// This has the downside that the program must link to the [`libsecp256k1`]
/// crate and parse the signature just for this check. Note that `libsecp256k1`
/// version 0.7.0 or greater is required for running on the Solana SBF target.
///
/// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1
///
/// For the most accurate description of signature malleability, and its
/// prevention in secp256k1, refer to comments in [`secp256k1.h`] in the Bitcoin
/// Core secp256k1 library, the documentation of the [OpenZeppelin `recover`
/// method for Solidity][ozr], and [this description of the problem on
/// StackExchange][sxr].
///
/// [`secp256k1.h`]: https://github.com/bitcoin-core/secp256k1/blob/44c2452fd387f7ca604ab42d73746e7d3a44d8a2/include/secp256k1.h
/// [ozr]: https://docs.openzeppelin.com/contracts/2.x/api/cryptography#ECDSA-recover-bytes32-bytes-
/// [sxr]: https://bitcoin.stackexchange.com/questions/81115/if-someone-wanted-to-pretend-to-be-satoshi-by-posting-a-fake-signature-to-defrau/81116#81116
///
/// # Errors
///
/// If `hash` is not 32 bytes in length this function returns
/// [`Secp256k1RecoverError::InvalidHash`], though see notes
/// on SBF-specific behavior below.
///
/// If `recovery_id` is not in the range [0, 3] this function returns
/// [`Secp256k1RecoverError::InvalidRecoveryId`].
///
/// If `signature` is not 64 bytes in length this function returns
/// [`Secp256k1RecoverError::InvalidSignature`], though see notes
/// on SBF-specific behavior below.
///
/// If `signature` represents an "overflowing" signature this function returns
/// [`Secp256k1RecoverError::InvalidSignature`]. Overflowing signatures are
/// non-standard and should not be encountered in practice.
///
/// If `signature` is otherwise invalid this function returns
/// [`Secp256k1RecoverError::InvalidSignature`].
///
/// # SBF-specific behavior
///
/// When calling this function on-chain the caller must verify the correct
/// lengths of `hash` and `signature` beforehand.
///
/// When run on-chain this function will not directly validate the lengths of
/// `hash` and `signature`. It will assume they are the correct lengths and
/// pass their pointers to the runtime, which will interpret them as 32-byte and
/// 64-byte buffers. If the provided slices are too short, the runtime will read
/// invalid data and attempt to interpret it, most likely returning an error,
/// though in some scenarios it may be possible to incorrectly return
/// successfully, or the transaction will abort if the syscall reads data
/// outside of the program's memory space. If the provided slices are too long
/// then they may be used to "smuggle" uninterpreted data.
///
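/// For example, a caller might guard against wrongly sized inputs with a check
/// along these lines (a sketch only; which error to return is up to the
/// caller):
///
/// ```
/// # use solana_program::program_error::ProgramError;
/// # let hash: &[u8] = &[0u8; 32];
/// # let signature: &[u8] = &[0u8; 64];
/// if hash.len() != 32 || signature.len() != 64 {
///     return Err(ProgramError::InvalidArgument);
/// }
/// # Ok::<_, ProgramError>(())
/// ```
///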
/// # Examples
///
/// This example demonstrates recovering a public key and using it to verify a
/// signature with the `secp256k1_recover` syscall. It has three parts: a Solana
/// program, an RPC client to call the program, and common definitions shared
/// between the two.
///
/// Common definitions:
///
/// ```
/// use borsh::{BorshDeserialize, BorshSerialize};
///
/// #[derive(BorshSerialize, BorshDeserialize, Debug)]
/// pub struct DemoSecp256k1RecoverInstruction {
/// pub message: Vec<u8>,
/// pub signature: [u8; 64],
/// pub recovery_id: u8,
/// }
/// ```
///
/// The Solana program. Note that it uses `libsecp256k1` version 0.7.0 to parse
/// the secp256k1 signature to prevent malleability.
///
/// ```no_run
/// use solana_program::{
/// entrypoint::ProgramResult,
/// keccak, msg,
/// program_error::ProgramError,
/// secp256k1_recover::secp256k1_recover,
/// };
///
/// /// The key we expect to sign secp256k1 messages,
/// /// as serialized by `libsecp256k1::PublicKey::serialize`.
/// const AUTHORIZED_PUBLIC_KEY: [u8; 64] = [
/// 0x8C, 0xD6, 0x47, 0xF8, 0xA5, 0xBF, 0x59, 0xA0, 0x4F, 0x77, 0xFA, 0xFA, 0x6C, 0xA0, 0xE6, 0x4D,
/// 0x94, 0x5B, 0x46, 0x55, 0xA6, 0x2B, 0xB0, 0x6F, 0x10, 0x4C, 0x9E, 0x2C, 0x6F, 0x42, 0x0A, 0xBE,
/// 0x18, 0xDF, 0x0B, 0xF0, 0x87, 0x42, 0xBA, 0x88, 0xB4, 0xCF, 0x87, 0x5A, 0x35, 0x27, 0xBE, 0x0F,
/// 0x45, 0xAE, 0xFC, 0x66, 0x9C, 0x2C, 0x6B, 0xF3, 0xEF, 0xCA, 0x5C, 0x32, 0x11, 0xF7, 0x2A, 0xC7,
/// ];
/// # pub struct DemoSecp256k1RecoverInstruction {
/// # pub message: Vec<u8>,
/// # pub signature: [u8; 64],
/// # pub recovery_id: u8,
/// # }
///
/// pub fn process_secp256k1_recover(
/// instruction: DemoSecp256k1RecoverInstruction,
/// ) -> ProgramResult {
/// // The secp256k1 recovery operation accepts a cryptographically-hashed
/// // message only. Passing it anything else is insecure and allows signatures
/// // to be forged.
/// //
/// // This means that the code calling `secp256k1_recover` must perform the hash
/// // itself, and not assume that data passed to it has been properly hashed.
/// let message_hash = {
/// let mut hasher = keccak::Hasher::default();
/// hasher.hash(&instruction.message);
/// hasher.result()
/// };
///
/// // Reject high-s value signatures to prevent malleability.
/// // Solana does not do this itself.
/// // This may or may not be necessary depending on use case.
/// {
/// let signature = libsecp256k1::Signature::parse_standard_slice(&instruction.signature)
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// if signature.s.is_high() {
/// msg!("signature with high-s value");
/// return Err(ProgramError::InvalidArgument);
/// }
/// }
///
/// let recovered_pubkey = secp256k1_recover(
/// &message_hash.0,
/// instruction.recovery_id,
/// &instruction.signature,
/// )
/// .map_err(|_| ProgramError::InvalidArgument)?;
///
/// // If we're using this function for signature verification then we
/// // need to check the pubkey is an expected value.
/// // Here we are checking the secp256k1 pubkey against a known authorized pubkey.
/// if recovered_pubkey.0 != AUTHORIZED_PUBLIC_KEY {
/// return Err(ProgramError::InvalidArgument);
/// }
///
/// Ok(())
/// }
/// ```
///
/// The RPC client program:
///
/// ```no_run
/// # use solana_program::example_mocks::solana_rpc_client;
/// # use solana_program::example_mocks::solana_sdk;
/// use anyhow::Result;
/// use solana_rpc_client::rpc_client::RpcClient;
/// use solana_sdk::{
/// instruction::Instruction,
/// keccak,
/// pubkey::Pubkey,
/// signature::{Keypair, Signer},
/// transaction::Transaction,
/// };
/// # use borsh::{BorshDeserialize, BorshSerialize};
/// # #[derive(BorshSerialize, BorshDeserialize, Debug)]
/// # pub struct DemoSecp256k1RecoverInstruction {
/// # pub message: Vec<u8>,
/// # pub signature: [u8; 64],
/// # pub recovery_id: u8,
/// # }
///
/// pub fn demo_secp256k1_recover(
/// payer_keypair: &Keypair,
/// secp256k1_secret_key: &libsecp256k1::SecretKey,
/// client: &RpcClient,
/// program_keypair: &Keypair,
/// ) -> Result<()> {
/// let message = b"hello world";
/// let message_hash = {
/// let mut hasher = keccak::Hasher::default();
/// hasher.hash(message);
/// hasher.result()
/// };
///
/// let secp_message = libsecp256k1::Message::parse(&message_hash.0);
/// let (signature, recovery_id) = libsecp256k1::sign(&secp_message, &secp256k1_secret_key);
///
/// let signature = signature.serialize();
///
/// let instr = DemoSecp256k1RecoverInstruction {
/// message: message.to_vec(),
/// signature,
/// recovery_id: recovery_id.serialize(),
/// };
/// let instr = Instruction::new_with_borsh(
/// program_keypair.pubkey(),
/// &instr,
/// vec![],
/// );
///
/// let blockhash = client.get_latest_blockhash()?;
/// let tx = Transaction::new_signed_with_payer(
/// &[instr],
/// Some(&payer_keypair.pubkey()),
/// &[payer_keypair],
/// blockhash,
/// );
///
/// client.send_and_confirm_transaction(&tx)?;
///
/// Ok(())
/// }
/// ```
pub fn secp256k1_recover(
hash: &[u8],
recovery_id: u8,
signature: &[u8],
) -> Result<Secp256k1Pubkey, Secp256k1RecoverError> {
#[cfg(target_os = "solana")]
{
let mut pubkey_buffer = [0u8; SECP256K1_PUBLIC_KEY_LENGTH];
let result = unsafe {
crate::syscalls::sol_secp256k1_recover(
hash.as_ptr(),
recovery_id as u64,
signature.as_ptr(),
pubkey_buffer.as_mut_ptr(),
)
};
match result {
0 => Ok(Secp256k1Pubkey::new(&pubkey_buffer)),
error => Err(Secp256k1RecoverError::from(error)),
}
}
#[cfg(not(target_os = "solana"))]
{
let message = libsecp256k1::Message::parse_slice(hash)
.map_err(|_| Secp256k1RecoverError::InvalidHash)?;
let recovery_id = libsecp256k1::RecoveryId::parse(recovery_id)
.map_err(|_| Secp256k1RecoverError::InvalidRecoveryId)?;
let signature = libsecp256k1::Signature::parse_standard_slice(signature)
.map_err(|_| Secp256k1RecoverError::InvalidSignature)?;
let secp256k1_key = libsecp256k1::recover(&message, &signature, &recovery_id)
.map_err(|_| Secp256k1RecoverError::InvalidSignature)?;
Ok(Secp256k1Pubkey::new(&secp256k1_key.serialize()[1..65]))
}
}
| {
Self(
<[u8; SECP256K1_PUBLIC_KEY_LENGTH]>::try_from(<&[u8]>::clone(&pubkey_vec))
.expect("Slice must be the same length as a Pubkey"),
)
} | identifier_body |
buffer.rs | //! Buffer implementation like Bytes / BytesMut.
//!
//! It is simpler and contains less unsafe code.
use std::default::Default;
use std::fmt;
use std::io::{self, Read, Write};
use std::marker::Unpin;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::pin::Pin;
use std::slice;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
/// A buffer structure, like Bytes/BytesMut.
///
/// It is not much more than a wrapper around Vec.
pub struct Buffer {
start_offset: usize,
rd_pos: usize,
data: Vec<u8>,
}
impl Buffer {
/// Create new Buffer.
pub fn new() -> Buffer {
Buffer {
start_offset: 0,
rd_pos: 0,
data: Vec::new(),
}
}
/// Create a new Buffer with at least `cap` bytes of capacity.
pub fn with_capacity(cap: usize) -> Buffer {
Buffer {
start_offset: 0,
rd_pos: 0,
data: Vec::with_capacity(Self::round_size_up(cap)),
}
}
/// Clear this buffer.
pub fn clear(&mut self) {
self.start_offset = 0;
self.rd_pos = 0;
self.data.truncate(0);
}
/// Truncate this buffer.
pub fn truncate(&mut self, size: usize) {
if size == 0 {
self.clear();
return;
}
if size > self.len() {
panic!("Buffer::truncate(size): size > self.len()");
}
if self.rd_pos > size {
self.rd_pos = size;
}
self.data.truncate(size + self.start_offset);
}
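/// Return the bytes that have not been read yet: everything from the
/// current read position to the end of the buffer, or an empty slice if
/// the whole buffer has already been read.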
pub fn bytes(&self) -> &[u8] {
if self.rd_pos >= self.len() {
return &[][..];
}
&self.data[self.start_offset + self.rd_pos..]
}
/// Split this Buffer in two parts.
///
/// The first part remains in this buffer. The second part is
/// returned as a new Buffer.
pub fn split_off(&mut self, at: usize) -> Buffer {
if at > self.len() {
panic!("Buffer:split_off(size): size > self.len()");
}
if self.rd_pos > at {
self.rd_pos = at;
}
// If "header" < 32K and "body" >= 32K, use a start_offset
// for "body" and copy "header".
if self.start_offset == 0 && at < 32000 && self.len() - at >= 32000 {
let mut bnew = Buffer::with_capacity(at);
mem::swap(self, &mut bnew);
self.extend_from_slice(&bnew[0..at]);
bnew.start_offset = at;
return bnew;
}
let mut bnew = Buffer::new();
let bytes = self.bytes();
bnew.extend_from_slice(&bytes[at..]);
self.truncate(at);
bnew
}
/// Add data to this buffer.
#[inline]
pub fn extend_from_slice(&mut self, extend: &[u8]) {
self.reserve(extend.len());
self.data.extend_from_slice(extend);
}
#[inline]
fn round_size_up(size: usize) -> usize {
if size < 128 {
128
} else if size < 4096 {
4096
} else if size < 65536 {
65536
} else if size < 2097152 {
size.next_power_of_two()
} else {
(1 + size / 1048576) * 1048576
}
}
/// Make sure at least `size` bytes are available.
#[inline]
pub fn reserve(&mut self, size: usize) {
let end = self.data.len() + size;
if end < self.data.capacity() {
return;
}
self.data.reserve_exact(Self::round_size_up(end) - self.data.len());
}
/// total length of all data in this Buffer.
#[inline]
pub fn len(&self) -> usize {
self.data.len() - self.start_offset
}
/// Split this Buffer in two parts.
///
/// The second part remains in this buffer. The first part is
/// returned to the caller.
pub fn split_to(&mut self, size: usize) -> Buffer {
let mut other = self.split_off(size);
mem::swap(self, &mut other);
other
}
/// Write all data in this `Buffer` to a file.
pub fn write_all(&mut self, mut file: impl Write) -> io::Result<()> {
while self.rd_pos < self.len() {
let bytes = self.bytes();
let size = bytes.len();
file.write_all(bytes)?;
self.rd_pos += size;
}
Ok(())
}
/// Add text data to this buffer.
#[inline]
pub fn push_str(&mut self, s: &str) |
/// Add a string to the buffer.
#[inline]
pub fn put_str(&mut self, s: impl AsRef<str>) {
self.extend_from_slice(s.as_ref().as_bytes());
}
/// Return a reference to this Buffer as an UTF-8 string.
#[inline]
pub fn as_utf8_str(&self) -> Result<&str, std::str::Utf8Error> {
std::str::from_utf8(self.bytes())
}
/// Convert this buffer into a Vec<u8>.
pub fn into_bytes(self) -> Vec<u8> {
if self.start_offset > 0 {
let mut v = Vec::with_capacity(Self::round_size_up(self.len()));
v.extend_from_slice(self.bytes());
v
} else {
self.data
}
}
//
// ===== Begin unsafe code =====
//
/// Read an exact number of bytes.
pub fn read_exact(&mut self, reader: &mut std::fs::File, len: usize) -> io::Result<()> {
self.reserve(len);
// Safety: it is safe for a std::fs::File to read into uninitialized memory.
unsafe {
let buf = self.spare_capacity_mut();
reader.read_exact(&mut buf[..len])?;
self.advance_mut(len);
}
Ok(())
}
unsafe fn spare_capacity_mut<T>(&mut self) -> &mut [T] {
let len = self.data.len();
let spare = self.data.capacity() - len;
let ptr = self.data.as_mut_ptr().add(len) as *mut T;
&mut slice::from_raw_parts_mut(ptr, spare)[..]
}
unsafe fn advance_mut(&mut self, cnt: usize) {
if self.data.len() + cnt > self.data.capacity() {
panic!("Buffer::advance_mut(cnt): would advance past end of Buffer");
}
self.data.set_len(self.data.len() + cnt);
}
pub fn poll_read<R>(&mut self, reader: Pin<&mut R>, cx: &mut Context<'_>) -> Poll<io::Result<usize>>
where R: AsyncRead + Unpin + ?Sized {
// Safety: ReadBuf::uninit takes a MaybeUninit.
let mut buf = ReadBuf::uninit(unsafe { self.spare_capacity_mut() });
futures::ready!(reader.poll_read(cx, &mut buf))?;
let len = buf.filled().len();
// Safety: len = buf.filled().len() is guaranteed to be correct.
unsafe {
self.advance_mut(len);
}
Poll::Ready(Ok(len))
}
//
// ===== End unsafe code =====
//
}
impl bytes::Buf for Buffer {
fn advance(&mut self, cnt: usize) {
// advance buffer read pointer.
self.rd_pos += cnt;
if self.rd_pos > self.len() {
// "It is recommended for implementations of advance to
// panic if cnt > self.remaining()"
panic!("read position advanced beyond end of buffer");
}
}
#[inline]
fn chunk(&self) -> &[u8] {
self.bytes()
}
#[inline]
fn remaining(&self) -> usize {
self.len() - self.rd_pos
}
}
impl Deref for Buffer {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
self.bytes()
}
}
impl DerefMut for Buffer {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.data[self.start_offset + self.rd_pos..]
}
}
impl fmt::Write for Buffer {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
self.push_str(s);
Ok(())
}
}
impl From<&[u8]> for Buffer {
fn from(src: &[u8]) -> Self {
let mut buffer = Buffer::new();
buffer.extend_from_slice(src);
buffer
}
}
impl From<Vec<u8>> for Buffer {
fn from(src: Vec<u8>) -> Self {
Buffer {
start_offset: 0,
rd_pos: 0,
data: src,
}
}
}
impl From<&str> for Buffer {
fn from(src: &str) -> Self {
Buffer::from(src.as_bytes())
}
}
impl From<String> for Buffer {
fn from(src: String) -> Self {
Buffer::from(src.into_bytes())
}
}
impl From<bytes::Bytes> for Buffer {
fn from(src: bytes::Bytes) -> Self {
Buffer::from(&src[..])
}
}
impl Default for Buffer {
fn default() -> Self {
Buffer::new()
}
}
impl fmt::Debug for Buffer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let cap = self.data.capacity();
let len = self.len();
f.debug_struct("Buffer")
.field("start_offset", &self.start_offset)
.field("rd_pos", &self.rd_pos)
.field("len", &len)
.field("capacity", &cap)
.field("data", &"[data]")
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_buffer() {
let mut b = Buffer::new();
b.reserve(4096);
b.start_offset = 23;
b.data.resize(b.start_offset, 0);
for _ in 0..50000 {
b.put_str("xyzzyxyzzy");
}
assert!(b.len() == 500000);
assert!(&b[1000..1010] == &b"xyzzyxyzzy"[..]);
}
#[test]
fn test_split() {
let mut b = Buffer::new();
for _ in 0..5000 {
b.put_str("xyzzyxyzzyz");
}
assert!(b.len() == 55000);
let mut n = b.split_off(4918);
assert!(b.len() == 4918);
assert!(n.len() == 50082);
println!("1. {}", std::str::from_utf8(&b[1100..1110]).unwrap());
println!("2. {}", std::str::from_utf8(&n[1100..1110]).unwrap());
assert!(&b[1100..1110] == &b"xyzzyxyzzy"[..]);
assert!(&n[1100..1110] == &b"yzzyxyzzyz"[..]);
n.start_offset += 13;
let x = n.split_to(20000);
println!("3. n.len() {}", n.len());
println!("4. x.len() {}", x.len());
println!("5. {}", std::str::from_utf8(&n[1000..1010]).unwrap());
println!("6. {}", std::str::from_utf8(&x[1000..1010]).unwrap());
assert!(n.len() == 30069);
assert!(x.len() == 20000);
assert!(&n[1000..1010] == &b"yxyzzyzxyz"[..]);
assert!(&x[1000..1010] == &b"zzyxyzzyzx"[..]);
}
#[test]
fn test_spare() {
let mut b = Buffer::with_capacity(194);
assert!(b.data.capacity() == 4096);
b.extend_from_slice(b"0123456789");
let buf: &mut [u8] = unsafe { b.spare_capacity_mut() };
assert!(buf.len() == 4086);
}
}
| {
self.extend_from_slice(s.as_bytes());
} | identifier_body |
buffer.rs | //! Buffer implementation like Bytes / BytesMut.
//!
//! It is simpler and contains less unsafe code.
use std::default::Default;
use std::fmt;
use std::io::{self, Read, Write};
use std::marker::Unpin;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::pin::Pin;
use std::slice;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
/// A buffer structure, like Bytes/BytesMut.
///
/// It is not much more than a wrapper around Vec.
pub struct Buffer {
start_offset: usize,
rd_pos: usize,
data: Vec<u8>,
}
impl Buffer {
/// Create new Buffer.
pub fn new() -> Buffer {
Buffer {
start_offset: 0,
rd_pos: 0,
data: Vec::new(),
}
}
/// Create a new Buffer with at least `cap` bytes of capacity.
pub fn with_capacity(cap: usize) -> Buffer {
Buffer {
start_offset: 0,
rd_pos: 0,
data: Vec::with_capacity(Self::round_size_up(cap)),
}
}
/// Clear this buffer.
pub fn clear(&mut self) {
self.start_offset = 0;
self.rd_pos = 0;
self.data.truncate(0);
}
/// Truncate this buffer.
pub fn truncate(&mut self, size: usize) {
if size == 0 {
self.clear();
return;
}
if size > self.len() {
panic!("Buffer::truncate(size): size > self.len()");
}
if self.rd_pos > size {
self.rd_pos = size;
}
self.data.truncate(size + self.start_offset);
}
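/// Return the bytes that have not been read yet: everything from the
/// current read position to the end of the buffer, or an empty slice if
/// the whole buffer has already been read.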
pub fn bytes(&self) -> &[u8] {
if self.rd_pos >= self.len() {
return &[][..];
}
&self.data[self.start_offset + self.rd_pos..]
}
/// Split this Buffer in two parts.
///
/// The first part remains in this buffer. The second part is
/// returned as a new Buffer.
pub fn split_off(&mut self, at: usize) -> Buffer {
if at > self.len() {
panic!("Buffer:split_off(size): size > self.len()");
}
if self.rd_pos > at {
self.rd_pos = at;
}
// If "header" < 32K and "body" >= 32K, use a start_offset
// for "body" and copy "header".
if self.start_offset == 0 && at < 32000 && self.len() - at >= 32000 {
let mut bnew = Buffer::with_capacity(at);
mem::swap(self, &mut bnew);
self.extend_from_slice(&bnew[0..at]);
bnew.start_offset = at;
return bnew;
}
let mut bnew = Buffer::new();
let bytes = self.bytes();
bnew.extend_from_slice(&bytes[at..]);
self.truncate(at);
bnew
}
/// Add data to this buffer.
#[inline]
pub fn extend_from_slice(&mut self, extend: &[u8]) {
self.reserve(extend.len());
self.data.extend_from_slice(extend);
}
#[inline]
fn round_size_up(size: usize) -> usize {
if size < 128 {
128
} else if size < 4096 {
4096
} else if size < 65536 {
65536
} else if size < 2097152 {
size.next_power_of_two()
} else {
(1 + size / 1048576) * 1048576
}
}
/// Make sure at least `size` bytes are available.
#[inline]
pub fn reserve(&mut self, size: usize) {
let end = self.data.len() + size;
if end < self.data.capacity() {
return;
}
self.data.reserve_exact(Self::round_size_up(end) - self.data.len());
}
/// total length of all data in this Buffer.
#[inline]
pub fn len(&self) -> usize {
self.data.len() - self.start_offset
}
/// Split this Buffer in two parts.
///
/// The second part remains in this buffer. The first part is
/// returned to the caller.
pub fn split_to(&mut self, size: usize) -> Buffer {
let mut other = self.split_off(size);
mem::swap(self, &mut other);
other
}
/// Write all data in this `Buffer` to a file.
pub fn write_all(&mut self, mut file: impl Write) -> io::Result<()> {
while self.rd_pos < self.len() {
let bytes = self.bytes();
let size = bytes.len();
file.write_all(bytes)?;
self.rd_pos += size;
}
Ok(())
}
/// Add text data to this buffer.
#[inline]
pub fn push_str(&mut self, s: &str) {
self.extend_from_slice(s.as_bytes());
}
/// Add a string to the buffer.
#[inline]
pub fn put_str(&mut self, s: impl AsRef<str>) {
self.extend_from_slice(s.as_ref().as_bytes());
}
/// Return a reference to this Buffer as an UTF-8 string.
#[inline]
pub fn as_utf8_str(&self) -> Result<&str, std::str::Utf8Error> {
std::str::from_utf8(self.bytes())
}
/// Convert this buffer into a Vec<u8>.
pub fn into_bytes(self) -> Vec<u8> {
if self.start_offset > 0 {
let mut v = Vec::with_capacity(Self::round_size_up(self.len()));
v.extend_from_slice(self.bytes());
v
} else {
self.data
}
}
//
// ===== Begin unsafe code =====
//
/// Read an exact number of bytes.
pub fn read_exact(&mut self, reader: &mut std::fs::File, len: usize) -> io::Result<()> {
self.reserve(len);
// Safety: it is safe for a std::fs::File to read into uninitialized memory.
unsafe {
let buf = self.spare_capacity_mut();
reader.read_exact(&mut buf[..len])?;
self.advance_mut(len);
}
Ok(())
}
unsafe fn spare_capacity_mut<T>(&mut self) -> &mut [T] {
let len = self.data.len();
let spare = self.data.capacity() - len;
let ptr = self.data.as_mut_ptr().add(len) as *mut T;
&mut slice::from_raw_parts_mut(ptr, spare)[..]
}
unsafe fn advance_mut(&mut self, cnt: usize) {
if self.data.len() + cnt > self.data.capacity() {
panic!("Buffer::advance_mut(cnt): would advance past end of Buffer");
}
self.data.set_len(self.data.len() + cnt);
}
pub fn poll_read<R>(&mut self, reader: Pin<&mut R>, cx: &mut Context<'_>) -> Poll<io::Result<usize>>
where R: AsyncRead + Unpin + ?Sized {
// Safety: ReadBuf::uninit takes a MaybeUninit.
let mut buf = ReadBuf::uninit(unsafe { self.spare_capacity_mut() });
futures::ready!(reader.poll_read(cx, &mut buf))?;
let len = buf.filled().len();
// Safety: len = buf.filled().len() is guaranteed to be correct.
unsafe {
self.advance_mut(len);
}
Poll::Ready(Ok(len))
}
//
// ===== End unsafe code =====
//
}
impl bytes::Buf for Buffer {
fn advance(&mut self, cnt: usize) {
// advance buffer read pointer.
self.rd_pos += cnt;
if self.rd_pos > self.len() {
// "It is recommended for implementations of advance to
// panic if cnt > self.remaining()"
panic!("read position advanced beyond end of buffer");
}
}
#[inline]
fn chunk(&self) -> &[u8] {
self.bytes()
}
#[inline]
fn remaining(&self) -> usize {
self.len() - self.rd_pos
}
}
impl Deref for Buffer {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
self.bytes()
}
}
impl DerefMut for Buffer {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.data[self.start_offset + self.rd_pos..]
}
}
impl fmt::Write for Buffer {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
self.push_str(s);
Ok(())
}
}
impl From<&[u8]> for Buffer {
fn from(src: &[u8]) -> Self {
let mut buffer = Buffer::new();
buffer.extend_from_slice(src);
buffer
}
}
impl From<Vec<u8>> for Buffer {
fn from(src: Vec<u8>) -> Self {
Buffer {
start_offset: 0,
rd_pos: 0,
data: src,
}
}
}
impl From<&str> for Buffer {
fn from(src: &str) -> Self {
Buffer::from(src.as_bytes())
}
}
impl From<String> for Buffer {
fn from(src: String) -> Self {
Buffer::from(src.into_bytes())
}
}
impl From<bytes::Bytes> for Buffer {
fn from(src: bytes::Bytes) -> Self {
Buffer::from(&src[..])
}
}
impl Default for Buffer {
fn default() -> Self {
Buffer::new()
}
}
impl fmt::Debug for Buffer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let cap = self.data.capacity();
let len = self.len();
f.debug_struct("Buffer")
.field("start_offset", &self.start_offset)
.field("rd_pos", &self.rd_pos)
.field("len", &len)
.field("capacity", &cap)
.field("data", &"[data]")
.finish()
}
}
|
#[test]
fn test_buffer() {
let mut b = Buffer::new();
b.reserve(4096);
b.start_offset = 23;
b.data.resize(b.start_offset, 0);
for _ in 0..50000 {
b.put_str("xyzzyxyzzy");
}
assert!(b.len() == 500000);
assert!(&b[1000..1010] == &b"xyzzyxyzzy"[..]);
}
#[test]
fn test_split() {
let mut b = Buffer::new();
for _ in 0..5000 {
b.put_str("xyzzyxyzzyz");
}
assert!(b.len() == 55000);
let mut n = b.split_off(4918);
assert!(b.len() == 4918);
assert!(n.len() == 50082);
println!("1. {}", std::str::from_utf8(&b[1100..1110]).unwrap());
println!("2. {}", std::str::from_utf8(&n[1100..1110]).unwrap());
assert!(&b[1100..1110] == &b"xyzzyxyzzy"[..]);
assert!(&n[1100..1110] == &b"yzzyxyzzyz"[..]);
n.start_offset += 13;
let x = n.split_to(20000);
println!("3. n.len() {}", n.len());
println!("4. x.len() {}", x.len());
println!("5. {}", std::str::from_utf8(&n[1000..1010]).unwrap());
println!("6. {}", std::str::from_utf8(&x[1000..1010]).unwrap());
assert!(n.len() == 30069);
assert!(x.len() == 20000);
assert!(&n[1000..1010] == &b"yxyzzyzxyz"[..]);
assert!(&x[1000..1010] == &b"zzyxyzzyzx"[..]);
}
#[test]
fn test_spare() {
let mut b = Buffer::with_capacity(194);
assert!(b.data.capacity() == 4096);
b.extend_from_slice(b"0123456789");
let buf: &mut [u8] = unsafe { b.spare_capacity_mut() };
assert!(buf.len() == 4086);
}
} | #[cfg(test)]
mod tests {
use super::*; | random_line_split |
buffer.rs | //! Buffer implementation like Bytes / BytesMut.
//!
//! It is simpler and contains less unsafe code.
use std::default::Default;
use std::fmt;
use std::io::{self, Read, Write};
use std::marker::Unpin;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::pin::Pin;
use std::slice;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
/// A buffer structure, like Bytes/BytesMut.
///
/// It is not much more than a wrapper around Vec.
pub struct Buffer {
start_offset: usize,
rd_pos: usize,
data: Vec<u8>,
}
impl Buffer {
/// Create new Buffer.
pub fn new() -> Buffer {
Buffer {
start_offset: 0,
rd_pos: 0,
data: Vec::new(),
}
}
/// Create a new Buffer with at least `cap` bytes of capacity.
pub fn with_capacity(cap: usize) -> Buffer {
Buffer {
start_offset: 0,
rd_pos: 0,
data: Vec::with_capacity(Self::round_size_up(cap)),
}
}
/// Clear this buffer.
pub fn clear(&mut self) {
self.start_offset = 0;
self.rd_pos = 0;
self.data.truncate(0);
}
/// Truncate this buffer.
pub fn | (&mut self, size: usize) {
if size == 0 {
self.clear();
return;
}
if size > self.len() {
panic!("Buffer::truncate(size): size > self.len()");
}
if self.rd_pos > size {
self.rd_pos = size;
}
self.data.truncate(size + self.start_offset);
}
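/// Return the bytes that have not been read yet: everything from the
/// current read position to the end of the buffer, or an empty slice if
/// the whole buffer has already been read.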
pub fn bytes(&self) -> &[u8] {
if self.rd_pos >= self.len() {
return &[][..];
}
&self.data[self.start_offset + self.rd_pos..]
}
/// Split this Buffer in two parts.
///
/// The first part remains in this buffer. The second part is
/// returned as a new Buffer.
pub fn split_off(&mut self, at: usize) -> Buffer {
if at > self.len() {
panic!("Buffer:split_off(size): size > self.len()");
}
if self.rd_pos > at {
self.rd_pos = at;
}
// If "header" < 32K and "body" >= 32K, use a start_offset
// for "body" and copy "header".
if self.start_offset == 0 && at < 32000 && self.len() - at >= 32000 {
let mut bnew = Buffer::with_capacity(at);
mem::swap(self, &mut bnew);
self.extend_from_slice(&bnew[0..at]);
bnew.start_offset = at;
return bnew;
}
let mut bnew = Buffer::new();
let bytes = self.bytes();
bnew.extend_from_slice(&bytes[at..]);
self.truncate(at);
bnew
}
/// Add data to this buffer.
#[inline]
pub fn extend_from_slice(&mut self, extend: &[u8]) {
self.reserve(extend.len());
self.data.extend_from_slice(extend);
}
#[inline]
fn round_size_up(size: usize) -> usize {
if size < 128 {
128
} else if size < 4096 {
4096
} else if size < 65536 {
65536
} else if size < 2097152 {
size.next_power_of_two()
} else {
(1 + size / 1048576) * 1048576
}
}
/// Make sure at least `size` bytes are available.
#[inline]
pub fn reserve(&mut self, size: usize) {
let end = self.data.len() + size;
if end < self.data.capacity() {
return;
}
self.data.reserve_exact(Self::round_size_up(end) - self.data.len());
}
/// total length of all data in this Buffer.
#[inline]
pub fn len(&self) -> usize {
self.data.len() - self.start_offset
}
/// Split this Buffer in two parts.
///
/// The second part remains in this buffer. The first part is
/// returned to the caller.
pub fn split_to(&mut self, size: usize) -> Buffer {
let mut other = self.split_off(size);
mem::swap(self, &mut other);
other
}
/// Write all data in this `Buffer` to a file.
pub fn write_all(&mut self, mut file: impl Write) -> io::Result<()> {
while self.rd_pos < self.len() {
let bytes = self.bytes();
let size = bytes.len();
file.write_all(bytes)?;
self.rd_pos += size;
}
Ok(())
}
/// Add text data to this buffer.
#[inline]
pub fn push_str(&mut self, s: &str) {
self.extend_from_slice(s.as_bytes());
}
/// Add a string to the buffer.
#[inline]
pub fn put_str(&mut self, s: impl AsRef<str>) {
self.extend_from_slice(s.as_ref().as_bytes());
}
/// Return a reference to this Buffer as an UTF-8 string.
#[inline]
pub fn as_utf8_str(&self) -> Result<&str, std::str::Utf8Error> {
std::str::from_utf8(self.bytes())
}
/// Convert this buffer into a Vec<u8>.
pub fn into_bytes(self) -> Vec<u8> {
if self.start_offset > 0 {
let mut v = Vec::with_capacity(Self::round_size_up(self.len()));
v.extend_from_slice(self.bytes());
v
} else {
self.data
}
}
//
// ===== Begin unsafe code =====
//
/// Read an exact number of bytes.
pub fn read_exact(&mut self, reader: &mut std::fs::File, len: usize) -> io::Result<()> {
self.reserve(len);
// Safety: it is safe for a std::fs::File to read into uninitialized memory.
unsafe {
let buf = self.spare_capacity_mut();
reader.read_exact(&mut buf[..len])?;
self.advance_mut(len);
}
Ok(())
}
unsafe fn spare_capacity_mut<T>(&mut self) -> &mut [T] {
let len = self.data.len();
let spare = self.data.capacity() - len;
let ptr = self.data.as_mut_ptr().add(len) as *mut T;
&mut slice::from_raw_parts_mut(ptr, spare)[..]
}
unsafe fn advance_mut(&mut self, cnt: usize) {
if self.data.len() + cnt > self.data.capacity() {
panic!("Buffer::advance_mut(cnt): would advance past end of Buffer");
}
self.data.set_len(self.data.len() + cnt);
}
pub fn poll_read<R>(&mut self, reader: Pin<&mut R>, cx: &mut Context<'_>) -> Poll<io::Result<usize>>
where R: AsyncRead + Unpin + ?Sized {
// Safety: ReadBuf::uninit takes a MaybeUninit.
let mut buf = ReadBuf::uninit(unsafe { self.spare_capacity_mut() });
futures::ready!(reader.poll_read(cx, &mut buf))?;
let len = buf.filled().len();
// Safety: len = buf.filled().len() is guaranteed to be correct.
unsafe {
self.advance_mut(len);
}
Poll::Ready(Ok(len))
}
//
// ===== End unsafe code =====
//
}
impl bytes::Buf for Buffer {
fn advance(&mut self, cnt: usize) {
// advance buffer read pointer.
self.rd_pos += cnt;
if self.rd_pos > self.len() {
// "It is recommended for implementations of advance to
// panic if cnt > self.remaining()"
panic!("read position advanced beyond end of buffer");
}
}
#[inline]
fn chunk(&self) -> &[u8] {
self.bytes()
}
#[inline]
fn remaining(&self) -> usize {
self.len() - self.rd_pos
}
}
impl Deref for Buffer {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
self.bytes()
}
}
impl DerefMut for Buffer {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.data[self.start_offset + self.rd_pos..]
}
}
impl fmt::Write for Buffer {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
self.push_str(s);
Ok(())
}
}
impl From<&[u8]> for Buffer {
fn from(src: &[u8]) -> Self {
let mut buffer = Buffer::new();
buffer.extend_from_slice(src);
buffer
}
}
impl From<Vec<u8>> for Buffer {
fn from(src: Vec<u8>) -> Self {
Buffer {
start_offset: 0,
rd_pos: 0,
data: src,
}
}
}
impl From<&str> for Buffer {
fn from(src: &str) -> Self {
Buffer::from(src.as_bytes())
}
}
impl From<String> for Buffer {
fn from(src: String) -> Self {
Buffer::from(src.into_bytes())
}
}
impl From<bytes::Bytes> for Buffer {
fn from(src: bytes::Bytes) -> Self {
Buffer::from(&src[..])
}
}
impl Default for Buffer {
fn default() -> Self {
Buffer::new()
}
}
impl fmt::Debug for Buffer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let cap = self.data.capacity();
let len = self.len();
f.debug_struct("Buffer")
.field("start_offset", &self.start_offset)
.field("rd_pos", &self.rd_pos)
.field("len", &len)
.field("capacity", &cap)
.field("data", &"[data]")
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_buffer() {
let mut b = Buffer::new();
b.reserve(4096);
b.start_offset = 23;
b.data.resize(b.start_offset, 0);
for _ in 0..50000 {
b.put_str("xyzzyxyzzy");
}
assert!(b.len() == 500000);
assert!(&b[1000..1010] == &b"xyzzyxyzzy"[..]);
}
#[test]
fn test_split() {
let mut b = Buffer::new();
for _ in 0..5000 {
b.put_str("xyzzyxyzzyz");
}
assert!(b.len() == 55000);
let mut n = b.split_off(4918);
assert!(b.len() == 4918);
assert!(n.len() == 50082);
println!("1. {}", std::str::from_utf8(&b[1100..1110]).unwrap());
println!("2. {}", std::str::from_utf8(&n[1100..1110]).unwrap());
assert!(&b[1100..1110] == &b"xyzzyxyzzy"[..]);
assert!(&n[1100..1110] == &b"yzzyxyzzyz"[..]);
n.start_offset += 13;
let x = n.split_to(20000);
println!("3. n.len() {}", n.len());
println!("4. x.len() {}", x.len());
println!("5. {}", std::str::from_utf8(&n[1000..1010]).unwrap());
println!("6. {}", std::str::from_utf8(&x[1000..1010]).unwrap());
assert!(n.len() == 30069);
assert!(x.len() == 20000);
assert!(&n[1000..1010] == &b"yxyzzyzxyz"[..]);
assert!(&x[1000..1010] == &b"zzyxyzzyzx"[..]);
}
#[test]
fn test_spare() {
let mut b = Buffer::with_capacity(194);
assert!(b.data.capacity() == 4096);
b.extend_from_slice(b"0123456789");
let buf: &mut [u8] = unsafe { b.spare_capacity_mut() };
assert!(buf.len() == 4086);
}
}
| truncate | identifier_name |
buffer.rs | //! Buffer implementation like Bytes / BytesMut.
//!
//! It is simpler and contains less unsafe code.
use std::default::Default;
use std::fmt;
use std::io::{self, Read, Write};
use std::marker::Unpin;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::pin::Pin;
use std::slice;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, ReadBuf};
/// A buffer structure, like Bytes/BytesMut.
///
/// It is not much more than a wrapper around Vec.
pub struct Buffer {
start_offset: usize,
rd_pos: usize,
data: Vec<u8>,
}
impl Buffer {
/// Create new Buffer.
pub fn new() -> Buffer {
Buffer {
start_offset: 0,
rd_pos: 0,
data: Vec::new(),
}
}
/// Create a new Buffer with at least `cap` bytes of capacity.
pub fn with_capacity(cap: usize) -> Buffer {
Buffer {
start_offset: 0,
rd_pos: 0,
data: Vec::with_capacity(Self::round_size_up(cap)),
}
}
/// Clear this buffer.
pub fn clear(&mut self) {
self.start_offset = 0;
self.rd_pos = 0;
self.data.truncate(0);
}
/// Truncate this buffer.
pub fn truncate(&mut self, size: usize) {
if size == 0 {
self.clear();
return;
}
if size > self.len() {
panic!("Buffer::truncate(size): size > self.len()");
}
if self.rd_pos > size {
self.rd_pos = size;
}
self.data.truncate(size + self.start_offset);
}
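/// Return the bytes that have not been read yet: everything from the
/// current read position to the end of the buffer, or an empty slice if
/// the whole buffer has already been read.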
pub fn bytes(&self) -> &[u8] {
if self.rd_pos >= self.len() {
return &[][..];
}
&self.data[self.start_offset + self.rd_pos..]
}
/// Split this Buffer in two parts.
///
/// The first part remains in this buffer. The second part is
/// returned as a new Buffer.
pub fn split_off(&mut self, at: usize) -> Buffer {
if at > self.len() {
panic!("Buffer:split_off(size): size > self.len()");
}
if self.rd_pos > at {
self.rd_pos = at;
}
// If "header" < 32K and "body" >= 32K, use a start_offset
// for "body" and copy "header".
if self.start_offset == 0 && at < 32000 && self.len() - at >= 32000 {
let mut bnew = Buffer::with_capacity(at);
mem::swap(self, &mut bnew);
self.extend_from_slice(&bnew[0..at]);
bnew.start_offset = at;
return bnew;
}
let mut bnew = Buffer::new();
let bytes = self.bytes();
bnew.extend_from_slice(&bytes[at..]);
self.truncate(at);
bnew
}
/// Add data to this buffer.
#[inline]
pub fn extend_from_slice(&mut self, extend: &[u8]) {
self.reserve(extend.len());
self.data.extend_from_slice(extend);
}
#[inline]
fn round_size_up(size: usize) -> usize {
if size < 128 {
128
} else if size < 4096 {
4096
} else if size < 65536 {
65536
} else if size < 2097152 {
size.next_power_of_two()
} else {
(1 + size / 1048576) * 1048576
}
}
/// Make sure at least `size` bytes are available.
#[inline]
pub fn reserve(&mut self, size: usize) {
let end = self.data.len() + size;
if end < self.data.capacity() {
return;
}
self.data.reserve_exact(Self::round_size_up(end) - self.data.len());
}
/// total length of all data in this Buffer.
#[inline]
pub fn len(&self) -> usize {
self.data.len() - self.start_offset
}
/// Split this Buffer in two parts.
///
/// The second part remains in this buffer. The first part is
/// returned to the caller.
pub fn split_to(&mut self, size: usize) -> Buffer {
let mut other = self.split_off(size);
mem::swap(self, &mut other);
other
}
/// Write all data in this `Buffer` to a file.
pub fn write_all(&mut self, mut file: impl Write) -> io::Result<()> {
while self.rd_pos < self.len() {
let bytes = self.bytes();
let size = bytes.len();
file.write_all(bytes)?;
self.rd_pos += size;
}
Ok(())
}
/// Add text data to this buffer.
#[inline]
pub fn push_str(&mut self, s: &str) {
self.extend_from_slice(s.as_bytes());
}
/// Add a string to the buffer.
#[inline]
pub fn put_str(&mut self, s: impl AsRef<str>) {
self.extend_from_slice(s.as_ref().as_bytes());
}
/// Return a reference to this Buffer as an UTF-8 string.
#[inline]
pub fn as_utf8_str(&self) -> Result<&str, std::str::Utf8Error> {
std::str::from_utf8(self.bytes())
}
/// Convert this buffer into a Vec<u8>.
pub fn into_bytes(self) -> Vec<u8> {
if self.start_offset > 0 {
let mut v = Vec::with_capacity(Self::round_size_up(self.len()));
v.extend_from_slice(self.bytes());
v
} else {
self.data
}
}
//
// ===== Begin unsafe code =====
//
/// Read an exact number of bytes.
pub fn read_exact(&mut self, reader: &mut std::fs::File, len: usize) -> io::Result<()> {
self.reserve(len);
// Safety: it is safe for a std::fs::File to read into uninitialized memory.
unsafe {
let buf = self.spare_capacity_mut();
reader.read_exact(&mut buf[..len])?;
self.advance_mut(len);
}
Ok(())
}
unsafe fn spare_capacity_mut<T>(&mut self) -> &mut [T] {
let len = self.data.len();
let spare = self.data.capacity() - len;
let ptr = self.data.as_mut_ptr().add(len) as *mut T;
&mut slice::from_raw_parts_mut(ptr, spare)[..]
}
unsafe fn advance_mut(&mut self, cnt: usize) {
if self.data.len() + cnt > self.data.capacity() {
panic!("Buffer::advance_mut(cnt): would advance past end of Buffer");
}
self.data.set_len(self.data.len() + cnt);
}
pub fn poll_read<R>(&mut self, reader: Pin<&mut R>, cx: &mut Context<'_>) -> Poll<io::Result<usize>>
where R: AsyncRead + Unpin + ?Sized {
// Safety: ReadBuf::uninit takes a MaybeUninit.
let mut buf = ReadBuf::uninit(unsafe { self.spare_capacity_mut() });
futures::ready!(reader.poll_read(cx, &mut buf))?;
let len = buf.filled().len();
// Safety: len = buf.filled().len() is guaranteed to be correct.
unsafe {
self.advance_mut(len);
}
Poll::Ready(Ok(len))
}
//
// ===== End unsafe code =====
//
}
impl bytes::Buf for Buffer {
fn advance(&mut self, cnt: usize) {
// advance buffer read pointer.
self.rd_pos += cnt;
if self.rd_pos > self.len() |
}
#[inline]
fn chunk(&self) -> &[u8] {
self.bytes()
}
#[inline]
fn remaining(&self) -> usize {
self.len() - self.rd_pos
}
}
impl Deref for Buffer {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
self.bytes()
}
}
impl DerefMut for Buffer {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.data[self.start_offset + self.rd_pos..]
}
}
impl fmt::Write for Buffer {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
self.push_str(s);
Ok(())
}
}
impl From<&[u8]> for Buffer {
fn from(src: &[u8]) -> Self {
let mut buffer = Buffer::new();
buffer.extend_from_slice(src);
buffer
}
}
impl From<Vec<u8>> for Buffer {
fn from(src: Vec<u8>) -> Self {
Buffer {
start_offset: 0,
rd_pos: 0,
data: src,
}
}
}
impl From<&str> for Buffer {
fn from(src: &str) -> Self {
Buffer::from(src.as_bytes())
}
}
impl From<String> for Buffer {
fn from(src: String) -> Self {
Buffer::from(src.into_bytes())
}
}
impl From<bytes::Bytes> for Buffer {
fn from(src: bytes::Bytes) -> Self {
Buffer::from(&src[..])
}
}
impl Default for Buffer {
fn default() -> Self {
Buffer::new()
}
}
impl fmt::Debug for Buffer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let cap = self.data.capacity();
let len = self.len();
f.debug_struct("Buffer")
.field("start_offset", &self.start_offset)
.field("rd_pos", &self.rd_pos)
.field("len", &len)
.field("capacity", &cap)
.field("data", &"[data]")
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_buffer() {
let mut b = Buffer::new();
b.reserve(4096);
b.start_offset = 23;
b.data.resize(b.start_offset, 0);
for _ in 0..50000 {
b.put_str("xyzzyxyzzy");
}
assert!(b.len() == 500000);
assert!(&b[1000..1010] == &b"xyzzyxyzzy"[..]);
}
#[test]
fn test_split() {
let mut b = Buffer::new();
for _ in 0..5000 {
b.put_str("xyzzyxyzzyz");
}
assert!(b.len() == 55000);
let mut n = b.split_off(4918);
assert!(b.len() == 4918);
assert!(n.len() == 50082);
println!("1. {}", std::str::from_utf8(&b[1100..1110]).unwrap());
println!("2. {}", std::str::from_utf8(&n[1100..1110]).unwrap());
assert!(&b[1100..1110] == &b"xyzzyxyzzy"[..]);
assert!(&n[1100..1110] == &b"yzzyxyzzyz"[..]);
n.start_offset += 13;
let x = n.split_to(20000);
println!("3. n.len() {}", n.len());
println!("4. x.len() {}", x.len());
println!("5. {}", std::str::from_utf8(&n[1000..1010]).unwrap());
println!("6. {}", std::str::from_utf8(&x[1000..1010]).unwrap());
assert!(n.len() == 30069);
assert!(x.len() == 20000);
assert!(&n[1000..1010] == &b"yxyzzyzxyz"[..]);
assert!(&x[1000..1010] == &b"zzyxyzzyzx"[..]);
}
#[test]
fn test_spare() {
let mut b = Buffer::with_capacity(194);
assert!(b.data.capacity() == 4096);
b.extend_from_slice(b"0123456789");
let buf: &mut [u8] = unsafe { b.spare_capacity_mut() };
assert!(buf.len() == 4086);
}
}
| {
// "It is recommended for implementations of advance to
// panic if cnt > self.remaining()"
panic!("read position advanced beyond end of buffer");
} | conditional_block |
udp.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::{fastboot::InterfaceFactory, target::Target},
anyhow::{anyhow, bail, Context as _, Result},
async_io::Async,
async_net::UdpSocket,
async_trait::async_trait,
byteorder::{BigEndian, ByteOrder},
futures::{
io::{AsyncRead, AsyncWrite},
task::{Context, Poll},
Future,
},
std::io::ErrorKind,
std::net::SocketAddr,
std::num::Wrapping,
std::pin::Pin,
std::time::Duration,
timeout::timeout,
zerocopy::{byteorder::big_endian::U16, ByteSlice, FromBytes, LayoutVerified, Unaligned},
};
const HOST_PORT: u16 = 5554;
const REPLY_TIMEOUT: Duration = Duration::from_millis(500);
const MAX_SIZE: u16 = 2048; // Maybe handle larger?
enum PacketType {
Error,
Query,
Init,
Fastboot,
}
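/// Header of a fastboot-over-UDP packet: a one-byte packet id, a one-byte
/// flags field, and a big-endian sequence number.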
#[derive(FromBytes, Unaligned)]
#[repr(C)]
struct Header {
id: u8,
flags: u8,
sequence: U16,
}
struct Packet<B: ByteSlice> {
header: LayoutVerified<B, Header>,
data: B,
}
impl<B: ByteSlice> Packet<B> {
fn parse(bytes: B) -> Option<Packet<B>> {
let (header, data) = LayoutVerified::new_from_prefix(bytes)?;
Some(Self { header, data })
}
#[allow(dead_code)]
fn is_continuation(&self) -> bool {
self.header.flags & 0x001!= 0
}
fn packet_type(&self) -> Result<PacketType> {
match self.header.id {
0x00 => Ok(PacketType::Error),
0x01 => Ok(PacketType::Query),
0x02 => Ok(PacketType::Init),
0x03 => Ok(PacketType::Fastboot),
_ => bail!("Unknown packet type"),
}
}
}
pub struct UdpNetworkInterface {
maximum_size: u16,
sequence: Wrapping<u16>,
socket: UdpSocket,
read_task: Option<Pin<Box<dyn Future<Output = std::io::Result<(usize, Vec<u8>)>>>>>,
write_task: Option<Pin<Box<dyn Future<Output = std::io::Result<usize>>>>>,
}
impl UdpNetworkInterface {
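/// Split `buf` into fastboot data packets that fit within the negotiated
/// maximum packet size, marking every packet except the last one as a
/// continuation.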
fn create_fastboot_packets(&mut self, buf: &[u8]) -> Result<Vec<Vec<u8>>> {
// Leave four bytes for the header.
let header_size = std::mem::size_of::<Header>() as u16;
let max_chunk_size = self.maximum_size - header_size;
let mut seq = self.sequence;
let mut result = Vec::new();
let mut iter = buf.chunks(max_chunk_size.into()).peekable();
while let Some(chunk) = iter.next() {
let mut packet: Vec<u8> = Vec::with_capacity(chunk.len() + header_size as usize);
packet.push(0x03);
if iter.peek().is_none() {
packet.push(0x00);
} else {
packet.push(0x01); // Mark as continuation.
}
for _ in 0..2 {
packet.push(0);
}
BigEndian::write_u16(&mut packet[2..4], seq.0);
seq += Wrapping(1u16);
packet.extend_from_slice(chunk);
result.push(packet);
}
Ok(result)
}
}
impl AsyncRead for UdpNetworkInterface {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<std::io::Result<usize>> {
if self.read_task.is_none() {
let socket = self.socket.clone();
let seq = self.sequence;
self.read_task.replace(Box::pin(async move {
let (out_buf, sz) = send_to_device(&make_empty_fastboot_packet(seq.0), &socket)
.await
.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let packet = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
let mut buf_inner = Vec::new();
match packet.packet_type() {
Ok(PacketType::Fastboot) => {
let size = packet.data.len();
buf_inner.extend(packet.data);
Ok((size, buf_inner))
}
_ => Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected reply from device"),
)),
}
}));
}
if let Some(ref mut task) = self.read_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok((sz, out_buf))) => {
self.read_task = None;
for i in 0..sz {
buf[i] = out_buf[i];
}
self.sequence += Wrapping(1u16);
Poll::Ready(Ok(sz))
}
Poll::Ready(Err(e)) => {
self.read_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to read"),
)))
}
}
}
impl AsyncWrite for UdpNetworkInterface {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
if self.write_task.is_none() | format!("Could not parse response packet"),
))?;
match response.packet_type() {
Ok(PacketType::Fastboot) => (),
_ => {
return Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected Response packet"),
))
}
}
}
Ok(packets.len())
}));
}
if let Some(ref mut task) = self.write_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok(s)) => {
self.write_task = None;
for _i in 0..s {
self.sequence += Wrapping(1u16);
}
Poll::Ready(Ok(buf.len()))
}
Poll::Ready(Err(e)) => {
self.write_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to write"),
)))
}
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
}
async fn send_to_device(buf: &[u8], socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
// Try sending twice
socket.send(buf).await?;
match wait_for_response(socket).await {
Ok(r) => Ok(r),
Err(e) => {
tracing::error!("Could not get reply from Fastboot device - trying again: {}", e);
socket.send(buf).await?;
wait_for_response(socket)
.await
.or_else(|e| bail!("Did not get reply from Fastboot device: {}", e))
}
}
}
async fn wait_for_response(socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
let mut buf = [0u8; 1500]; // Responses should never get this big.
timeout(REPLY_TIMEOUT, Box::pin(socket.recv(&mut buf[..])))
.await
.map_err(|_| anyhow!("Timed out waiting for reply"))?
.map_err(|e| anyhow!("Recv error: {}", e))
.map(|size| (buf, size))
}
async fn make_sender_socket(addr: SocketAddr) -> Result<UdpSocket> {
let socket: std::net::UdpSocket = match addr {
SocketAddr::V4(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV4,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
SocketAddr::V6(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV6,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
}
.into();
let result: UdpSocket = Async::new(socket)?.into();
result.connect(addr).await.context("connect to remote address")?;
Ok(result)
}
fn make_query_packet() -> [u8; 4] {
let mut packet = [0u8; 4];
packet[0] = 0x01;
packet
}
fn make_init_packet(sequence: u16) -> [u8; 8] {
let mut packet = [0u8; 8];
packet[0] = 0x02;
packet[1] = 0x00;
BigEndian::write_u16(&mut packet[2..4], sequence);
BigEndian::write_u16(&mut packet[4..6], 1);
BigEndian::write_u16(&mut packet[6..8], MAX_SIZE);
packet
}
fn make_empty_fastboot_packet(sequence: u16) -> [u8; 4] {
let mut packet = [0u8; 4];
packet[0] = 0x03;
packet[1] = 0x00;
BigEndian::write_u16(&mut packet[2..4], sequence);
packet
}
pub struct UdpNetworkFactory {}
impl UdpNetworkFactory {
pub fn new() -> Self {
Self {}
}
}
#[async_trait(?Send)]
impl InterfaceFactory<UdpNetworkInterface> for UdpNetworkFactory {
async fn open(&mut self, target: &Target) -> Result<UdpNetworkInterface> {
let addr = target.fastboot_address().ok_or(anyhow!("No network address for fastboot"))?.0;
let mut to_sock: SocketAddr = addr.into();
// TODO(fxb/78977): get the port from the mdns packet
to_sock.set_port(HOST_PORT);
let socket = make_sender_socket(to_sock).await?;
let (buf, sz) = send_to_device(&make_query_packet(), &socket)
.await
.map_err(|e| anyhow!("Sending error: {}", e))?;
let packet =
Packet::parse(&buf[..sz]).ok_or(anyhow!("Could not parse response packet."))?;
let sequence = match packet.packet_type() {
Ok(PacketType::Query) => BigEndian::read_u16(&packet.data),
_ => bail!("Unexpected response to query packet"),
};
let (buf, sz) = send_to_device(&make_init_packet(sequence), &socket)
.await
.map_err(|e| anyhow!("Sending error: {}", e))?;
let packet =
Packet::parse(&buf[..sz]).ok_or(anyhow!("Could not parse response packet."))?;
let (version, max) = match packet.packet_type() {
Ok(PacketType::Init) => {
(BigEndian::read_u16(&packet.data[..2]), BigEndian::read_u16(&packet.data[2..4]))
}
_ => bail!("Unexpected response to init packet"),
};
let maximum_size = std::cmp::min(max, MAX_SIZE);
tracing::debug!(
"Fastboot over UDP connection established. Version {}. Max Size: {}",
version,
maximum_size
);
Ok(UdpNetworkInterface {
socket,
maximum_size,
sequence: Wrapping(sequence + 1),
read_task: None,
write_task: None,
})
}
async fn close(&self) {}
}
| {
// TODO(fxb/78975): unfortunately the Task requires the 'static lifetime so we have to
// copy the bytes and move them into the async block.
let packets = self.create_fastboot_packets(buf).map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not create fastboot packets: {}", e),
)
})?;
let socket = self.socket.clone();
self.write_task.replace(Box::pin(async move {
for packet in &packets {
let (out_buf, sz) = send_to_device(&packet, &socket).await.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let response = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other, | conditional_block |
udp.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::{fastboot::InterfaceFactory, target::Target},
anyhow::{anyhow, bail, Context as _, Result},
async_io::Async,
async_net::UdpSocket,
async_trait::async_trait,
byteorder::{BigEndian, ByteOrder},
futures::{
io::{AsyncRead, AsyncWrite},
task::{Context, Poll},
Future,
},
std::io::ErrorKind,
std::net::SocketAddr,
std::num::Wrapping,
std::pin::Pin,
std::time::Duration,
timeout::timeout,
zerocopy::{byteorder::big_endian::U16, ByteSlice, FromBytes, LayoutVerified, Unaligned},
};
const HOST_PORT: u16 = 5554;
const REPLY_TIMEOUT: Duration = Duration::from_millis(500);
const MAX_SIZE: u16 = 2048; // Maybe handle larger?
enum PacketType {
Error,
Query,
Init,
Fastboot,
}
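/// Header of a fastboot-over-UDP packet: a one-byte packet id, a one-byte
/// flags field, and a big-endian sequence number.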
#[derive(FromBytes, Unaligned)]
#[repr(C)]
struct Header {
id: u8,
flags: u8,
sequence: U16,
}
struct Packet<B: ByteSlice> {
header: LayoutVerified<B, Header>,
data: B,
}
impl<B: ByteSlice> Packet<B> {
fn parse(bytes: B) -> Option<Packet<B>> {
let (header, data) = LayoutVerified::new_from_prefix(bytes)?;
Some(Self { header, data })
}
#[allow(dead_code)]
fn is_continuation(&self) -> bool {
self.header.flags & 0x001 != 0
}
fn packet_type(&self) -> Result<PacketType> {
match self.header.id {
0x00 => Ok(PacketType::Error),
0x01 => Ok(PacketType::Query),
0x02 => Ok(PacketType::Init),
0x03 => Ok(PacketType::Fastboot),
_ => bail!("Unknown packet type"),
}
}
}
pub struct UdpNetworkInterface {
maximum_size: u16,
sequence: Wrapping<u16>,
socket: UdpSocket,
read_task: Option<Pin<Box<dyn Future<Output = std::io::Result<(usize, Vec<u8>)>>>>>,
write_task: Option<Pin<Box<dyn Future<Output = std::io::Result<usize>>>>>,
}
impl UdpNetworkInterface {
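/// Split `buf` into fastboot data packets that fit within the negotiated
/// maximum packet size, marking every packet except the last one as a
/// continuation.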
fn create_fastboot_packets(&mut self, buf: &[u8]) -> Result<Vec<Vec<u8>>> {
// Leave four bytes for the header.
let header_size = std::mem::size_of::<Header>() as u16;
let max_chunk_size = self.maximum_size - header_size;
let mut seq = self.sequence;
let mut result = Vec::new();
let mut iter = buf.chunks(max_chunk_size.into()).peekable();
while let Some(chunk) = iter.next() {
let mut packet: Vec<u8> = Vec::with_capacity(chunk.len() + header_size as usize);
packet.push(0x03);
if iter.peek().is_none() {
packet.push(0x00);
} else {
packet.push(0x01); // Mark as continuation.
}
for _ in 0..2 {
packet.push(0);
}
BigEndian::write_u16(&mut packet[2..4], seq.0);
seq += Wrapping(1u16);
packet.extend_from_slice(chunk);
result.push(packet);
}
Ok(result)
}
}
impl AsyncRead for UdpNetworkInterface {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<std::io::Result<usize>> {
if self.read_task.is_none() {
let socket = self.socket.clone();
let seq = self.sequence;
self.read_task.replace(Box::pin(async move {
let (out_buf, sz) = send_to_device(&make_empty_fastboot_packet(seq.0), &socket)
.await
.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let packet = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
let mut buf_inner = Vec::new();
match packet.packet_type() {
Ok(PacketType::Fastboot) => {
let size = packet.data.len();
buf_inner.extend(packet.data);
Ok((size, buf_inner))
}
_ => Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected reply from device"),
)),
}
}));
}
if let Some(ref mut task) = self.read_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok((sz, out_buf))) => {
self.read_task = None;
for i in 0..sz {
buf[i] = out_buf[i];
}
self.sequence += Wrapping(1u16);
Poll::Ready(Ok(sz))
}
Poll::Ready(Err(e)) => {
self.read_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to read"),
)))
}
}
}
impl AsyncWrite for UdpNetworkInterface {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
if self.write_task.is_none() { | format!("Could not create fastboot packets: {}", e),
)
})?;
let socket = self.socket.clone();
self.write_task.replace(Box::pin(async move {
for packet in &packets {
let (out_buf, sz) = send_to_device(&packet, &socket).await.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let response = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
match response.packet_type() {
Ok(PacketType::Fastboot) => (),
_ => {
return Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected Response packet"),
))
}
}
}
Ok(packets.len())
}));
}
if let Some(ref mut task) = self.write_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok(s)) => {
self.write_task = None;
for _i in 0..s {
self.sequence += Wrapping(1u16);
}
Poll::Ready(Ok(buf.len()))
}
Poll::Ready(Err(e)) => {
self.write_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to write"),
)))
}
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
}
async fn send_to_device(buf: &[u8], socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
// Try sending twice
socket.send(buf).await?;
match wait_for_response(socket).await {
Ok(r) => Ok(r),
Err(e) => {
tracing::error!("Could not get reply from Fastboot device - trying again: {}", e);
socket.send(buf).await?;
wait_for_response(socket)
.await
.or_else(|e| bail!("Did not get reply from Fastboot device: {}", e))
}
}
}
async fn wait_for_response(socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
let mut buf = [0u8; 1500]; // Responses should never get this big.
timeout(REPLY_TIMEOUT, Box::pin(socket.recv(&mut buf[..])))
.await
.map_err(|_| anyhow!("Timed out waiting for reply"))?
.map_err(|e| anyhow!("Recv error: {}", e))
.map(|size| (buf, size))
}
async fn make_sender_socket(addr: SocketAddr) -> Result<UdpSocket> {
let socket: std::net::UdpSocket = match addr {
SocketAddr::V4(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV4,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
SocketAddr::V6(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV6,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
}
.into();
let result: UdpSocket = Async::new(socket)?.into();
result.connect(addr).await.context("connect to remote address")?;
Ok(result)
}
fn make_query_packet() -> [u8; 4] {
let mut packet = [0u8; 4];
packet[0] = 0x01;
packet
}
fn make_init_packet(sequence: u16) -> [u8; 8] {
let mut packet = [0u8; 8];
packet[0] = 0x02;
packet[1] = 0x00;
BigEndian::write_u16(&mut packet[2..4], sequence);
BigEndian::write_u16(&mut packet[4..6], 1);
BigEndian::write_u16(&mut packet[6..8], MAX_SIZE);
packet
}
fn make_empty_fastboot_packet(sequence: u16) -> [u8; 4] {
let mut packet = [0u8; 4];
packet[0] = 0x03;
packet[1] = 0x00;
BigEndian::write_u16(&mut packet[2..4], sequence);
packet
}
pub struct UdpNetworkFactory {}
impl UdpNetworkFactory {
pub fn new() -> Self {
Self {}
}
}
#[async_trait(?Send)]
impl InterfaceFactory<UdpNetworkInterface> for UdpNetworkFactory {
async fn open(&mut self, target: &Target) -> Result<UdpNetworkInterface> {
let addr = target.fastboot_address().ok_or(anyhow!("No network address for fastboot"))?.0;
let mut to_sock: SocketAddr = addr.into();
// TODO(fxb/78977): get the port from the mdns packet
to_sock.set_port(HOST_PORT);
let socket = make_sender_socket(to_sock).await?;
let (buf, sz) = send_to_device(&make_query_packet(), &socket)
.await
.map_err(|e| anyhow!("Sending error: {}", e))?;
let packet =
Packet::parse(&buf[..sz]).ok_or(anyhow!("Could not parse response packet."))?;
let sequence = match packet.packet_type() {
Ok(PacketType::Query) => BigEndian::read_u16(&packet.data),
_ => bail!("Unexpected response to query packet"),
};
let (buf, sz) = send_to_device(&make_init_packet(sequence), &socket)
.await
.map_err(|e| anyhow!("Sending error: {}", e))?;
let packet =
Packet::parse(&buf[..sz]).ok_or(anyhow!("Could not parse response packet."))?;
let (version, max) = match packet.packet_type() {
Ok(PacketType::Init) => {
(BigEndian::read_u16(&packet.data[..2]), BigEndian::read_u16(&packet.data[2..4]))
}
_ => bail!("Unexpected response to init packet"),
};
let maximum_size = std::cmp::min(max, MAX_SIZE);
tracing::debug!(
"Fastboot over UDP connection established. Version {}. Max Size: {}",
version,
maximum_size
);
Ok(UdpNetworkInterface {
socket,
maximum_size,
sequence: Wrapping(sequence + 1),
read_task: None,
write_task: None,
})
}
async fn close(&self) {}
} | // TODO(fxb/78975): unfortunately the Task requires the 'static lifetime so we have to
// copy the bytes and move them into the async block.
let packets = self.create_fastboot_packets(buf).map_err(|e| {
std::io::Error::new(
ErrorKind::Other, | random_line_split |
udp.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::{fastboot::InterfaceFactory, target::Target},
anyhow::{anyhow, bail, Context as _, Result},
async_io::Async,
async_net::UdpSocket,
async_trait::async_trait,
byteorder::{BigEndian, ByteOrder},
futures::{
io::{AsyncRead, AsyncWrite},
task::{Context, Poll},
Future,
},
std::io::ErrorKind,
std::net::SocketAddr,
std::num::Wrapping,
std::pin::Pin,
std::time::Duration,
timeout::timeout,
zerocopy::{byteorder::big_endian::U16, ByteSlice, FromBytes, LayoutVerified, Unaligned},
};
const HOST_PORT: u16 = 5554;
const REPLY_TIMEOUT: Duration = Duration::from_millis(500);
const MAX_SIZE: u16 = 2048; // Maybe handle larger?
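// Packet type as carried in the first header byte: 0x00 = Error, 0x01 = Query,
// 0x02 = Init, 0x03 = Fastboot (see `Packet::packet_type` below).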
enum PacketType {
Error,
Query,
Init,
Fastboot,
}
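/// Four-byte wire header shared by all fastboot UDP packets: a one-byte packet id, a one-byte
/// flags field (bit 0 marks a continuation packet) and a big-endian `u16` sequence number.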
#[derive(FromBytes, Unaligned)]
#[repr(C)]
struct Header {
id: u8,
flags: u8,
sequence: U16,
}
struct Packet<B: ByteSlice> {
header: LayoutVerified<B, Header>,
data: B,
}
impl<B: ByteSlice> Packet<B> {
fn parse(bytes: B) -> Option<Packet<B>> {
let (header, data) = LayoutVerified::new_from_prefix(bytes)?;
Some(Self { header, data })
}
#[allow(dead_code)]
fn is_continuation(&self) -> bool {
self.header.flags & 0x001!= 0
}
fn packet_type(&self) -> Result<PacketType> {
match self.header.id {
0x00 => Ok(PacketType::Error),
0x01 => Ok(PacketType::Query),
0x02 => Ok(PacketType::Init),
0x03 => Ok(PacketType::Fastboot),
_ => bail!("Unknown packet type"),
}
}
}
pub struct UdpNetworkInterface {
maximum_size: u16,
sequence: Wrapping<u16>,
socket: UdpSocket,
read_task: Option<Pin<Box<dyn Future<Output = std::io::Result<(usize, Vec<u8>)>>>>>,
write_task: Option<Pin<Box<dyn Future<Output = std::io::Result<usize>>>>>,
}
impl UdpNetworkInterface {
fn create_fastboot_packets(&mut self, buf: &[u8]) -> Result<Vec<Vec<u8>>> {
// Leave four bytes for the header.
let header_size = std::mem::size_of::<Header>() as u16;
let max_chunk_size = self.maximum_size - header_size;
let mut seq = self.sequence;
let mut result = Vec::new();
let mut iter = buf.chunks(max_chunk_size.into()).peekable();
while let Some(chunk) = iter.next() {
let mut packet: Vec<u8> = Vec::with_capacity(chunk.len() + header_size as usize);
packet.push(0x03);
if iter.peek().is_none() {
packet.push(0x00);
} else {
packet.push(0x01); // Mark as continuation.
}
for _ in 0..2 {
packet.push(0);
}
BigEndian::write_u16(&mut packet[2..4], seq.0);
seq += Wrapping(1u16);
packet.extend_from_slice(chunk);
result.push(packet);
}
Ok(result)
}
}
impl AsyncRead for UdpNetworkInterface {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<std::io::Result<usize>> {
if self.read_task.is_none() {
let socket = self.socket.clone();
let seq = self.sequence;
self.read_task.replace(Box::pin(async move {
let (out_buf, sz) = send_to_device(&make_empty_fastboot_packet(seq.0), &socket)
.await
.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let packet = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
let mut buf_inner = Vec::new();
match packet.packet_type() {
Ok(PacketType::Fastboot) => {
let size = packet.data.len();
buf_inner.extend(packet.data);
Ok((size, buf_inner))
}
_ => Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected reply from device"),
)),
}
}));
}
if let Some(ref mut task) = self.read_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok((sz, out_buf))) => {
self.read_task = None;
for i in 0..sz {
buf[i] = out_buf[i];
}
self.sequence += Wrapping(1u16);
Poll::Ready(Ok(sz))
}
Poll::Ready(Err(e)) => {
self.read_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to read"),
)))
}
}
}
impl AsyncWrite for UdpNetworkInterface {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
if self.write_task.is_none() {
// TODO(fxb/78975): unfortunately the Task requires the 'static lifetime so we have to
// copy the bytes and move them into the async block.
let packets = self.create_fastboot_packets(buf).map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not create fastboot packets: {}", e),
)
})?;
let socket = self.socket.clone();
self.write_task.replace(Box::pin(async move {
for packet in &packets {
let (out_buf, sz) = send_to_device(&packet, &socket).await.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let response = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
match response.packet_type() {
Ok(PacketType::Fastboot) => (),
_ => {
return Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected Response packet"),
))
}
}
}
Ok(packets.len())
}));
}
if let Some(ref mut task) = self.write_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok(s)) => {
self.write_task = None;
for _i in 0..s {
self.sequence += Wrapping(1u16);
}
Poll::Ready(Ok(buf.len()))
}
Poll::Ready(Err(e)) => {
self.write_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to write"),
)))
}
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
}
async fn send_to_device(buf: &[u8], socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
// Try sending twice
socket.send(buf).await?;
match wait_for_response(socket).await {
Ok(r) => Ok(r),
Err(e) => {
tracing::error!("Could not get reply from Fastboot device - trying again: {}", e);
socket.send(buf).await?;
wait_for_response(socket)
.await
.or_else(|e| bail!("Did not get reply from Fastboot device: {}", e))
}
}
}
async fn wait_for_response(socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
let mut buf = [0u8; 1500]; // Responses should never get this big.
timeout(REPLY_TIMEOUT, Box::pin(socket.recv(&mut buf[..])))
.await
.map_err(|_| anyhow!("Timed out waiting for reply"))?
.map_err(|e| anyhow!("Recv error: {}", e))
.map(|size| (buf, size))
}
async fn make_sender_socket(addr: SocketAddr) -> Result<UdpSocket> {
let socket: std::net::UdpSocket = match addr {
SocketAddr::V4(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV4,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
SocketAddr::V6(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV6,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
}
.into();
let result: UdpSocket = Async::new(socket)?.into();
result.connect(addr).await.context("connect to remote address")?;
Ok(result)
}
fn make_query_packet() -> [u8; 4] {
let mut packet = [0u8; 4];
packet[0] = 0x01;
packet
}
fn | (sequence: u16) -> [u8; 8] {
let mut packet = [0u8; 8];
packet[0] = 0x02;
packet[1] = 0x00;
BigEndian::write_u16(&mut packet[2..4], sequence);
BigEndian::write_u16(&mut packet[4..6], 1);
BigEndian::write_u16(&mut packet[6..8], MAX_SIZE);
packet
}
fn make_empty_fastboot_packet(sequence: u16) -> [u8; 4] {
let mut packet = [0u8; 4];
packet[0] = 0x03;
packet[1] = 0x00;
BigEndian::write_u16(&mut packet[2..4], sequence);
packet
}
pub struct UdpNetworkFactory {}
impl UdpNetworkFactory {
pub fn new() -> Self {
Self {}
}
}
#[async_trait(?Send)]
impl InterfaceFactory<UdpNetworkInterface> for UdpNetworkFactory {
async fn open(&mut self, target: &Target) -> Result<UdpNetworkInterface> {
let addr = target.fastboot_address().ok_or(anyhow!("No network address for fastboot"))?.0;
let mut to_sock: SocketAddr = addr.into();
// TODO(fxb/78977): get the port from the mdns packet
to_sock.set_port(HOST_PORT);
let socket = make_sender_socket(to_sock).await?;
let (buf, sz) = send_to_device(&make_query_packet(), &socket)
.await
.map_err(|e| anyhow!("Sending error: {}", e))?;
let packet =
Packet::parse(&buf[..sz]).ok_or(anyhow!("Could not parse response packet."))?;
let sequence = match packet.packet_type() {
Ok(PacketType::Query) => BigEndian::read_u16(&packet.data),
_ => bail!("Unexpected response to query packet"),
};
let (buf, sz) = send_to_device(&make_init_packet(sequence), &socket)
.await
.map_err(|e| anyhow!("Sending error: {}", e))?;
let packet =
Packet::parse(&buf[..sz]).ok_or(anyhow!("Could not parse response packet."))?;
let (version, max) = match packet.packet_type() {
Ok(PacketType::Init) => {
(BigEndian::read_u16(&packet.data[..2]), BigEndian::read_u16(&packet.data[2..4]))
}
_ => bail!("Unexpected response to init packet"),
};
let maximum_size = std::cmp::min(max, MAX_SIZE);
tracing::debug!(
"Fastboot over UDP connection established. Version {}. Max Size: {}",
version,
maximum_size
);
Ok(UdpNetworkInterface {
socket,
maximum_size,
sequence: Wrapping(sequence + 1),
read_task: None,
write_task: None,
})
}
async fn close(&self) {}
}
| make_init_packet | identifier_name |
udp.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::{fastboot::InterfaceFactory, target::Target},
anyhow::{anyhow, bail, Context as _, Result},
async_io::Async,
async_net::UdpSocket,
async_trait::async_trait,
byteorder::{BigEndian, ByteOrder},
futures::{
io::{AsyncRead, AsyncWrite},
task::{Context, Poll},
Future,
},
std::io::ErrorKind,
std::net::SocketAddr,
std::num::Wrapping,
std::pin::Pin,
std::time::Duration,
timeout::timeout,
zerocopy::{byteorder::big_endian::U16, ByteSlice, FromBytes, LayoutVerified, Unaligned},
};
const HOST_PORT: u16 = 5554;
const REPLY_TIMEOUT: Duration = Duration::from_millis(500);
const MAX_SIZE: u16 = 2048; // Maybe handle larger?
enum PacketType {
Error,
Query,
Init,
Fastboot,
}
#[derive(FromBytes, Unaligned)]
#[repr(C)]
struct Header {
id: u8,
flags: u8,
sequence: U16,
}
struct Packet<B: ByteSlice> {
header: LayoutVerified<B, Header>,
data: B,
}
impl<B: ByteSlice> Packet<B> {
fn parse(bytes: B) -> Option<Packet<B>> {
let (header, data) = LayoutVerified::new_from_prefix(bytes)?;
Some(Self { header, data })
}
#[allow(dead_code)]
fn is_continuation(&self) -> bool {
self.header.flags & 0x001!= 0
}
fn packet_type(&self) -> Result<PacketType> {
match self.header.id {
0x00 => Ok(PacketType::Error),
0x01 => Ok(PacketType::Query),
0x02 => Ok(PacketType::Init),
0x03 => Ok(PacketType::Fastboot),
_ => bail!("Unknown packet type"),
}
}
}
pub struct UdpNetworkInterface {
maximum_size: u16,
sequence: Wrapping<u16>,
socket: UdpSocket,
read_task: Option<Pin<Box<dyn Future<Output = std::io::Result<(usize, Vec<u8>)>>>>>,
write_task: Option<Pin<Box<dyn Future<Output = std::io::Result<usize>>>>>,
}
impl UdpNetworkInterface {
fn create_fastboot_packets(&mut self, buf: &[u8]) -> Result<Vec<Vec<u8>>> {
// Leave four bytes for the header.
let header_size = std::mem::size_of::<Header>() as u16;
let max_chunk_size = self.maximum_size - header_size;
let mut seq = self.sequence;
let mut result = Vec::new();
let mut iter = buf.chunks(max_chunk_size.into()).peekable();
while let Some(chunk) = iter.next() {
let mut packet: Vec<u8> = Vec::with_capacity(chunk.len() + header_size as usize);
packet.push(0x03);
if iter.peek().is_none() {
packet.push(0x00);
} else {
packet.push(0x01); // Mark as continuation.
}
for _ in 0..2 {
packet.push(0);
}
BigEndian::write_u16(&mut packet[2..4], seq.0);
seq += Wrapping(1u16);
packet.extend_from_slice(chunk);
result.push(packet);
}
Ok(result)
}
}
impl AsyncRead for UdpNetworkInterface {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<std::io::Result<usize>> {
if self.read_task.is_none() {
let socket = self.socket.clone();
let seq = self.sequence;
self.read_task.replace(Box::pin(async move {
let (out_buf, sz) = send_to_device(&make_empty_fastboot_packet(seq.0), &socket)
.await
.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let packet = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
let mut buf_inner = Vec::new();
match packet.packet_type() {
Ok(PacketType::Fastboot) => {
let size = packet.data.len();
buf_inner.extend(packet.data);
Ok((size, buf_inner))
}
_ => Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected reply from device"),
)),
}
}));
}
if let Some(ref mut task) = self.read_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok((sz, out_buf))) => {
self.read_task = None;
for i in 0..sz {
buf[i] = out_buf[i];
}
self.sequence += Wrapping(1u16);
Poll::Ready(Ok(sz))
}
Poll::Ready(Err(e)) => {
self.read_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to read"),
)))
}
}
}
impl AsyncWrite for UdpNetworkInterface {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
if self.write_task.is_none() {
// TODO(fxb/78975): unfortunately the Task requires the 'static lifetime so we have to
// copy the bytes and move them into the async block.
let packets = self.create_fastboot_packets(buf).map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not create fastboot packets: {}", e),
)
})?;
let socket = self.socket.clone();
self.write_task.replace(Box::pin(async move {
for packet in &packets {
let (out_buf, sz) = send_to_device(&packet, &socket).await.map_err(|e| {
std::io::Error::new(
ErrorKind::Other,
format!("Could not send emtpy fastboot packet to device: {}", e),
)
})?;
let response = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
ErrorKind::Other,
format!("Could not parse response packet"),
))?;
match response.packet_type() {
Ok(PacketType::Fastboot) => (),
_ => {
return Err(std::io::Error::new(
ErrorKind::Other,
format!("Unexpected Response packet"),
))
}
}
}
Ok(packets.len())
}));
}
if let Some(ref mut task) = self.write_task {
match task.as_mut().poll(cx) {
Poll::Ready(Ok(s)) => {
self.write_task = None;
for _i in 0..s {
self.sequence += Wrapping(1u16);
}
Poll::Ready(Ok(buf.len()))
}
Poll::Ready(Err(e)) => {
self.write_task = None;
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
}
} else {
// Really shouldn't get here
Poll::Ready(Err(std::io::Error::new(
ErrorKind::Other,
format!("Could not add async task to write"),
)))
}
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
unimplemented!();
}
}
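// Sends a datagram and waits up to `REPLY_TIMEOUT` for the reply, retrying the send exactly
// once before giving up, since fastboot-over-UDP provides no delivery guarantees of its own.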
async fn send_to_device(buf: &[u8], socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
// Try sending twice
socket.send(buf).await?;
match wait_for_response(socket).await {
Ok(r) => Ok(r),
Err(e) => {
tracing::error!("Could not get reply from Fastboot device - trying again: {}", e);
socket.send(buf).await?;
wait_for_response(socket)
.await
.or_else(|e| bail!("Did not get reply from Fastboot device: {}", e))
}
}
}
async fn wait_for_response(socket: &UdpSocket) -> Result<([u8; 1500], usize)> |
async fn make_sender_socket(addr: SocketAddr) -> Result<UdpSocket> {
let socket: std::net::UdpSocket = match addr {
SocketAddr::V4(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV4,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
SocketAddr::V6(ref _saddr) => socket2::Socket::new(
socket2::Domain::IPV6,
socket2::Type::DGRAM,
Some(socket2::Protocol::UDP),
)
.context("construct datagram socket")?,
}
.into();
let result: UdpSocket = Async::new(socket)?.into();
result.connect(addr).await.context("connect to remote address")?;
Ok(result)
}
fn make_query_packet() -> [u8; 4] {
let mut packet = [0u8; 4];
packet[0] = 0x01;
packet
}
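// Init packets are 8 bytes: id 0x02, flags 0x00, the echoed sequence number, the protocol
// version (1) and the largest packet size this host is willing to use (`MAX_SIZE`).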
fn make_init_packet(sequence: u16) -> [u8; 8] {
let mut packet = [0u8; 8];
packet[0] = 0x02;
packet[1] = 0x00;
BigEndian::write_u16(&mut packet[2..4], sequence);
BigEndian::write_u16(&mut packet[4..6], 1);
BigEndian::write_u16(&mut packet[6..8], MAX_SIZE);
packet
}
fn make_empty_fastboot_packet(sequence: u16) -> [u8; 4] {
let mut packet = [0u8; 4];
packet[0] = 0x03;
packet[1] = 0x00;
BigEndian::write_u16(&mut packet[2..4], sequence);
packet
}
pub struct UdpNetworkFactory {}
impl UdpNetworkFactory {
pub fn new() -> Self {
Self {}
}
}
#[async_trait(?Send)]
impl InterfaceFactory<UdpNetworkInterface> for UdpNetworkFactory {
async fn open(&mut self, target: &Target) -> Result<UdpNetworkInterface> {
let addr = target.fastboot_address().ok_or(anyhow!("No network address for fastboot"))?.0;
let mut to_sock: SocketAddr = addr.into();
// TODO(fxb/78977): get the port from the mdns packet
to_sock.set_port(HOST_PORT);
let socket = make_sender_socket(to_sock).await?;
let (buf, sz) = send_to_device(&make_query_packet(), &socket)
.await
.map_err(|e| anyhow!("Sending error: {}", e))?;
let packet =
Packet::parse(&buf[..sz]).ok_or(anyhow!("Could not parse response packet."))?;
let sequence = match packet.packet_type() {
Ok(PacketType::Query) => BigEndian::read_u16(&packet.data),
_ => bail!("Unexpected response to query packet"),
};
let (buf, sz) = send_to_device(&make_init_packet(sequence), &socket)
.await
.map_err(|e| anyhow!("Sending error: {}", e))?;
let packet =
Packet::parse(&buf[..sz]).ok_or(anyhow!("Could not parse response packet."))?;
let (version, max) = match packet.packet_type() {
Ok(PacketType::Init) => {
(BigEndian::read_u16(&packet.data[..2]), BigEndian::read_u16(&packet.data[2..4]))
}
_ => bail!("Unexpected response to init packet"),
};
let maximum_size = std::cmp::min(max, MAX_SIZE);
tracing::debug!(
"Fastboot over UDP connection established. Version {}. Max Size: {}",
version,
maximum_size
);
Ok(UdpNetworkInterface {
socket,
maximum_size,
sequence: Wrapping(sequence + 1),
read_task: None,
write_task: None,
})
}
async fn close(&self) {}
}
| {
let mut buf = [0u8; 1500]; // Responses should never get this big.
timeout(REPLY_TIMEOUT, Box::pin(socket.recv(&mut buf[..])))
.await
.map_err(|_| anyhow!("Timed out waiting for reply"))?
.map_err(|e| anyhow!("Recv error: {}", e))
.map(|size| (buf, size))
} | identifier_body |
peer_connection.rs | and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use std::{
fmt,
future::Future,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
time::{Duration, Instant},
};
use log::*;
use multiaddr::Multiaddr;
use tokio::{
sync::{mpsc, oneshot},
time,
};
use tokio_stream::StreamExt;
use tracing::{self, span, Instrument, Level};
use super::{
direction::ConnectionDirection,
error::{ConnectionManagerError, PeerConnectionError},
manager::ConnectionManagerEvent,
};
#[cfg(feature = "rpc")]
use crate::protocol::rpc::{
pool::RpcClientPool,
pool::RpcPoolClient,
NamedProtocolService,
RpcClient,
RpcClientBuilder,
RpcError,
RPC_MAX_FRAME_SIZE,
};
use crate::{
framing,
framing::CanonicalFraming,
multiplexing::{Control, IncomingSubstreams, Substream, Yamux},
peer_manager::{NodeId, PeerFeatures, PeerIdentityClaim},
protocol::{ProtocolId, ProtocolNegotiation},
utils::atomic_ref_counter::AtomicRefCounter,
};
const LOG_TARGET: &str = "comms::connection_manager::peer_connection";
static ID_COUNTER: AtomicUsize = AtomicUsize::new(0);
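/// Wraps an already-negotiated multiplexed (Yamux) connection: spawns a `PeerConnectionActor`
/// task that services requests and incoming substreams, and returns the `PeerConnection`
/// handle used to reach it over a bounded request channel.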
pub fn try_create(
connection: Yamux,
peer_addr: Multiaddr,
peer_node_id: NodeId,
peer_features: PeerFeatures,
direction: ConnectionDirection,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
peer_identity_claim: PeerIdentityClaim,
) -> Result<PeerConnection, ConnectionManagerError> {
trace!(
target: LOG_TARGET,
"(Peer={}) Socket successfully upgraded to multiplexed socket",
peer_node_id.short_str()
);
// All requests are request/response, so a channel size of 1 is all that is needed
let (peer_tx, peer_rx) = mpsc::channel(1);
let id = ID_COUNTER.fetch_add(1, Ordering::Relaxed); // Monotonic
let substream_counter = connection.substream_counter();
let peer_conn = PeerConnection::new(
id,
peer_tx,
peer_node_id.clone(),
peer_features,
peer_addr,
direction,
substream_counter,
peer_identity_claim,
);
let peer_actor = PeerConnectionActor::new(
id,
peer_node_id,
direction,
connection,
peer_rx,
event_notifier,
our_supported_protocols,
their_supported_protocols,
);
tokio::spawn(peer_actor.run());
Ok(peer_conn)
}
/// Request types for the PeerConnection actor.
#[derive(Debug)]
pub enum PeerConnectionRequest {
/// Open a new substream and negotiate the given protocol
OpenSubstream {
protocol_id: ProtocolId,
reply_tx: oneshot::Sender<Result<NegotiatedSubstream<Substream>, PeerConnectionError>>,
},
/// Disconnect all substreams and close the transport connection
Disconnect(bool, oneshot::Sender<Result<(), PeerConnectionError>>),
}
/// ID type for peer connections
pub type ConnectionId = usize;
/// Request handle for an active peer connection
#[derive(Debug, Clone)]
pub struct PeerConnection {
id: ConnectionId,
peer_node_id: NodeId,
peer_features: PeerFeatures,
request_tx: mpsc::Sender<PeerConnectionRequest>,
address: Arc<Multiaddr>,
direction: ConnectionDirection,
started_at: Instant,
substream_counter: AtomicRefCounter,
handle_counter: Arc<()>,
peer_identity_claim: Option<PeerIdentityClaim>,
}
impl PeerConnection {
pub(crate) fn new(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
peer_identity_claim: PeerIdentityClaim,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: Some(peer_identity_claim),
}
}
/// Should only be used in tests
pub(crate) fn unverified(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: None,
}
}
pub fn peer_node_id(&self) -> &NodeId {
&self.peer_node_id
}
pub fn peer_features(&self) -> PeerFeatures {
self.peer_features
}
pub fn direction(&self) -> ConnectionDirection {
self.direction
}
pub fn address(&self) -> &Multiaddr {
&self.address
}
pub fn id(&self) -> ConnectionId {
self.id
}
pub fn is_connected(&self) -> bool {
!self.request_tx.is_closed()
}
/// Returns an owned future that resolves on disconnection
pub fn on_disconnect(&self) -> impl Future<Output = ()> + 'static {
let request_tx = self.request_tx.clone();
async move { request_tx.closed().await }
}
pub fn age(&self) -> Duration {
self.started_at.elapsed()
}
pub fn substream_count(&self) -> usize {
self.substream_counter.get()
}
pub fn handle_count(&self) -> usize {
Arc::strong_count(&self.handle_counter)
}
pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> {
self.peer_identity_claim.as_ref()
}
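/// Opens a new outbound substream and negotiates the given protocol on it.
///
/// A minimal sketch (the protocol id below is illustrative, not one defined by this module):
/// ```ignore
/// // `conn` is a mutable `PeerConnection` obtained from the connection manager.
/// let substream = conn.open_substream(&ProtocolId::from_static(b"/example/proto/1.0")).await?;
/// ```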
#[tracing::instrument(level = "trace", "peer_connection::open_substream", skip(self))]
pub async fn open_substream(
&mut self,
protocol_id: &ProtocolId,
) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::OpenSubstream {
protocol_id: protocol_id.clone(),
reply_tx,
})
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
#[tracing::instrument(level = "trace", "peer_connection::open_framed_substream", skip(self))]
pub async fn open_framed_substream(
&mut self,
protocol_id: &ProtocolId,
max_frame_size: usize,
) -> Result<CanonicalFraming<Substream>, PeerConnectionError> |
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc", level="trace", skip(self), fields(peer_node_id = self.peer_node_id.to_string().as_str()))]
pub async fn connect_rpc<T>(&mut self) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
self.connect_rpc_using_builder(Default::default()).await
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc_with_builder", level = "trace", skip(self, builder))]
pub async fn connect_rpc_using_builder<T>(&mut self, builder: RpcClientBuilder<T>) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
let protocol = ProtocolId::from_static(T::PROTOCOL_NAME);
debug!(
target: LOG_TARGET,
"Attempting to establish RPC protocol `{}` to peer `{}`",
String::from_utf8_lossy(&protocol),
self.peer_node_id
);
let framed = self.open_framed_substream(&protocol, RPC_MAX_FRAME_SIZE).await?;
builder
.with_protocol_id(protocol)
.with_node_id(self.peer_node_id.clone())
.connect(framed)
.await
}
/// Creates a new RpcClientPool that can be shared between tasks. The client pool will lazily establish up to
/// `max_sessions` sessions and provides the client session that is least used.
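///
/// A minimal usage sketch (the client type and the pool's accessor are illustrative
/// assumptions, not guaranteed by this file):
/// ```ignore
/// let pool = conn.create_rpc_client_pool::<MyServiceClient>(5, Default::default());
/// let mut client = pool.get().await?; // lazily connects; hands back the least-used session
/// ```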
#[cfg(feature = "rpc")]
pub fn create_rpc_client_pool<T>(
&self,
max_sessions: usize,
client_config: RpcClientBuilder<T>,
) -> RpcClientPool<T>
where
T: RpcPoolClient + From<RpcClient> + NamedProtocolService + Clone,
{
RpcClientPool::new(self.clone(), max_sessions, client_config)
}
/// Immediately disconnects the peer connection. This can only fail if the peer connection worker
/// is shut down (and the peer is already disconnected)
pub async fn disconnect(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(false, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
pub(crate) async fn disconnect_silent(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(true, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
}
impl fmt::Display for PeerConnection {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(
f,
"Id: {}, Node ID: {}, Direction: {}, Peer Address: {}, Age: {:.0?}, #Substreams: {}, #Refs: {}",
self.id,
self.peer_node_id.short_str(),
self.direction,
self.address,
self.age(),
self.substream_count(),
self.handle_count()
)
}
}
impl PartialEq for PeerConnection {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
/// Actor for an active connection to a peer.
struct PeerConnectionActor {
id: ConnectionId,
peer_node_id: NodeId,
request_rx: mpsc::Receiver<PeerConnectionRequest>,
direction: ConnectionDirection,
incoming_substreams: IncomingSubstreams,
control: Control,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
}
impl PeerConnectionActor {
fn new(
id: ConnectionId,
peer_node_id: NodeId,
direction: ConnectionDirection,
connection: Yamux,
request_rx: mpsc::Receiver<PeerConnectionRequest>,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
) -> Self {
Self {
id,
peer_node_id,
direction,
control: connection.get_yamux_control(),
incoming_substreams: connection.into_incoming(),
request_rx,
event_notifier,
our_supported_protocols,
their_supported_protocols,
}
}
pub async fn run(mut self) {
loop {
tokio::select! {
maybe_request = self.request_rx.recv() => {
match maybe_request {
Some(request) => self.handle_request(request).await,
None => {
debug!(target: LOG_TARGET, "[{}] All peer connection handles dropped closing the connection", self);
break;
}
}
},
maybe_substream = self.incoming_substreams.next() => {
match maybe_substream {
Some(substream) => {
if let Err(err) = self.handle_incoming_substream(substream).await {
error!(
target: LOG_TARGET,
"[{}] Incoming substream for peer '{}' failed to open because '{error}'",
self,
self.peer_node_id.short_str(),
error = err
)
}
},
None => {
debug!(target: LOG_TARGET, "[{}] Peer '{}' closed the connection", self, self.peer_node_id.short_str());
break;
},
}
}
}
}
if let Err(err) = self.disconnect(false).await {
warn!(
target: LOG_TARGET,
"[{}] Failed to politely close connection to peer '{}' because '{}'",
self,
self.peer_node_id.short_str(),
err
);
}
}
async fn handle_request(&mut self, request: PeerConnectionRequest) {
use PeerConnectionRequest::{Disconnect, OpenSubstream};
match request {
OpenSubstream { protocol_id, reply_tx } => {
let tracing_id = tracing::Span::current().id();
let span = span!(Level::TRACE, "handle_request");
span.follows_from(tracing_id);
let result = self.open_negotiated_protocol_stream(protocol_id).instrument(span).await;
log_if_error_fmt!(
target: LOG_TARGET,
reply_tx.send(result),
"Reply oneshot closed when sending reply",
);
},
Disconnect(silent, reply_tx) => {
debug!(
target: LOG_TARGET,
"[{}] Disconnect{}requested for {} connection to peer '{}'",
self,
if silent { " (silent) " } else { " " },
self.direction,
self.peer_node_id.short_str()
);
let _result = reply_tx.send(self.disconnect(silent).await);
},
}
}
#[tracing::instrument(level="trace", skip(self, stream),fields(comms.direction="inbound"))]
async fn handle_incoming_substream(&mut self, mut stream: Substream) -> Result<(), PeerConnectionError> {
let selected_protocol = ProtocolNegotiation::new(&mut stream)
.negotiate_protocol_inbound(&self.our_supported_protocols)
.await?;
self.notify_event(ConnectionManagerEvent::NewInboundSubstream(
self.peer_node_id.clone(),
selected_protocol,
stream,
))
.await;
Ok(())
}
#[tracing::instrument(skip(self))]
async fn open_negotiated_protocol_stream(
&mut self,
protocol: ProtocolId,
) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> {
const PROTOCOL_NEGOTIATION_TIMEOUT: Duration = Duration::from_secs(10);
debug!(
target: LOG_TARGET,
"[{}] Negotiating protocol '{}' on new substream for peer '{}'",
self,
String::from_utf8_lossy(&protocol),
self.peer_node_id.short_str()
);
let mut stream = self.control.open_stream().await?;
let mut negotiation = ProtocolNegotiation::new(&mut stream);
let selected_protocol = if self.their_supported_protocols.contains(&protocol) {
let fut = negotiation.negotiate_protocol_outbound_optimistic(&protocol);
time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await??
} else {
let selected_protocols = [protocol];
let fut = negotiation.negotiate_protocol_outbound(&selected_protocols);
time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await??
};
Ok(NegotiatedSubstream::new(selected_protocol, stream))
}
async fn notify_event(&mut self, event: ConnectionManagerEvent) {
let _result = self.event_notifier.send(event).await;
}
/// Disconnect this peer connection.
///
/// # Arguments
///
/// silent - true to suppress the PeerDisconnected event, false to publish the event
async fn disconnect(&mut self, silent: bool) -> Result<(), PeerConnectionError> {
self.request_rx.close();
match self.control.close().await {
Err(yamux::ConnectionError::Closed) => {
debug!(
target: LOG_TARGET,
"(Peer = {}) Connection already closed",
self.peer_node_id.short_str()
);
return Ok(());
},
// Only emit closed event once
_ => {
if !silent {
self.notify_event(ConnectionManagerEvent::PeerDisconnected(
self.id,
self.peer_node_id.clone(),
))
.await;
}
},
}
debug!(
target: LOG_TARGET,
"(Peer = {}) Connection closed",
self.peer_node_id.short_str()
);
Ok(())
}
}
impl fmt::Display for PeerConnectionActor {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"PeerConnection(id={}, peer_node_id={}, direction={})",
self.id,
self.peer_node_id.short_str(),
self.direction,
)
}
}
/// Contains the substream and the ProtocolId that was successfully negotiated.
pub struct NegotiatedSubstream<TSubstream> {
pub protocol: ProtocolId,
| {
let substream = self.open_substream(protocol_id).await?;
Ok(framing::canonical(substream.stream, max_frame_size))
} | identifier_body |
peer_connection.rs | the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use std::{
fmt,
future::Future,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
time::{Duration, Instant},
};
use log::*;
use multiaddr::Multiaddr;
use tokio::{
sync::{mpsc, oneshot},
time,
};
use tokio_stream::StreamExt;
use tracing::{self, span, Instrument, Level};
use super::{
direction::ConnectionDirection,
error::{ConnectionManagerError, PeerConnectionError},
manager::ConnectionManagerEvent,
};
#[cfg(feature = "rpc")]
use crate::protocol::rpc::{
pool::RpcClientPool,
pool::RpcPoolClient,
NamedProtocolService,
RpcClient,
RpcClientBuilder,
RpcError,
RPC_MAX_FRAME_SIZE,
};
use crate::{
framing,
framing::CanonicalFraming,
multiplexing::{Control, IncomingSubstreams, Substream, Yamux},
peer_manager::{NodeId, PeerFeatures, PeerIdentityClaim},
protocol::{ProtocolId, ProtocolNegotiation},
utils::atomic_ref_counter::AtomicRefCounter,
};
const LOG_TARGET: &str = "comms::connection_manager::peer_connection";
static ID_COUNTER: AtomicUsize = AtomicUsize::new(0);
pub fn try_create(
connection: Yamux,
peer_addr: Multiaddr,
peer_node_id: NodeId,
peer_features: PeerFeatures,
direction: ConnectionDirection,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
peer_identity_claim: PeerIdentityClaim,
) -> Result<PeerConnection, ConnectionManagerError> {
trace!(
target: LOG_TARGET,
"(Peer={}) Socket successfully upgraded to multiplexed socket",
peer_node_id.short_str()
);
// All requests are request/response, so a channel size of 1 is all that is needed
let (peer_tx, peer_rx) = mpsc::channel(1);
let id = ID_COUNTER.fetch_add(1, Ordering::Relaxed); // Monotonic
let substream_counter = connection.substream_counter();
let peer_conn = PeerConnection::new(
id,
peer_tx,
peer_node_id.clone(),
peer_features,
peer_addr,
direction,
substream_counter,
peer_identity_claim,
);
let peer_actor = PeerConnectionActor::new(
id,
peer_node_id,
direction,
connection,
peer_rx,
event_notifier,
our_supported_protocols,
their_supported_protocols,
);
tokio::spawn(peer_actor.run());
Ok(peer_conn)
}
/// Request types for the PeerConnection actor.
#[derive(Debug)]
pub enum PeerConnectionRequest {
/// Open a new substream and negotiate the given protocol
OpenSubstream {
protocol_id: ProtocolId,
reply_tx: oneshot::Sender<Result<NegotiatedSubstream<Substream>, PeerConnectionError>>,
},
/// Disconnect all substreams and close the transport connection
Disconnect(bool, oneshot::Sender<Result<(), PeerConnectionError>>),
}
/// ID type for peer connections
pub type ConnectionId = usize;
/// Request handle for an active peer connection
#[derive(Debug, Clone)]
pub struct PeerConnection {
id: ConnectionId,
peer_node_id: NodeId,
peer_features: PeerFeatures,
request_tx: mpsc::Sender<PeerConnectionRequest>,
address: Arc<Multiaddr>,
direction: ConnectionDirection,
started_at: Instant,
substream_counter: AtomicRefCounter,
handle_counter: Arc<()>,
peer_identity_claim: Option<PeerIdentityClaim>,
}
impl PeerConnection {
pub(crate) fn new(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
peer_identity_claim: PeerIdentityClaim,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: Some(peer_identity_claim),
}
}
/// Should only be used in tests
pub(crate) fn unverified(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: None,
}
}
pub fn peer_node_id(&self) -> &NodeId {
&self.peer_node_id
}
pub fn peer_features(&self) -> PeerFeatures {
self.peer_features
}
pub fn direction(&self) -> ConnectionDirection {
self.direction
}
pub fn address(&self) -> &Multiaddr {
&self.address
}
pub fn id(&self) -> ConnectionId {
self.id
}
pub fn is_connected(&self) -> bool {
!self.request_tx.is_closed()
}
/// Returns an owned future that resolves on disconnection
pub fn on_disconnect(&self) -> impl Future<Output = ()> + 'static {
let request_tx = self.request_tx.clone();
async move { request_tx.closed().await }
}
pub fn age(&self) -> Duration {
self.started_at.elapsed()
}
pub fn substream_count(&self) -> usize {
self.substream_counter.get()
}
pub fn handle_count(&self) -> usize {
Arc::strong_count(&self.handle_counter)
}
pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> {
self.peer_identity_claim.as_ref()
}
#[tracing::instrument(level = "trace", "peer_connection::open_substream", skip(self))]
pub async fn open_substream(
&mut self,
protocol_id: &ProtocolId,
) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::OpenSubstream {
protocol_id: protocol_id.clone(),
reply_tx,
})
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
#[tracing::instrument(level = "trace", "peer_connection::open_framed_substream", skip(self))]
pub async fn open_framed_substream(
&mut self,
protocol_id: &ProtocolId,
max_frame_size: usize,
) -> Result<CanonicalFraming<Substream>, PeerConnectionError> {
let substream = self.open_substream(protocol_id).await?;
Ok(framing::canonical(substream.stream, max_frame_size))
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc", level="trace", skip(self), fields(peer_node_id = self.peer_node_id.to_string().as_str()))]
pub async fn connect_rpc<T>(&mut self) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
self.connect_rpc_using_builder(Default::default()).await
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc_with_builder", level = "trace", skip(self, builder))]
pub async fn connect_rpc_using_builder<T>(&mut self, builder: RpcClientBuilder<T>) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
let protocol = ProtocolId::from_static(T::PROTOCOL_NAME);
debug!(
target: LOG_TARGET,
"Attempting to establish RPC protocol `{}` to peer `{}`",
String::from_utf8_lossy(&protocol),
self.peer_node_id
);
let framed = self.open_framed_substream(&protocol, RPC_MAX_FRAME_SIZE).await?;
builder
.with_protocol_id(protocol)
.with_node_id(self.peer_node_id.clone())
.connect(framed)
.await
}
/// Creates a new RpcClientPool that can be shared between tasks. The client pool will lazily establish up to
/// `max_sessions` sessions and provides the client session that is least used.
#[cfg(feature = "rpc")]
pub fn create_rpc_client_pool<T>(
&self,
max_sessions: usize,
client_config: RpcClientBuilder<T>,
) -> RpcClientPool<T>
where
T: RpcPoolClient + From<RpcClient> + NamedProtocolService + Clone,
{
RpcClientPool::new(self.clone(), max_sessions, client_config)
}
/// Immediately disconnects the peer connection. This can only fail if the peer connection worker
/// is shut down (and the peer is already disconnected)
pub async fn disconnect(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(false, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
pub(crate) async fn disconnect_silent(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(true, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
}
impl fmt::Display for PeerConnection {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(
f,
"Id: {}, Node ID: {}, Direction: {}, Peer Address: {}, Age: {:.0?}, #Substreams: {}, #Refs: {}",
self.id,
self.peer_node_id.short_str(),
self.direction,
self.address,
self.age(),
self.substream_count(),
self.handle_count()
)
}
}
impl PartialEq for PeerConnection {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
/// Actor for an active connection to a peer.
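/// The actor's `run` loop multiplexes two event sources: requests arriving from
/// `PeerConnection` handles and substreams opened by the remote peer. It exits, and closes the
/// underlying Yamux connection, once every handle has been dropped or the peer disconnects.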
struct PeerConnectionActor {
id: ConnectionId,
peer_node_id: NodeId,
request_rx: mpsc::Receiver<PeerConnectionRequest>,
direction: ConnectionDirection,
incoming_substreams: IncomingSubstreams,
control: Control,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
}
impl PeerConnectionActor {
fn new(
id: ConnectionId,
peer_node_id: NodeId,
direction: ConnectionDirection,
connection: Yamux,
request_rx: mpsc::Receiver<PeerConnectionRequest>,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
) -> Self {
Self {
id,
peer_node_id,
direction,
control: connection.get_yamux_control(),
incoming_substreams: connection.into_incoming(),
request_rx,
event_notifier,
our_supported_protocols,
their_supported_protocols,
}
}
pub async fn run(mut self) {
loop {
tokio::select! {
maybe_request = self.request_rx.recv() => {
match maybe_request {
Some(request) => self.handle_request(request).await,
None => {
debug!(target: LOG_TARGET, "[{}] All peer connection handles dropped closing the connection", self);
break;
}
}
},
maybe_substream = self.incoming_substreams.next() => {
match maybe_substream {
Some(substream) => {
if let Err(err) = self.handle_incoming_substream(substream).await {
error!(
target: LOG_TARGET,
"[{}] Incoming substream for peer '{}' failed to open because '{error}'",
self,
self.peer_node_id.short_str(),
error = err
)
}
},
None => {
debug!(target: LOG_TARGET, "[{}] Peer '{}' closed the connection", self, self.peer_node_id.short_str());
break;
},
}
}
}
}
if let Err(err) = self.disconnect(false).await {
warn!(
target: LOG_TARGET,
"[{}] Failed to politely close connection to peer '{}' because '{}'",
self,
self.peer_node_id.short_str(),
err
);
}
}
async fn handle_request(&mut self, request: PeerConnectionRequest) {
use PeerConnectionRequest::{Disconnect, OpenSubstream};
match request {
OpenSubstream { protocol_id, reply_tx } => | ,
Disconnect(silent, reply_tx) => {
debug!(
target: LOG_TARGET,
"[{}] Disconnect{}requested for {} connection to peer '{}'",
self,
if silent { " (silent) " } else { " " },
self.direction,
self.peer_node_id.short_str()
);
let _result = reply_tx.send(self.disconnect(silent).await);
},
}
}
#[tracing::instrument(level="trace", skip(self, stream),fields(comms.direction="inbound"))]
async fn handle_incoming_substream(&mut self, mut stream: Substream) -> Result<(), PeerConnectionError> {
let selected_protocol = ProtocolNegotiation::new(&mut stream)
.negotiate_protocol_inbound(&self.our_supported_protocols)
.await?;
self.notify_event(ConnectionManagerEvent::NewInboundSubstream(
self.peer_node_id.clone(),
selected_protocol,
stream,
))
.await;
Ok(())
}
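// Outbound protocol negotiation: if the peer already advertised support for this protocol we
// negotiate optimistically, otherwise we fall back to the full negotiation exchange; either
// way the attempt is bounded by a 10 second timeout.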
#[tracing::instrument(skip(self))]
async fn open_negotiated_protocol_stream(
&mut self,
protocol: ProtocolId,
) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> {
const PROTOCOL_NEGOTIATION_TIMEOUT: Duration = Duration::from_secs(10);
debug!(
target: LOG_TARGET,
"[{}] Negotiating protocol '{}' on new substream for peer '{}'",
self,
String::from_utf8_lossy(&protocol),
self.peer_node_id.short_str()
);
let mut stream = self.control.open_stream().await?;
let mut negotiation = ProtocolNegotiation::new(&mut stream);
let selected_protocol = if self.their_supported_protocols.contains(&protocol) {
let fut = negotiation.negotiate_protocol_outbound_optimistic(&protocol);
time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await??
} else {
let selected_protocols = [protocol];
let fut = negotiation.negotiate_protocol_outbound(&selected_protocols);
time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await??
};
Ok(NegotiatedSubstream::new(selected_protocol, stream))
}
async fn notify_event(&mut self, event: ConnectionManagerEvent) {
let _result = self.event_notifier.send(event).await;
}
/// Disconnect this peer connection.
///
/// # Arguments
///
/// silent - true to suppress the PeerDisconnected event, false to publish the event
async fn disconnect(&mut self, silent: bool) -> Result<(), PeerConnectionError> {
self.request_rx.close();
match self.control.close().await {
Err(yamux::ConnectionError::Closed) => {
debug!(
target: LOG_TARGET,
"(Peer = {}) Connection already closed",
self.peer_node_id.short_str()
);
return Ok(());
},
// Only emit closed event once
_ => {
if !silent {
self.notify_event(ConnectionManagerEvent::PeerDisconnected(
self.id,
self.peer_node_id.clone(),
))
.await;
}
},
}
debug!(
target: LOG_TARGET,
"(Peer = {}) Connection closed",
self.peer_node_id.short_str()
);
Ok(())
}
}
impl fmt::Display for PeerConnectionActor {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"PeerConnection(id={}, peer_node_id={}, direction={})",
self.id,
self.peer_node_id.short_str(),
self.direction,
)
}
}
/// Contains the substream and the ProtocolId that was successfully negotiated.
pub struct NegotiatedSubstream<TSubstream> {
pub protocol: ProtocolId,
| {
let tracing_id = tracing::Span::current().id();
let span = span!(Level::TRACE, "handle_request");
span.follows_from(tracing_id);
let result = self.open_negotiated_protocol_stream(protocol_id).instrument(span).await;
log_if_error_fmt!(
target: LOG_TARGET,
reply_tx.send(result),
"Reply oneshot closed when sending reply",
);
} | conditional_block |
peer_connection.rs | and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use std::{
fmt,
future::Future,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
time::{Duration, Instant},
};
use log::*;
use multiaddr::Multiaddr;
use tokio::{
sync::{mpsc, oneshot},
time,
};
use tokio_stream::StreamExt;
use tracing::{self, span, Instrument, Level};
use super::{
direction::ConnectionDirection,
error::{ConnectionManagerError, PeerConnectionError},
manager::ConnectionManagerEvent,
};
#[cfg(feature = "rpc")]
use crate::protocol::rpc::{
pool::RpcClientPool,
pool::RpcPoolClient,
NamedProtocolService,
RpcClient,
RpcClientBuilder,
RpcError,
RPC_MAX_FRAME_SIZE,
};
use crate::{
framing,
framing::CanonicalFraming,
multiplexing::{Control, IncomingSubstreams, Substream, Yamux},
peer_manager::{NodeId, PeerFeatures, PeerIdentityClaim},
protocol::{ProtocolId, ProtocolNegotiation},
utils::atomic_ref_counter::AtomicRefCounter,
};
const LOG_TARGET: &str = "comms::connection_manager::peer_connection";
static ID_COUNTER: AtomicUsize = AtomicUsize::new(0);
pub fn try_create(
connection: Yamux,
peer_addr: Multiaddr,
peer_node_id: NodeId,
peer_features: PeerFeatures,
direction: ConnectionDirection,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
peer_identity_claim: PeerIdentityClaim,
) -> Result<PeerConnection, ConnectionManagerError> {
trace!(
target: LOG_TARGET,
"(Peer={}) Socket successfully upgraded to multiplexed socket",
peer_node_id.short_str()
);
// All requests are request/response, so a channel size of 1 is all that is needed
let (peer_tx, peer_rx) = mpsc::channel(1);
let id = ID_COUNTER.fetch_add(1, Ordering::Relaxed); // Monotonic
let substream_counter = connection.substream_counter();
let peer_conn = PeerConnection::new(
id,
peer_tx,
peer_node_id.clone(),
peer_features,
peer_addr,
direction,
substream_counter,
peer_identity_claim,
);
let peer_actor = PeerConnectionActor::new(
id,
peer_node_id,
direction,
connection,
peer_rx,
event_notifier,
our_supported_protocols,
their_supported_protocols,
);
tokio::spawn(peer_actor.run());
Ok(peer_conn)
}
/// Request types for the PeerConnection actor.
#[derive(Debug)]
pub enum PeerConnectionRequest {
/// Open a new substream and negotiate the given protocol
OpenSubstream {
protocol_id: ProtocolId,
reply_tx: oneshot::Sender<Result<NegotiatedSubstream<Substream>, PeerConnectionError>>,
},
/// Disconnect all substreams and close the transport connection
Disconnect(bool, oneshot::Sender<Result<(), PeerConnectionError>>),
}
/// ID type for peer connections
pub type ConnectionId = usize;
/// Request handle for an active peer connection
#[derive(Debug, Clone)]
pub struct PeerConnection {
id: ConnectionId,
peer_node_id: NodeId,
peer_features: PeerFeatures,
request_tx: mpsc::Sender<PeerConnectionRequest>,
address: Arc<Multiaddr>,
direction: ConnectionDirection,
started_at: Instant,
substream_counter: AtomicRefCounter,
handle_counter: Arc<()>,
peer_identity_claim: Option<PeerIdentityClaim>,
}
impl PeerConnection {
pub(crate) fn new(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
peer_identity_claim: PeerIdentityClaim,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: Some(peer_identity_claim),
}
}
/// Should only be used in tests
pub(crate) fn unverified(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: None,
}
}
pub fn peer_node_id(&self) -> &NodeId {
&self.peer_node_id
}
pub fn peer_features(&self) -> PeerFeatures {
self.peer_features
}
pub fn direction(&self) -> ConnectionDirection {
self.direction
}
pub fn address(&self) -> &Multiaddr {
&self.address
}
pub fn id(&self) -> ConnectionId {
self.id
}
pub fn is_connected(&self) -> bool {
!self.request_tx.is_closed()
}
/// Returns an owned future that resolves on disconnection
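///
/// A rough usage sketch (not from the original source), assuming `conn` is a cloned
/// `PeerConnection` handle:
/// ```ignore
/// let on_disconnect = conn.on_disconnect();
/// tokio::spawn(async move {
///     on_disconnect.await;
///     // the connection has been closed or dropped; clean up here
/// });
/// ```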
pub fn on_disconnect(&self) -> impl Future<Output = ()> + 'static {
let request_tx = self.request_tx.clone();
async move { request_tx.closed().await }
}
pub fn age(&self) -> Duration {
self.started_at.elapsed()
}
pub fn substream_count(&self) -> usize {
self.substream_counter.get()
}
pub fn handle_count(&self) -> usize {
Arc::strong_count(&self.handle_counter)
}
pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> {
self.peer_identity_claim.as_ref()
}
#[tracing::instrument(level = "trace", "peer_connection::open_substream", skip(self))]
pub async fn open_substream(
&mut self,
protocol_id: &ProtocolId,
) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::OpenSubstream {
protocol_id: protocol_id.clone(),
reply_tx,
})
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
#[tracing::instrument(level = "trace", "peer_connection::open_framed_substream", skip(self))]
pub async fn open_framed_substream(
&mut self,
protocol_id: &ProtocolId,
max_frame_size: usize,
) -> Result<CanonicalFraming<Substream>, PeerConnectionError> {
let substream = self.open_substream(protocol_id).await?;
Ok(framing::canonical(substream.stream, max_frame_size))
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc", level="trace", skip(self), fields(peer_node_id = self.peer_node_id.to_string().as_str()))]
pub async fn connect_rpc<T>(&mut self) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
self.connect_rpc_using_builder(Default::default()).await
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc_with_builder", level = "trace", skip(self, builder))]
pub async fn connect_rpc_using_builder<T>(&mut self, builder: RpcClientBuilder<T>) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
let protocol = ProtocolId::from_static(T::PROTOCOL_NAME);
debug!(
target: LOG_TARGET,
"Attempting to establish RPC protocol `{}` to peer `{}`",
String::from_utf8_lossy(&protocol),
self.peer_node_id
);
let framed = self.open_framed_substream(&protocol, RPC_MAX_FRAME_SIZE).await?;
builder
.with_protocol_id(protocol)
.with_node_id(self.peer_node_id.clone())
.connect(framed)
.await
}
/// Creates a new RpcClientPool that can be shared between tasks. The client pool will lazily establish up to
/// `max_sessions` sessions and provides the least-used client session.
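///
/// A hedged sketch (the `MyRpcClient` service type is hypothetical, not part of this crate):
/// ```ignore
/// let pool = conn.create_rpc_client_pool::<MyRpcClient>(5, Default::default());
/// ```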
#[cfg(feature = "rpc")]
pub fn create_rpc_client_pool<T>(
&self,
max_sessions: usize,
client_config: RpcClientBuilder<T>,
) -> RpcClientPool<T>
where
T: RpcPoolClient + From<RpcClient> + NamedProtocolService + Clone,
{
RpcClientPool::new(self.clone(), max_sessions, client_config)
}
/// Immediately disconnects the peer connection. This can only fail if the peer connection worker
/// is shut down (and the peer is already disconnected)
pub async fn disconnect(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(false, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
pub(crate) async fn disconnect_silent(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(true, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
}
impl fmt::Display for PeerConnection {
fn | (&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(
f,
"Id: {}, Node ID: {}, Direction: {}, Peer Address: {}, Age: {:.0?}, #Substreams: {}, #Refs: {}",
self.id,
self.peer_node_id.short_str(),
self.direction,
self.address,
self.age(),
self.substream_count(),
self.handle_count()
)
}
}
impl PartialEq for PeerConnection {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
/// Actor for an active connection to a peer.
struct PeerConnectionActor {
id: ConnectionId,
peer_node_id: NodeId,
request_rx: mpsc::Receiver<PeerConnectionRequest>,
direction: ConnectionDirection,
incoming_substreams: IncomingSubstreams,
control: Control,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
}
impl PeerConnectionActor {
fn new(
id: ConnectionId,
peer_node_id: NodeId,
direction: ConnectionDirection,
connection: Yamux,
request_rx: mpsc::Receiver<PeerConnectionRequest>,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
) -> Self {
Self {
id,
peer_node_id,
direction,
control: connection.get_yamux_control(),
incoming_substreams: connection.into_incoming(),
request_rx,
event_notifier,
our_supported_protocols,
their_supported_protocols,
}
}
pub async fn run(mut self) {
loop {
tokio::select! {
maybe_request = self.request_rx.recv() => {
match maybe_request {
Some(request) => self.handle_request(request).await,
None => {
debug!(target: LOG_TARGET, "[{}] All peer connection handles dropped, closing the connection", self);
break;
}
}
},
maybe_substream = self.incoming_substreams.next() => {
match maybe_substream {
Some(substream) => {
if let Err(err) = self.handle_incoming_substream(substream).await {
error!(
target: LOG_TARGET,
"[{}] Incoming substream for peer '{}' failed to open because '{error}'",
self,
self.peer_node_id.short_str(),
error = err
)
}
},
None => {
debug!(target: LOG_TARGET, "[{}] Peer '{}' closed the connection", self, self.peer_node_id.short_str());
break;
},
}
}
}
}
if let Err(err) = self.disconnect(false).await {
warn!(
target: LOG_TARGET,
"[{}] Failed to politely close connection to peer '{}' because '{}'",
self,
self.peer_node_id.short_str(),
err
);
}
}
async fn handle_request(&mut self, request: PeerConnectionRequest) {
use PeerConnectionRequest::{Disconnect, OpenSubstream};
match request {
OpenSubstream { protocol_id, reply_tx } => {
let tracing_id = tracing::Span::current().id();
let span = span!(Level::TRACE, "handle_request");
span.follows_from(tracing_id);
let result = self.open_negotiated_protocol_stream(protocol_id).instrument(span).await;
log_if_error_fmt!(
target: LOG_TARGET,
reply_tx.send(result),
"Reply oneshot closed when sending reply",
);
},
Disconnect(silent, reply_tx) => {
debug!(
target: LOG_TARGET,
"[{}] Disconnect{}requested for {} connection to peer '{}'",
self,
if silent { " (silent) " } else { " " },
self.direction,
self.peer_node_id.short_str()
);
let _result = reply_tx.send(self.disconnect(silent).await);
},
}
}
#[tracing::instrument(level="trace", skip(self, stream),fields(comms.direction="inbound"))]
async fn handle_incoming_substream(&mut self, mut stream: Substream) -> Result<(), PeerConnectionError> {
let selected_protocol = ProtocolNegotiation::new(&mut stream)
.negotiate_protocol_inbound(&self.our_supported_protocols)
.await?;
self.notify_event(ConnectionManagerEvent::NewInboundSubstream(
self.peer_node_id.clone(),
selected_protocol,
stream,
))
.await;
Ok(())
}
#[tracing::instrument(skip(self))]
async fn open_negotiated_protocol_stream(
&mut self,
protocol: ProtocolId,
) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> {
const PROTOCOL_NEGOTIATION_TIMEOUT: Duration = Duration::from_secs(10);
debug!(
target: LOG_TARGET,
"[{}] Negotiating protocol '{}' on new substream for peer '{}'",
self,
String::from_utf8_lossy(&protocol),
self.peer_node_id.short_str()
);
let mut stream = self.control.open_stream().await?;
let mut negotiation = ProtocolNegotiation::new(&mut stream);
let selected_protocol = if self.their_supported_protocols.contains(&protocol) {
let fut = negotiation.negotiate_protocol_outbound_optimistic(&protocol);
time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await??
} else {
let selected_protocols = [protocol];
let fut = negotiation.negotiate_protocol_outbound(&selected_protocols);
time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await??
};
Ok(NegotiatedSubstream::new(selected_protocol, stream))
}
async fn notify_event(&mut self, event: ConnectionManagerEvent) {
let _result = self.event_notifier.send(event).await;
}
/// Disconnect this peer connection.
///
/// # Arguments
///
/// silent - true to suppress the PeerDisconnected event, false to publish the event
async fn disconnect(&mut self, silent: bool) -> Result<(), PeerConnectionError> {
self.request_rx.close();
match self.control.close().await {
Err(yamux::ConnectionError::Closed) => {
debug!(
target: LOG_TARGET,
"(Peer = {}) Connection already closed",
self.peer_node_id.short_str()
);
return Ok(());
},
// Only emit closed event once
_ => {
if !silent {
self.notify_event(ConnectionManagerEvent::PeerDisconnected(
self.id,
self.peer_node_id.clone(),
))
.await;
}
},
}
debug!(
target: LOG_TARGET,
"(Peer = {}) Connection closed",
self.peer_node_id.short_str()
);
Ok(())
}
}
impl fmt::Display for PeerConnectionActor {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"PeerConnection(id={}, peer_node_id={}, direction={})",
self.id,
self.peer_node_id.short_str(),
self.direction,
)
}
}
/// Contains the substream and the ProtocolId that was successfully negotiated.
pub struct NegotiatedSubstream<TSubstream> {
pub protocol: ProtocolId,
| fmt | identifier_name |
peer_connection.rs | conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use std::{
fmt,
future::Future,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
time::{Duration, Instant},
};
use log::*;
use multiaddr::Multiaddr;
use tokio::{
sync::{mpsc, oneshot},
time,
};
use tokio_stream::StreamExt;
use tracing::{self, span, Instrument, Level};
use super::{
direction::ConnectionDirection,
error::{ConnectionManagerError, PeerConnectionError},
manager::ConnectionManagerEvent,
};
#[cfg(feature = "rpc")]
use crate::protocol::rpc::{
pool::RpcClientPool,
pool::RpcPoolClient,
NamedProtocolService,
RpcClient,
RpcClientBuilder,
RpcError,
RPC_MAX_FRAME_SIZE,
};
use crate::{
framing,
framing::CanonicalFraming,
multiplexing::{Control, IncomingSubstreams, Substream, Yamux},
peer_manager::{NodeId, PeerFeatures, PeerIdentityClaim},
protocol::{ProtocolId, ProtocolNegotiation},
utils::atomic_ref_counter::AtomicRefCounter,
};
const LOG_TARGET: &str = "comms::connection_manager::peer_connection";
static ID_COUNTER: AtomicUsize = AtomicUsize::new(0);
pub fn try_create(
connection: Yamux,
peer_addr: Multiaddr,
peer_node_id: NodeId,
peer_features: PeerFeatures,
direction: ConnectionDirection,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
peer_identity_claim: PeerIdentityClaim,
) -> Result<PeerConnection, ConnectionManagerError> {
trace!(
target: LOG_TARGET,
"(Peer={}) Socket successfully upgraded to multiplexed socket",
peer_node_id.short_str()
);
// All requests are request/response, so a channel size of 1 is all that is needed
let (peer_tx, peer_rx) = mpsc::channel(1);
let id = ID_COUNTER.fetch_add(1, Ordering::Relaxed); // Monotonic
let substream_counter = connection.substream_counter();
let peer_conn = PeerConnection::new(
id,
peer_tx,
peer_node_id.clone(),
peer_features,
peer_addr,
direction,
substream_counter,
peer_identity_claim,
);
let peer_actor = PeerConnectionActor::new(
id,
peer_node_id,
direction,
connection,
peer_rx,
event_notifier,
our_supported_protocols,
their_supported_protocols,
);
tokio::spawn(peer_actor.run());
Ok(peer_conn)
}
/// Request types for the PeerConnection actor.
#[derive(Debug)]
pub enum PeerConnectionRequest {
/// Open a new substream and negotiate the given protocol
OpenSubstream {
protocol_id: ProtocolId,
reply_tx: oneshot::Sender<Result<NegotiatedSubstream<Substream>, PeerConnectionError>>,
},
/// Disconnect all substreams and close the transport connection
Disconnect(bool, oneshot::Sender<Result<(), PeerConnectionError>>),
}
/// ID type for peer connections
pub type ConnectionId = usize;
/// Request handle for an active peer connection
#[derive(Debug, Clone)]
pub struct PeerConnection {
id: ConnectionId,
peer_node_id: NodeId, | address: Arc<Multiaddr>,
direction: ConnectionDirection,
started_at: Instant,
substream_counter: AtomicRefCounter,
handle_counter: Arc<()>,
peer_identity_claim: Option<PeerIdentityClaim>,
}
impl PeerConnection {
pub(crate) fn new(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
peer_identity_claim: PeerIdentityClaim,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: Some(peer_identity_claim),
}
}
/// Should only be used in tests
pub(crate) fn unverified(
id: ConnectionId,
request_tx: mpsc::Sender<PeerConnectionRequest>,
peer_node_id: NodeId,
peer_features: PeerFeatures,
address: Multiaddr,
direction: ConnectionDirection,
substream_counter: AtomicRefCounter,
) -> Self {
Self {
id,
request_tx,
peer_node_id,
peer_features,
address: Arc::new(address),
direction,
started_at: Instant::now(),
substream_counter,
handle_counter: Arc::new(()),
peer_identity_claim: None,
}
}
pub fn peer_node_id(&self) -> &NodeId {
&self.peer_node_id
}
pub fn peer_features(&self) -> PeerFeatures {
self.peer_features
}
pub fn direction(&self) -> ConnectionDirection {
self.direction
}
pub fn address(&self) -> &Multiaddr {
&self.address
}
pub fn id(&self) -> ConnectionId {
self.id
}
pub fn is_connected(&self) -> bool {
!self.request_tx.is_closed()
}
/// Returns an owned future that resolves on disconnection
pub fn on_disconnect(&self) -> impl Future<Output = ()> + 'static {
let request_tx = self.request_tx.clone();
async move { request_tx.closed().await }
}
pub fn age(&self) -> Duration {
self.started_at.elapsed()
}
pub fn substream_count(&self) -> usize {
self.substream_counter.get()
}
pub fn handle_count(&self) -> usize {
Arc::strong_count(&self.handle_counter)
}
pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> {
self.peer_identity_claim.as_ref()
}
#[tracing::instrument(level = "trace", "peer_connection::open_substream", skip(self))]
pub async fn open_substream(
&mut self,
protocol_id: &ProtocolId,
) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::OpenSubstream {
protocol_id: protocol_id.clone(),
reply_tx,
})
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
#[tracing::instrument(level = "trace", "peer_connection::open_framed_substream", skip(self))]
pub async fn open_framed_substream(
&mut self,
protocol_id: &ProtocolId,
max_frame_size: usize,
) -> Result<CanonicalFraming<Substream>, PeerConnectionError> {
let substream = self.open_substream(protocol_id).await?;
Ok(framing::canonical(substream.stream, max_frame_size))
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc", level="trace", skip(self), fields(peer_node_id = self.peer_node_id.to_string().as_str()))]
pub async fn connect_rpc<T>(&mut self) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
self.connect_rpc_using_builder(Default::default()).await
}
#[cfg(feature = "rpc")]
#[tracing::instrument("peer_connection::connect_rpc_with_builder", level = "trace", skip(self, builder))]
pub async fn connect_rpc_using_builder<T>(&mut self, builder: RpcClientBuilder<T>) -> Result<T, RpcError>
where T: From<RpcClient> + NamedProtocolService {
let protocol = ProtocolId::from_static(T::PROTOCOL_NAME);
debug!(
target: LOG_TARGET,
"Attempting to establish RPC protocol `{}` to peer `{}`",
String::from_utf8_lossy(&protocol),
self.peer_node_id
);
let framed = self.open_framed_substream(&protocol, RPC_MAX_FRAME_SIZE).await?;
builder
.with_protocol_id(protocol)
.with_node_id(self.peer_node_id.clone())
.connect(framed)
.await
}
/// Creates a new RpcClientPool that can be shared between tasks. The client pool will lazily establish up to
/// `max_sessions` sessions and provides the least-used client session.
#[cfg(feature = "rpc")]
pub fn create_rpc_client_pool<T>(
&self,
max_sessions: usize,
client_config: RpcClientBuilder<T>,
) -> RpcClientPool<T>
where
T: RpcPoolClient + From<RpcClient> + NamedProtocolService + Clone,
{
RpcClientPool::new(self.clone(), max_sessions, client_config)
}
/// Immediately disconnects the peer connection. This can only fail if the peer connection worker
/// is shut down (and the peer is already disconnected)
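///
/// A rough sketch (not from the original source), assuming `conn` is a mutable
/// `PeerConnection` handle:
/// ```ignore
/// if let Err(err) = conn.disconnect().await {
///     println!("peer connection worker already shut down: {:?}", err);
/// }
/// ```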
pub async fn disconnect(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(false, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
pub(crate) async fn disconnect_silent(&mut self) -> Result<(), PeerConnectionError> {
let (reply_tx, reply_rx) = oneshot::channel();
self.request_tx
.send(PeerConnectionRequest::Disconnect(true, reply_tx))
.await?;
reply_rx
.await
.map_err(|_| PeerConnectionError::InternalReplyCancelled)?
}
}
impl fmt::Display for PeerConnection {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(
f,
"Id: {}, Node ID: {}, Direction: {}, Peer Address: {}, Age: {:.0?}, #Substreams: {}, #Refs: {}",
self.id,
self.peer_node_id.short_str(),
self.direction,
self.address,
self.age(),
self.substream_count(),
self.handle_count()
)
}
}
impl PartialEq for PeerConnection {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
/// Actor for an active connection to a peer.
struct PeerConnectionActor {
id: ConnectionId,
peer_node_id: NodeId,
request_rx: mpsc::Receiver<PeerConnectionRequest>,
direction: ConnectionDirection,
incoming_substreams: IncomingSubstreams,
control: Control,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
}
impl PeerConnectionActor {
fn new(
id: ConnectionId,
peer_node_id: NodeId,
direction: ConnectionDirection,
connection: Yamux,
request_rx: mpsc::Receiver<PeerConnectionRequest>,
event_notifier: mpsc::Sender<ConnectionManagerEvent>,
our_supported_protocols: Vec<ProtocolId>,
their_supported_protocols: Vec<ProtocolId>,
) -> Self {
Self {
id,
peer_node_id,
direction,
control: connection.get_yamux_control(),
incoming_substreams: connection.into_incoming(),
request_rx,
event_notifier,
our_supported_protocols,
their_supported_protocols,
}
}
pub async fn run(mut self) {
loop {
tokio::select! {
maybe_request = self.request_rx.recv() => {
match maybe_request {
Some(request) => self.handle_request(request).await,
None => {
debug!(target: LOG_TARGET, "[{}] All peer connection handles dropped, closing the connection", self);
break;
}
}
},
maybe_substream = self.incoming_substreams.next() => {
match maybe_substream {
Some(substream) => {
if let Err(err) = self.handle_incoming_substream(substream).await {
error!(
target: LOG_TARGET,
"[{}] Incoming substream for peer '{}' failed to open because '{error}'",
self,
self.peer_node_id.short_str(),
error = err
)
}
},
None => {
debug!(target: LOG_TARGET, "[{}] Peer '{}' closed the connection", self, self.peer_node_id.short_str());
break;
},
}
}
}
}
if let Err(err) = self.disconnect(false).await {
warn!(
target: LOG_TARGET,
"[{}] Failed to politely close connection to peer '{}' because '{}'",
self,
self.peer_node_id.short_str(),
err
);
}
}
async fn handle_request(&mut self, request: PeerConnectionRequest) {
use PeerConnectionRequest::{Disconnect, OpenSubstream};
match request {
OpenSubstream { protocol_id, reply_tx } => {
let tracing_id = tracing::Span::current().id();
let span = span!(Level::TRACE, "handle_request");
span.follows_from(tracing_id);
let result = self.open_negotiated_protocol_stream(protocol_id).instrument(span).await;
log_if_error_fmt!(
target: LOG_TARGET,
reply_tx.send(result),
"Reply oneshot closed when sending reply",
);
},
Disconnect(silent, reply_tx) => {
debug!(
target: LOG_TARGET,
"[{}] Disconnect{}requested for {} connection to peer '{}'",
self,
if silent { " (silent) " } else { " " },
self.direction,
self.peer_node_id.short_str()
);
let _result = reply_tx.send(self.disconnect(silent).await);
},
}
}
#[tracing::instrument(level="trace", skip(self, stream),fields(comms.direction="inbound"))]
async fn handle_incoming_substream(&mut self, mut stream: Substream) -> Result<(), PeerConnectionError> {
let selected_protocol = ProtocolNegotiation::new(&mut stream)
.negotiate_protocol_inbound(&self.our_supported_protocols)
.await?;
self.notify_event(ConnectionManagerEvent::NewInboundSubstream(
self.peer_node_id.clone(),
selected_protocol,
stream,
))
.await;
Ok(())
}
#[tracing::instrument(skip(self))]
async fn open_negotiated_protocol_stream(
&mut self,
protocol: ProtocolId,
) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> {
const PROTOCOL_NEGOTIATION_TIMEOUT: Duration = Duration::from_secs(10);
debug!(
target: LOG_TARGET,
"[{}] Negotiating protocol '{}' on new substream for peer '{}'",
self,
String::from_utf8_lossy(&protocol),
self.peer_node_id.short_str()
);
let mut stream = self.control.open_stream().await?;
let mut negotiation = ProtocolNegotiation::new(&mut stream);
let selected_protocol = if self.their_supported_protocols.contains(&protocol) {
let fut = negotiation.negotiate_protocol_outbound_optimistic(&protocol);
time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await??
} else {
let selected_protocols = [protocol];
let fut = negotiation.negotiate_protocol_outbound(&selected_protocols);
time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await??
};
Ok(NegotiatedSubstream::new(selected_protocol, stream))
}
async fn notify_event(&mut self, event: ConnectionManagerEvent) {
let _result = self.event_notifier.send(event).await;
}
/// Disconnect this peer connection.
///
/// # Arguments
///
/// silent - true to suppress the PeerDisconnected event, false to publish the event
async fn disconnect(&mut self, silent: bool) -> Result<(), PeerConnectionError> {
self.request_rx.close();
match self.control.close().await {
Err(yamux::ConnectionError::Closed) => {
debug!(
target: LOG_TARGET,
"(Peer = {}) Connection already closed",
self.peer_node_id.short_str()
);
return Ok(());
},
// Only emit closed event once
_ => {
if !silent {
self.notify_event(ConnectionManagerEvent::PeerDisconnected(
self.id,
self.peer_node_id.clone(),
))
.await;
}
},
}
debug!(
target: LOG_TARGET,
"(Peer = {}) Connection closed",
self.peer_node_id.short_str()
);
Ok(())
}
}
impl fmt::Display for PeerConnectionActor {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"PeerConnection(id={}, peer_node_id={}, direction={})",
self.id,
self.peer_node_id.short_str(),
self.direction,
)
}
}
/// Contains the substream and the ProtocolId that was successfully negotiated.
pub struct NegotiatedSubstream<TSubstream> {
pub protocol: ProtocolId,
pub stream | peer_features: PeerFeatures,
request_tx: mpsc::Sender<PeerConnectionRequest>, | random_line_split |
lib.rs | //! Usage
//! -----
//!
//! For simple applications, use one of the utility functions `listen` and `connect`:
//!
//! `listen` accepts a string that represents a socket address and a Factory, see
//! [Architecture](#architecture).
//!
//! ```no_run
//! // A WebSocket echo server
//!
//! use ws::listen;
//!
//! listen("127.0.0.1:3012", |out| {
//! move |msg| {
//! out.send(msg)
//! }
//! }).unwrap()
//! ```
//!
//! `connect` accepts a string that represents a WebSocket URL (i.e. one that starts with ws://),
//! and it will attempt to connect to a WebSocket server at that location. It also accepts a
//! Factory.
//!
//! ```no_run
//! // A WebSocket client that sends one message then closes
//!
//! use ws::{connect, CloseCode};
//!
//! connect("ws://127.0.0.1:3012", |out| {
//! out.send("Hello WebSocket").unwrap();
//!
//! move |msg| {
//! println!("Got message: {}", msg);
//! out.close(CloseCode::Normal)
//! }
//! }).unwrap()
//! ```
//!
//! Each of these functions encapsulates a mio EventLoop, creating and running a WebSocket in the
//! current thread. These are blocking functions, so they will only return after the encapsulated
//! WebSocket has been shut down.
//!
//! Architecture
//! ------
//!
//! A WebSocket requires two basic components: a Factory and a Handler. A Factory is any struct
//! that implements the `Factory` trait. WS-RS already provides an implementation of `Factory` for
//! closures, so it is possible to pass a closure as a Factory to either of the utility functions.
//! Your Factory will be called each time the underlying TCP connection has been successfully
//! established, and it will need to return a Handler that will handle the new WebSocket connection.
//!
//! Factories can be used to manage state that applies to multiple WebSocket connections,
//! whereas Handlers manage the state of individual connections. Most of the time, a closure
//! Factory is sufficient, and you will only need to focus on writing your Handler.
//! Your Factory will be passed a Sender struct that represents the output of the WebSocket.
//! The Sender allows the Handler to send messages, initiate a WebSocket closing handshake
//! by sending a close code, and other useful actions. If you need to send messages from other parts
//! of your application it is possible to clone and send the Sender across threads allowing
//! other code to send messages on the WebSocket without blocking the event loop.
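//!
//! A rough sketch (not in the original docs) of handing a cloned Sender to another thread:
//!
//! ```no_run
//! use std::thread;
//! use ws::listen;
//!
//! listen("127.0.0.1:3012", |out| {
//!     // `Sender` can be cloned and moved to another thread, which can then
//!     // queue messages without blocking the event loop.
//!     let broadcaster = out.clone();
//!     thread::spawn(move || {
//!         broadcaster.send("hello from another thread").unwrap();
//!     });
//!
//!     move |msg| out.send(msg)
//! }).unwrap()
//! ```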
//!
//! Just as with the Factory, it is possible to use a closure as a simple Handler. The closure must
//! take a Message as its only argument, and it may close over variables that exist in
//! the Factory. For example, in the above examples using `listen` and `connect`, the closure
//! Factory returns another closure as the Handler for the new connection. This closure closes over
//! the variable `out`, which is the Sender, representing the output of the WebSocket, so that it
//! can use that sender later to send a Message. Closure Handlers generally need to take ownership of the variables
//! that they close over because the Factory may be called multiple times. Think of Handlers as
//! though they are threads and Rust's memory model should make sense. Closure Handlers must return
//! a `Result<()>`, in order to handle errors without panicking.
//!
//! In the above examples, `out.close` and `out.send` both actually return a `Result<()>` indicating
//! whether they were able to schedule the requested command (either `close` or `send`) with the
//! EventLoop.
//!
//! *It is important that your Handler does not panic carelessly because a handler that panics will
//! disconnect every other connection that is using that WebSocket. Don't panic unless you want all
//! connections to immediately fail.*
//!
//! Guide
//! -----
//!
//! You may have noticed in the usage examples that the client example calls `unwrap` when sending the first
//! message, which will panic in the factory if the Message can't be sent for some reason. Also,
//! sending messages before a handler is returned means that the message will be queued before
//! the WebSocket handshake is complete. The handshake could fail for some reason, and then the
//! queued message would be wasted effort. Sending messages in the Factory is not bad for simple,
//! short-lived, or toy projects, but let's explore writing a handler that is better for
//! long-running applications.
//!
//! In order to solve the problem of sending a message immediately when a WebSocket connection is
//! established, you will need to write a Handler that implements the `on_open` method. For
//! example:
//!
//! ```no_run
//! use ws::{connect, Handler, Sender, Handshake, Result, Message, CloseCode};
//!
//! // Our Handler struct.
//! // Here we explicitly indicate that the Client needs a Sender,
//! // whereas a closure captures the Sender for us automatically.
//! struct Client {
//! out: Sender,
//! }
//!
//! // We implement the Handler trait for Client so that we can get more
//! // fine-grained control of the connection.
//! impl Handler for Client {
//!
//! // `on_open` will be called only after the WebSocket handshake is successful
//! // so at this point we know that the connection is ready to send/receive messages.
//! // We ignore the `Handshake` for now, but you could also use this method to setup
//! // Handler state or reject the connection based on the details of the Request
//! // or Response, such as by checking cookies or Auth headers.
//! fn on_open(&mut self, _: Handshake) -> Result<()> {
//! // Now we don't need to call unwrap since `on_open` returns a `Result<()>`.
//! // If this call fails, it will only result in this connection disconnecting.
//! self.out.send("Hello WebSocket")
//! }
//!
//! // `on_message` is roughly equivalent to the Handler closure. It takes a `Message`
//! // and returns a `Result<()>`.
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Close the connection when we get a response from the server
//! println!("Got message: {}", msg);
//! self.out.close(CloseCode::Normal)
//! }
//! }
//!
//! // Now, instead of a closure, the Factory returns a new instance of our Handler.
//! connect("ws://127.0.0.1:3012", |out| { Client { out: out } }).unwrap()
//! ```
//!
//! That is a big increase in verbosity in order to accomplish the same effect as the
//! original example, but this way is more flexible and gives you access to more of the underlying
//! details of the WebSocket connection.
//!
//! Another method you will probably want to implement is `on_close`. This method is called anytime
//! the other side of the WebSocket connection attempts to close the connection. Implementing
//! `on_close` gives you a mechanism for informing the user regarding why the WebSocket connection
//! may have been closed, and it also gives you an opportunity to clean up any resources or state
//! that may be dependent on the connection that is now about to disconnect.
//!
//! An example server might use this as follows:
//!
//! ```no_run
//! use ws::{listen, Handler, Sender, Result, Message, CloseCode};
//!
//! struct Server {
//! out: Sender,
//! }
//!
//! impl Handler for Server {
//!
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Echo the message back
//! self.out.send(msg)
//! }
//!
//! fn on_close(&mut self, code: CloseCode, reason: &str) {
//! // The WebSocket protocol allows for a utf8 reason for the closing state after the
//! // close code. WS-RS will attempt to interpret this data as a utf8 description of the
//! // reason for closing the connection. In many cases, `reason` will be an empty string.
//! // So, you may not normally want to display `reason` to the user,
//! // but let's assume that we know that `reason` is human-readable.
//! match code {
//! CloseCode::Normal => println!("The client is done with the connection."),
//! CloseCode::Away => println!("The client is leaving the site."),
//! _ => println!("The client encountered an error: {}", reason),
//! }
//! }
//! }
//!
//! listen("127.0.0.1:3012", |out| { Server { out: out } }).unwrap()
//! ```
//!
//! Errors don't just occur on the other side of the connection, sometimes your code will encounter
//! an exceptional state too. You can access errors by implementing `on_error`. By implementing
//! `on_error` you can inform the user of an error and tear down any resources that you may have
//! setup for the connection, but which are not owned by the Handler. Also, note that certain kinds
//! of errors have certain ramifications within the WebSocket protocol. WS-RS will take care of
//! sending the appropriate close code.
//!
//! A server that tracks state outside of the handler might be as follows:
//!
//! ```no_run
//!
//! use std::rc::Rc;
//! use std::cell::RefCell;
//!
//! use ws::{listen, Handler, Sender, Result, Message, Handshake, CloseCode, Error};
//!
//! struct Server {
//! out: Sender,
//! count: Rc<RefCell<usize>>,
//! }
//!
//! impl Handler for Server {
//!
//! fn on_open(&mut self, _: Handshake) -> Result<()> {
//! // We have a new connection, so we increment the connection counter
//! Ok(*self.count.borrow_mut() += 1)
//! }
//!
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Tell the user the current count
//! println!("The number of live connections is {}", *self.count.borrow());
//!
//! // Echo the message back
//! self.out.send(msg)
//! }
//!
//! fn on_close(&mut self, code: CloseCode, reason: &str) {
//! match code {
//! CloseCode::Normal => println!("The client is done with the connection."),
//! CloseCode::Away => println!("The client is leaving the site."),
//! _ => println!("The client encountered an error: {}", reason),
//! }
//!
//! // The connection is going down, so we need to decrement the count
//! *self.count.borrow_mut() -= 1
//! }
//!
//! fn on_error(&mut self, err: Error) {
//! println!("The server encountered an error: {:?}", err);
//!
//! // The connection is going down, so we need to decrement the count
//! *self.count.borrow_mut() -= 1
//! }
//!
//! }
//! // RefCell enforces Rust borrowing rules at runtime.
//! // Calling borrow_mut will panic if the count is already borrowed,
//! // but we know already that only one handler at a time will ever try to change the count.
//! // Rc is a reference-counted box for sharing the count between handlers
//! // since each handler needs to own its contents.
//! let count = Rc::new(RefCell::new(0));
//! listen("127.0.0.1:3012", |out| { Server { out: out, count: count.clone() } }).unwrap()
//! ```
//!
//! There are other Handler methods that allow even more fine-grained access, but most applications
//! will usually only need these four methods.
//!
extern crate httparse;
extern crate mio;
extern crate sha1;
extern crate rand;
extern crate url;
#[macro_use] extern crate log;
mod result;
mod connection;
mod frame;
mod message;
mod handshake;
mod protocol;
mod communication;
mod io;
pub use connection::factory::Factory;
pub use connection::factory::Settings as WebSocketSettings;
pub use connection::handler::Handler;
pub use connection::handler::Settings as ConnectionSettings;
pub use result::{Result, Error};
pub use result::Kind as ErrorKind;
pub use message::Message;
pub use communication::Sender;
pub use protocol::CloseCode;
pub use handshake::{Handshake, Request, Response};
use std::fmt;
use std::net::ToSocketAddrs;
use mio::EventLoopConfig;
use std::borrow::Borrow;
/// A utility function for setting up a WebSocket server.
///
/// # Safety
///
/// This function blocks until the EventLoop finishes running. Avoid calling this method within
/// another WebSocket handler.
///
/// # Examples
///
/// ```no_run
/// use ws::listen;
///
/// listen("127.0.0.1:3012", |out| {
/// move |msg| {
/// out.send(msg)
/// }
/// }).unwrap()
/// ```
///
pub fn listen<A, F, H>(addr: A, factory: F) -> Result<()>
where
A: ToSocketAddrs + fmt::Debug,
F: FnMut(Sender) -> H,
H: Handler,
{
let ws = try!(WebSocket::new(factory));
try!(ws.listen(addr));
Ok(())
}
/// A utility function for setting up a WebSocket client.
///
/// # Safety
///
/// This function blocks until the EventLoop finishes running. Avoid calling this method within
/// another WebSocket handler. If you need to establish a connection from inside of a handler,
/// use the `connect` method on the Sender.
///
/// # Examples
///
/// ```no_run
/// use ws::{connect, CloseCode};
///
/// connect("ws://127.0.0.1:3012", |out| {
/// out.send("Hello WebSocket").unwrap();
///
/// move |msg| {
/// println!("Got message: {}", msg);
/// out.close(CloseCode::Normal)
/// }
/// }).unwrap()
/// ```
///
pub fn connect<U, F, H>(url: U, factory: F) -> Result<()>
where
U: Borrow<str>,
F: FnMut(Sender) -> H,
H: Handler
{
let mut ws = try!(WebSocket::new(factory));
let parsed = try!(
url::Url::parse(url.borrow())
.map_err(|err| Error::new(
ErrorKind::Internal,
format!("Unable to parse {} as url due to {:?}", url.borrow(), err))));
try!(ws.connect(parsed));
try!(ws.run());
Ok(())
}
/// The WebSocket struct. A WebSocket can support multiple incoming and outgoing connections.
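///
/// A minimal sketch (not in the original docs), relying on the closure `Factory`/`Handler`
/// impls described in the crate-level documentation:
///
/// ```no_run
/// use ws::WebSocket;
///
/// let socket = WebSocket::new(|out: ws::Sender| {
///     move |msg| out.send(msg)
/// }).unwrap();
/// socket.listen("127.0.0.1:3012").unwrap();
/// ```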
pub struct WebSocket<F>
where F: Factory
{
event_loop: io::Loop<F>,
handler: io::Handler<F>,
}
impl<F> WebSocket<F>
where F: Factory
{
/// Create a new WebSocket using the given Factory to create handlers.
pub fn new(mut factory: F) -> Result<WebSocket<F>> {
let max = factory.settings().max_connections;
let mut config = EventLoopConfig::new();
config.notify_capacity(max + 1000);
WebSocket::with_config(factory, config)
}
/// Create a new WebSocket with a Factory and use the event loop config to provide settings for
/// the event loop.
pub fn with_config(factory: F, config: EventLoopConfig) -> Result<WebSocket<F>> {
Ok(WebSocket {
event_loop: try!(io::Loop::configured(config)),
handler: io::Handler::new(factory),
})
}
/// Consume the WebSocket and listen for new connections on the specified address.
///
/// # Safety
///
/// This method will block until the event loop finishes running.
pub fn listen<A>(mut self, addr_spec: A) -> Result<WebSocket<F>>
where A: ToSocketAddrs + fmt::Debug
{
let mut result = Err(Error::new(ErrorKind::Internal, format!("Unable to listen on {:?}", addr_spec)));
for addr in try!(addr_spec.to_socket_addrs()) {
result = self.handler.listen(&mut self.event_loop, &addr).map(|_| ());
if result.is_ok() |
}
result.map(|_| self)
}
/// Queue an outgoing connection on this WebSocket. This method may be called multiple times,
/// but the actual connections will not be established until after `run` is called.
pub fn connect(&mut self, url: url::Url) -> Result<&mut WebSocket<F>> {
let sender = Sender::new(io::ALL, self.event_loop.channel());
try!(sender.connect(url));
Ok(self)
}
/// Run the WebSocket. This will run the encapsulated event loop blocking until the WebSocket
/// is shut down.
pub fn run(mut self) -> Result<WebSocket<F>> {
try!(self.event_loop.run(&mut self.handler));
Ok(self)
}
}
| {
return self.run()
} | conditional_block |
lib.rs | //! Usage
//! -----
//!
//! For simple applications, use one of the utility functions `listen` and `connect`:
//!
//! `listen` accepts a string that represents a socket address and a Factory, see
//! [Architecture](#architecture).
//!
//! ```no_run
//! // A WebSocket echo server
//!
//! use ws::listen;
//!
//! listen("127.0.0.1:3012", |out| {
//! move |msg| {
//! out.send(msg)
//! }
//! }).unwrap()
//! ```
//!
//! `connect` accepts a string that represents a WebSocket URL (i.e. one that starts with ws://),
//! and it will attempt to connect to a WebSocket server at that location. It also accepts a
//! Factory.
//!
//! ```no_run
//! // A WebSocket client that sends one message then closes
//!
//! use ws::{connect, CloseCode};
//!
//! connect("ws://127.0.0.1:3012", |out| {
//! out.send("Hello WebSocket").unwrap();
//!
//! move |msg| {
//! println!("Got message: {}", msg);
//! out.close(CloseCode::Normal)
//! }
//! }).unwrap()
//! ```
//!
//! Each of these functions encapsulates a mio EventLoop, creating and running a WebSocket in the
//! current thread. These are blocking functions, so they will only return after the encapsulated
//! WebSocket has been shut down.
//!
//! Architecture
//! ------
//!
//! A WebSocket requires two basic components: a Factory and a Handler. A Factory is any struct
//! that implements the `Factory` trait. WS-RS already provides an implementation of `Factory` for
//! closures, so it is possible to pass a closure as a Factory to either of the utility functions.
//! Your Factory will be called each time the underlying TCP connection has been successfully
//! established, and it will need to return a Handler that will handle the new WebSocket connection.
//!
//! Factories can be used to manage state that applies to multiple WebSocket connections,
//! whereas Handlers manage the state of individual connections. Most of the time, a closure
//! Factory is sufficient, and you will only need to focus on writing your Handler.
//! Your Factory will be passed a Sender struct that represents the output of the WebSocket.
//! The Sender allows the Handler to send messages, initiate a WebSocket closing handshake
//! by sending a close code, and other useful actions. If you need to send messages from other parts
//! of your application it is possible to clone and send the Sender across threads allowing
//! other code to send messages on the WebSocket without blocking the event loop.
//!
//! Just as with the Factory, it is possible to use a closure as a simple Handler. The closure must
//! take a Message as its only argument, and it may close over variables that exist in
//! the Factory. For example, in the above examples using `listen` and `connect`, the closure
//! Factory returns another closure as the Handler for the new connection. This closure closes over
//! the variable `out`, which is the Sender, representing the output of the WebSocket, so that it
//! can use that sender later to send a Message. Closure Handlers generally need to take ownership of the variables
//! that they close over because the Factory may be called multiple times. Think of Handlers as
//! though they are threads and Rust's memory model should make sense. Closure Handlers must return
//! a `Result<()>`, in order to handle errors without panicking.
//!
//! In the above examples, `out.close` and `out.send` both actually return a `Result<()>` indicating
//! whether they were able to schedule the requested command (either `close` or `send`) with the
//! EventLoop.
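//!
//! A minimal sketch (not in the original docs) of inspecting that result instead of
//! calling `unwrap`:
//!
//! ```no_run
//! use ws::listen;
//!
//! listen("127.0.0.1:3012", |out| {
//!     move |msg| {
//!         let result = out.send(msg);
//!         if let Err(ref err) = result {
//!             // Log the failure rather than panicking the handler.
//!             println!("Unable to queue message: {:?}", err);
//!         }
//!         result
//!     }
//! }).unwrap()
//! ```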
//!
//! *It is important that your Handler does not panic carelessly because a handler that panics will
//! disconnect every other connection that is using that WebSocket. Don't panic unless you want all
//! connections to immediately fail.*
//!
//! Guide
//! -----
//!
//! You may have noticed in the usage examples that the client example calls `unwrap` when sending the first
//! message, which will panic in the factory if the Message can't be sent for some reason. Also,
//! sending messages before a handler is returned means that the message will be queued before
//! the WebSocket handshake is complete. The handshake could fail for some reason, and then the
//! queued message would be wasted effort. Sending messages in the Factory is not bad for simple,
//! short-lived, or toy projects, but let's explore writing a handler that is better for
//! long-running applications.
//!
//! In order to solve the problem of sending a message immediately when a WebSocket connection is
//! established, you will need to write a Handler that implements the `on_open` method. For
//! example:
//!
//! ```no_run
//! use ws::{connect, Handler, Sender, Handshake, Result, Message, CloseCode};
//!
//! // Our Handler struct.
//! // Here we explicitly indicate that the Client needs a Sender,
//! // whereas a closure captures the Sender for us automatically.
//! struct Client {
//! out: Sender,
//! }
//!
//! // We implement the Handler trait for Client so that we can get more
//! // fine-grained control of the connection.
//! impl Handler for Client {
//!
//! // `on_open` will be called only after the WebSocket handshake is successful
//! // so at this point we know that the connection is ready to send/receive messages.
//! // We ignore the `Handshake` for now, but you could also use this method to setup
//! // Handler state or reject the connection based on the details of the Request
//! // or Response, such as by checking cookies or Auth headers.
//! fn on_open(&mut self, _: Handshake) -> Result<()> {
//! // Now we don't need to call unwrap since `on_open` returns a `Result<()>`.
//! // If this call fails, it will only result in this connection disconnecting.
//! self.out.send("Hello WebSocket")
//! }
//!
//! // `on_message` is roughly equivalent to the Handler closure. It takes a `Message`
//! // and returns a `Result<()>`.
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Close the connection when we get a response from the server
//! println!("Got message: {}", msg);
//! self.out.close(CloseCode::Normal)
//! }
//! }
//!
//! // Now, instead of a closure, the Factory returns a new instance of our Handler.
//! connect("ws://127.0.0.1:3012", |out| { Client { out: out } }).unwrap()
//! ```
//!
//! That is a big increase in verbosity in order to accomplish the same effect as the
//! original example, but this way is more flexible and gives you access to more of the underlying
//! details of the WebSocket connection.
//!
//! Another method you will probably want to implement is `on_close`. This method is called anytime
//! the other side of the WebSocket connection attempts to close the connection. Implementing
//! `on_close` gives you a mechanism for informing the user regarding why the WebSocket connection
//! may have been closed, and it also gives you an opportunity to clean up any resources or state
//! that may be dependent on the connection that is now about to disconnect.
//!
//! An example server might use this as follows:
//!
//! ```no_run
//! use ws::{listen, Handler, Sender, Result, Message, CloseCode};
//!
//! struct Server {
//! out: Sender,
//! }
//!
//! impl Handler for Server {
//!
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Echo the message back
//! self.out.send(msg)
//! }
//!
//! fn on_close(&mut self, code: CloseCode, reason: &str) {
//! // The WebSocket protocol allows for a utf8 reason for the closing state after the
//! // close code. WS-RS will attempt to interpret this data as a utf8 description of the
//! // reason for closing the connection. In many cases, `reason` will be an empty string.
//! // So, you may not normally want to display `reason` to the user,
//! // but let's assume that we know that `reason` is human-readable.
//! match code {
//! CloseCode::Normal => println!("The client is done with the connection."),
//! CloseCode::Away => println!("The client is leaving the site."),
//! _ => println!("The client encountered an error: {}", reason),
//! }
//! }
//! }
//!
//! listen("127.0.0.1:3012", |out| { Server { out: out } }).unwrap()
//! ```
//!
//! Errors don't just occur on the other side of the connection, sometimes your code will encounter
//! an exceptional state too. You can access errors by implementing `on_error`. By implementing
//! `on_error` you can inform the user of an error and tear down any resources that you may have
//! setup for the connection, but which are not owned by the Handler. Also, note that certain kinds
//! of errors have certain ramifications within the WebSocket protocol. WS-RS will take care of
//! sending the appropriate close code.
//!
//! A server that tracks state outside of the handler might be as follows:
//!
//! ```no_run
//!
//! use std::rc::Rc;
//! use std::cell::RefCell;
//!
//! use ws::{listen, Handler, Sender, Result, Message, Handshake, CloseCode, Error};
//!
//! struct Server {
//! out: Sender,
//! count: Rc<RefCell<usize>>,
//! }
//!
//! impl Handler for Server {
//!
//! fn on_open(&mut self, _: Handshake) -> Result<()> {
//! // We have a new connection, so we increment the connection counter
//! Ok(*self.count.borrow_mut() += 1)
//! }
//!
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Tell the user the current count
//! println!("The number of live connections is {}", *self.count.borrow());
//!
//! // Echo the message back
//! self.out.send(msg)
//! }
//!
//! fn on_close(&mut self, code: CloseCode, reason: &str) {
//! match code {
//! CloseCode::Normal => println!("The client is done with the connection."),
//! CloseCode::Away => println!("The client is leaving the site."),
//! _ => println!("The client encountered an error: {}", reason),
//! }
//!
//! // The connection is going down, so we need to decrement the count
//! *self.count.borrow_mut() -= 1
//! }
//!
//! fn on_error(&mut self, err: Error) {
//! println!("The server encountered an error: {:?}", err);
//!
//! // The connection is going down, so we need to decrement the count
//! *self.count.borrow_mut() -= 1
//! }
//!
//! }
//! // RefCell enforces Rust borrowing rules at runtime.
//! // Calling borrow_mut will panic if the count is already borrowed,
//! // but we know already that only one handler at a time will ever try to change the count.
//! // Rc is a reference-counted box for sharing the count between handlers
//! // since each handler needs to own its contents.
//! let count = Rc::new(RefCell::new(0));
//! listen("127.0.0.1:3012", |out| { Server { out: out, count: count.clone() } }).unwrap()
//! ```
//!
//! There are other Handler methods that allow even more fine-grained access, but most applications
//! will usually only need these four methods.
//!
extern crate httparse;
extern crate mio;
extern crate sha1;
extern crate rand;
extern crate url;
#[macro_use] extern crate log;
mod result;
mod connection;
mod frame;
mod message;
mod handshake;
mod protocol;
mod communication;
mod io;
pub use connection::factory::Factory;
pub use connection::factory::Settings as WebSocketSettings;
pub use connection::handler::Handler;
pub use connection::handler::Settings as ConnectionSettings;
pub use result::{Result, Error};
pub use result::Kind as ErrorKind;
pub use message::Message;
pub use communication::Sender;
pub use protocol::CloseCode;
pub use handshake::{Handshake, Request, Response};
use std::fmt;
use std::net::ToSocketAddrs;
use mio::EventLoopConfig;
use std::borrow::Borrow;
/// A utility function for setting up a WebSocket server.
///
/// # Safety
///
/// This function blocks until the EventLoop finishes running. Avoid calling this method within
/// another WebSocket handler.
///
/// # Examples
///
/// ```no_run
/// use ws::listen;
///
/// listen("127.0.0.1:3012", |out| {
/// move |msg| {
/// out.send(msg)
/// }
/// }).unwrap()
/// ```
///
pub fn listen<A, F, H>(addr: A, factory: F) -> Result<()>
where
A: ToSocketAddrs + fmt::Debug,
F: FnMut(Sender) -> H,
H: Handler,
{
let ws = try!(WebSocket::new(factory));
try!(ws.listen(addr));
Ok(())
}
/// A utility function for setting up a WebSocket client.
///
/// # Safety
///
/// This function blocks until the EventLoop finishes running. Avoid calling this method within
/// another WebSocket handler. If you need to establish a connection from inside of a handler,
/// use the `connect` method on the Sender.
///
/// # Examples
///
/// ```no_run
/// use ws::{connect, CloseCode};
///
/// connect("ws://127.0.0.1:3012", |out| {
/// out.send("Hello WebSocket").unwrap();
///
/// move |msg| {
/// println!("Got message: {}", msg);
/// out.close(CloseCode::Normal)
/// }
/// }).unwrap()
/// ```
///
pub fn | <U, F, H>(url: U, factory: F) -> Result<()>
where
U: Borrow<str>,
F: FnMut(Sender) -> H,
H: Handler
{
let mut ws = try!(WebSocket::new(factory));
let parsed = try!(
url::Url::parse(url.borrow())
.map_err(|err| Error::new(
ErrorKind::Internal,
format!("Unable to parse {} as url due to {:?}", url.borrow(), err))));
try!(ws.connect(parsed));
try!(ws.run());
Ok(())
}
/// The WebSocket struct. A WebSocket can support multiple incoming and outgoing connections.
pub struct WebSocket<F>
where F: Factory
{
event_loop: io::Loop<F>,
handler: io::Handler<F>,
}
impl<F> WebSocket<F>
where F: Factory
{
/// Create a new WebSocket using the given Factory to create handlers.
pub fn new(mut factory: F) -> Result<WebSocket<F>> {
let max = factory.settings().max_connections;
let mut config = EventLoopConfig::new();
config.notify_capacity(max + 1000);
WebSocket::with_config(factory, config)
}
/// Create a new WebSocket with a Factory and use the event loop config to provide settings for
/// the event loop.
pub fn with_config(factory: F, config: EventLoopConfig) -> Result<WebSocket<F>> {
Ok(WebSocket {
event_loop: try!(io::Loop::configured(config)),
handler: io::Handler::new(factory),
})
}
/// Consume the WebSocket and listen for new connections on the specified address.
///
/// # Safety
///
/// This method will block until the event loop finishes running.
pub fn listen<A>(mut self, addr_spec: A) -> Result<WebSocket<F>>
where A: ToSocketAddrs + fmt::Debug
{
let mut result = Err(Error::new(ErrorKind::Internal, format!("Unable to listen on {:?}", addr_spec)));
for addr in try!(addr_spec.to_socket_addrs()) {
result = self.handler.listen(&mut self.event_loop, &addr).map(|_| ());
if result.is_ok() {
return self.run()
}
}
result.map(|_| self)
}
/// Queue an outgoing connection on this WebSocket. This method may be called multiple times,
/// but the actual connections will not be established until after `run` is called.
pub fn connect(&mut self, url: url::Url) -> Result<&mut WebSocket<F>> {
let sender = Sender::new(io::ALL, self.event_loop.channel());
try!(sender.connect(url));
Ok(self)
}
/// Run the WebSocket. This will run the encapsulated event loop blocking until the WebSocket
/// is shut down.
pub fn run(mut self) -> Result<WebSocket<F>> {
try!(self.event_loop.run(&mut self.handler));
Ok(self)
}
}
| connect | identifier_name |
lib.rs | //! Usage
//! -----
//!
//! For simple applications, use one of the utility functions `listen` and `connect`:
//!
//! `listen` accepts a string that represents a socket address and a Factory, see
//! [Architecture](#architecture).
//!
//! ```no_run
//! // A WebSocket echo server
//!
//! use ws::listen;
//!
//! listen("127.0.0.1:3012", |out| {
//! move |msg| {
//! out.send(msg)
//! }
//! }).unwrap()
//! ```
//!
//! `connect` accepts a string that represents a WebSocket URL (i.e. one that starts with ws://),
//! and it will attempt to connect to a WebSocket server at that location. It also accepts a
//! Factory.
//!
//! ```no_run
//! // A WebSocket client that sends one message then closes
//!
//! use ws::{connect, CloseCode};
//!
//! connect("ws://127.0.0.1:3012", |out| {
//! out.send("Hello WebSocket").unwrap();
//!
//! move |msg| {
//! println!("Got message: {}", msg);
//! out.close(CloseCode::Normal)
//! }
//! }).unwrap()
//! ```
//!
//! Each of these functions encapsulates a mio EventLoop, creating and running a WebSocket in the
//! current thread. These are blocking functions, so they will only return after the encapsulated
//! WebSocket has been shut down.
//!
//! Architecture
//! ------
//!
//! A WebSocket requires two basic components: a Factory and a Handler. A Factory is any struct
//! that implements the `Factory` trait. WS-RS already provides an implementation of `Factory` for
//! closures, so it is possible to pass a closure as a Factory to either of the utility functions.
//! Your Factory will be called each time the underlying TCP connection has been successfully
//! established, and it will need to return a Handler that will handle the new WebSocket connection.
//!
//! Factories can be used to manage state that applies to multiple WebSocket connections,
//! whereas Handlers manage the state of individual connections. Most of the time, a closure
//! Factory is sufficient, and you will only need to focus on writing your Handler.
//! Your Factory will be passed a Sender struct that represents the output of the WebSocket.
//! The Sender allows the Handler to send messages, initiate a WebSocket closing handshake
//! by sending a close code, and other useful actions. If you need to send messages from other parts
//! of your application it is possible to clone and send the Sender across threads allowing
//! other code to send messages on the WebSocket without blocking the event loop.
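//!
//! For example, here is a rough sketch of handing a cloned Sender to a background
//! thread (assuming, as described above, that the Sender can be cloned and moved
//! across threads; the extra thread and its message are purely illustrative):
//!
//! ```no_run
//! use std::thread;
//! use ws::listen;
//!
//! listen("127.0.0.1:3012", |out| {
//!     // A clone of the Sender can leave the event loop thread and send
//!     // messages to this connection from elsewhere in the application.
//!     let broadcast = out.clone();
//!     thread::spawn(move || {
//!         broadcast.send("Hello from another thread").unwrap();
//!     });
//!
//!     // The Handler itself simply echoes incoming messages.
//!     move |msg| out.send(msg)
//! }).unwrap()
//! ```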
//!
//! Just as with the Factory, it is possible to use a closure as a simple Handler. The closure must
//! take a Message as its only argument, and it may close over variables that exist in
//! the Factory. For example, in the above examples using `listen` and `connect`, the closure
//! Factory returns another closure as the Handler for the new connection. This closure closes over
//! the variable `out`, which is the Sender, representing the output of the WebSocket, so that it
//! can use that sender later to send a Message. Closure Handlers generally need to take ownership of the variables
//! that they close over because the Factory may be called multiple times. Think of Handlers as
//! though they were separate threads, and Rust's ownership rules will make sense. Closure Handlers must return
//! a `Result<()>`, in order to handle errors without panicking.
//!
//! In the above examples, `out.close` and `out.send` both actually return a `Result<()>` indicating
//! whether they were able to schedule the requested command (either `close` or `send`) with the
//! EventLoop.
//!
//! *It is important that your Handler does not panic carelessly because a handler that panics will
//! disconnect every other connection that is using that WebSocket. Don't panic unless you want all
//! connections to immediately fail.*
//!
//! Guide
//! -----
//!
//! You may have noticed in the usage examples that the client example calls `unwrap` when sending the first
//! message, which will panic in the factory if the Message can't be sent for some reason. Also,
//! sending messages before a handler is returned means that the message will be queued before
//! the WebSocket handshake is complete. The handshake could fail for some reason, and then the
//! queued message would be wasted effort. Sending messages in the Factory is not bad for simple,
//! short-lived, or toy projects, but let's explore writing a handler that is better for
//! long-running applications.
//!
//! In order to solve the problem of sending a message immediately when a WebSocket connection is
//! established, you will need to write a Handler that implements the `on_open` method. For
//! example:
//!
//! ```no_run
//! use ws::{connect, Handler, Sender, Handshake, Result, Message, CloseCode};
//!
//! // Our Handler struct.
//! // Here we explicitly indicate that the Client needs a Sender,
//! // whereas a closure captures the Sender for us automatically.
//! struct Client {
//! out: Sender,
//! }
//!
//! // We implement the Handler trait for Client so that we can get more
//! // fine-grained control of the connection.
//! impl Handler for Client {
//!
//! // `on_open` will be called only after the WebSocket handshake is successful
//! // so at this point we know that the connection is ready to send/receive messages.
//!     // We ignore the `Handshake` for now, but you could also use this method to set up
//! // Handler state or reject the connection based on the details of the Request
//! // or Response, such as by checking cookies or Auth headers.
//! fn on_open(&mut self, _: Handshake) -> Result<()> {
//! // Now we don't need to call unwrap since `on_open` returns a `Result<()>`.
//! // If this call fails, it will only result in this connection disconnecting.
//! self.out.send("Hello WebSocket")
//! }
//!
//! // `on_message` is roughly equivalent to the Handler closure. It takes a `Message`
//! // and returns a `Result<()>`.
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Close the connection when we get a response from the server
//! println!("Got message: {}", msg);
//! self.out.close(CloseCode::Normal)
//! }
//! }
//!
//! // Now, instead of a closure, the Factory returns a new instance of our Handler.
//! connect("ws://127.0.0.1:3012", |out| { Client { out: out } }).unwrap()
//! ```
//!
//! That is a big increase in verbosity in order to accomplish the same effect as the
//! original example, but this way is more flexible and gives you access to more of the underlying
//! details of the WebSocket connection.
//!
//! Another method you will probably want to implement is `on_close`. This method is called anytime
//! the other side of the WebSocket connection attempts to close the connection. Implementing
//! `on_close` gives you a mechanism for informing the user regarding why the WebSocket connection
//! may have been closed, and it also gives you an opportunity to clean up any resources or state
//! that may be dependent on the connection that is now about to disconnect.
//!
//! An example server might use this as follows:
//!
//! ```no_run
//! use ws::{listen, Handler, Sender, Result, Message, CloseCode};
//!
//! struct Server {
//! out: Sender,
//! }
//!
//! impl Handler for Server {
//!
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Echo the message back
//! self.out.send(msg)
//! }
//!
//! fn on_close(&mut self, code: CloseCode, reason: &str) {
//!         // The WebSocket protocol allows a utf8 reason string to follow the
//!         // close code. WS-RS will attempt to interpret this data as a utf8 description of the
//!         // reason for closing the connection. In many cases, `reason` will be an empty string.
//! // So, you may not normally want to display `reason` to the user,
//! // but let's assume that we know that `reason` is human-readable.
//! match code {
//! CloseCode::Normal => println!("The client is done with the connection."),
//! CloseCode::Away => println!("The client is leaving the site."),
//! _ => println!("The client encountered an error: {}", reason),
//! }
//! }
//! }
//!
//! listen("127.0.0.1:3012", |out| { Server { out: out } }).unwrap()
//! ```
//!
//! Errors don't just occur on the other side of the connection, sometimes your code will encounter
//! an exceptional state too. You can access errors by implementing `on_error`. By implementing
//! `on_error` you can inform the user of an error and tear down any resources that you may have
//! set up for the connection, but which are not owned by the Handler. Also, note that certain kinds
//! of errors have certain ramifications within the WebSocket protocol. WS-RS will take care of
//! sending the appropriate close code.
//!
//! A server that tracks state outside of the handler might be as follows:
//!
//! ```no_run
//!
//! use std::rc::Rc;
//! use std::cell::RefCell;
//!
//! use ws::{listen, Handler, Sender, Result, Message, Handshake, CloseCode, Error};
//!
//! struct Server {
//! out: Sender,
//! count: Rc<RefCell<usize>>,
//! }
//!
//! impl Handler for Server {
//!
//! fn on_open(&mut self, _: Handshake) -> Result<()> {
//! // We have a new connection, so we increment the connection counter
//! Ok(*self.count.borrow_mut() += 1)
//! }
//!
//! fn on_message(&mut self, msg: Message) -> Result<()> {
//! // Tell the user the current count
//! println!("The number of live connections is {}", *self.count.borrow());
//!
//! // Echo the message back
//! self.out.send(msg)
//! }
//!
//! fn on_close(&mut self, code: CloseCode, reason: &str) {
//! match code {
//! CloseCode::Normal => println!("The client is done with the connection."),
//! CloseCode::Away => println!("The client is leaving the site."),
//! _ => println!("The client encountered an error: {}", reason),
//! }
//!
//! // The connection is going down, so we need to decrement the count
//! *self.count.borrow_mut() -= 1
//! }
//!
//! fn on_error(&mut self, err: Error) {
//! println!("The server encountered an error: {:?}", err);
//!
//! // The connection is going down, so we need to decrement the count
//! *self.count.borrow_mut() -= 1
//! }
//!
//! }
//! // RefCell enforces Rust borrowing rules at runtime.
//! // Calling borrow_mut will panic if the count is already borrowed,
//! // but we know already that only one handler at a time will ever try to change the count.
//! // Rc is a reference-counted box for sharing the count between handlers
//! // since each handler needs to own its contents.
//! let count = Rc::new(RefCell::new(0));
//! listen("127.0.0.1:3012", |out| { Server { out: out, count: count.clone() } }).unwrap()
//! ```
//!
//! There are other Handler methods that allow even more fine-grained access, but most applications
//! will usually only need these four methods.
//!
extern crate httparse;
extern crate mio;
extern crate sha1;
extern crate rand;
extern crate url;
#[macro_use] extern crate log;
mod result;
mod connection;
mod frame;
mod message;
mod handshake;
mod protocol;
mod communication;
mod io;
pub use connection::factory::Factory;
pub use connection::factory::Settings as WebSocketSettings;
pub use connection::handler::Handler;
pub use connection::handler::Settings as ConnectionSettings;
pub use result::{Result, Error};
pub use result::Kind as ErrorKind;
pub use message::Message;
pub use communication::Sender;
pub use protocol::CloseCode;
pub use handshake::{Handshake, Request, Response};
use std::fmt;
use std::net::ToSocketAddrs;
use mio::EventLoopConfig;
use std::borrow::Borrow;
/// A utility function for setting up a WebSocket server.
///
/// # Safety
///
/// This function blocks until the EventLoop finishes running. Avoid calling this method within
/// another WebSocket handler.
///
/// # Examples
///
/// ```no_run
/// use ws::listen;
///
/// listen("127.0.0.1:3012", |out| {
/// move |msg| {
/// out.send(msg)
/// }
/// }).unwrap()
/// ```
///
pub fn listen<A, F, H>(addr: A, factory: F) -> Result<()>
where
A: ToSocketAddrs + fmt::Debug,
F: FnMut(Sender) -> H,
H: Handler,
{
let ws = try!(WebSocket::new(factory));
try!(ws.listen(addr));
Ok(())
}
/// A utility function for setting up a WebSocket client.
///
/// # Safety
///
/// This function blocks until the EventLoop finishes running. Avoid calling this method within
/// another WebSocket handler. If you need to establish a connection from inside of a handler,
/// use the `connect` method on the Sender.
///
/// # Examples
///
/// ```no_run
/// use ws::{connect, CloseCode};
///
/// connect("ws://127.0.0.1:3012", |out| {
/// out.send("Hello WebSocket").unwrap();
///
/// move |msg| {
/// println!("Got message: {}", msg);
/// out.close(CloseCode::Normal)
/// }
/// }).unwrap()
/// ```
///
pub fn connect<U, F, H>(url: U, factory: F) -> Result<()>
where
U: Borrow<str>,
F: FnMut(Sender) -> H,
H: Handler
{
let mut ws = try!(WebSocket::new(factory));
let parsed = try!(
url::Url::parse(url.borrow())
.map_err(|err| Error::new(
ErrorKind::Internal,
format!("Unable to parse {} as url due to {:?}", url.borrow(), err))));
try!(ws.connect(parsed));
try!(ws.run());
Ok(())
}
/// The WebSocket struct. A WebSocket can support multiple incoming and outgoing connections.
pub struct WebSocket<F>
where F: Factory
{
event_loop: io::Loop<F>,
handler: io::Handler<F>,
}
impl<F> WebSocket<F> | let mut config = EventLoopConfig::new();
config.notify_capacity(max + 1000);
WebSocket::with_config(factory, config)
}
/// Create a new WebSocket with a Factory and use the event loop config to provide settings for
/// the event loop.
pub fn with_config(factory: F, config: EventLoopConfig) -> Result<WebSocket<F>> {
Ok(WebSocket {
event_loop: try!(io::Loop::configured(config)),
handler: io::Handler::new(factory),
})
}
/// Consume the WebSocket and listen for new connections on the specified address.
///
/// # Safety
///
/// This method will block until the event loop finishes running.
pub fn listen<A>(mut self, addr_spec: A) -> Result<WebSocket<F>>
where A: ToSocketAddrs + fmt::Debug
{
let mut result = Err(Error::new(ErrorKind::Internal, format!("Unable to listen on {:?}", addr_spec)));
for addr in try!(addr_spec.to_socket_addrs()) {
result = self.handler.listen(&mut self.event_loop, &addr).map(|_| ());
if result.is_ok() {
return self.run()
}
}
result.map(|_| self)
}
/// Queue an outgoing connection on this WebSocket. This method may be called multiple times,
    /// but the actual connections will not be established until after `run` is called.
pub fn connect(&mut self, url: url::Url) -> Result<&mut WebSocket<F>> {
let sender = Sender::new(io::ALL, self.event_loop.channel());
try!(sender.connect(url));
Ok(self)
}
/// Run the WebSocket. This will run the encapsulated event loop blocking until the WebSocket
    /// is shut down.
pub fn run(mut self) -> Result<WebSocket<F>> {
try!(self.event_loop.run(&mut self.handler));
Ok(self)
}
} | where F: Factory
{
/// Create a new WebSocket using the given Factory to create handlers.
pub fn new(mut factory: F) -> Result<WebSocket<F>> {
let max = factory.settings().max_connections; | random_line_split |
mod.rs | use std::mem;
use std::time::SystemTime;
#[cfg(feature="dynamic_mem")]
const MAX_MEMORY_SLOTS: usize = 1024 * 1024 * 2;
#[cfg(not(feature="dynamic_mem"))]
const MAX_MEMORY_SLOTS: usize = 1024 * 128;
type Bits = u128;
const MARK_BITS_PER_SLOT: usize = mem::size_of::<Bits>() * 8; // bits, not bytes, per mark word
const MARK_BITS: usize = MAX_MEMORY_SLOTS / MARK_BITS_PER_SLOT;
#[cfg(feature="dynamic_mem")]
type Mem = Vec<usize>;
#[cfg(not(feature="dynamic_mem"))]
type Mem = [usize; MAX_MEMORY_SLOTS] ;
pub const OBJECT_HEADER_SLOTS: usize = 1;
pub struct Memory {
head: usize,
mem: Mem,
mark_bits: [u128; MARK_BITS],
roots: Vec<usize>,
gc_count: usize,
allocates: usize,
last_gc_ms: u128,
total_gc_ms: u128,
lastgc_live_mem: usize,
lastgc_free_mem: usize,
show_gc: bool,
show_allocates: bool,
show_heap_map: bool,
show_free_list: bool,
}
impl<'a> IntoIterator for &'a Memory {
type Item = usize;
type IntoIter = MemoryIntoIterator<'a>;
fn into_iter(self) -> Self::IntoIter {
MemoryIntoIterator {
mem: self,
scan: 0,
free: 0,
}
}
}
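// Iterator over live objects: walks the heap block by block and skips any block
// that is currently on the free list.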
pub struct MemoryIntoIterator<'a> {
mem: &'a Memory,
scan: usize,
free: usize,
}
impl<'a> Iterator for MemoryIntoIterator<'a> {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
if self.scan == 0 {
self.scan = 1;
self.free = self.mem.head;
} else {
self.scan = self.mem.next_object_in_heap(self.scan);
}
while self.scan == self.free {
self.scan = self.mem.next_object_in_heap(self.free);
self.free = self.mem.get_fl_next(self.free);
}
if self.scan >= MAX_MEMORY_SLOTS - 1 {
return None;
} else {
return Some(self.scan);
}
}
}
#[cfg(feature = "dynamic_mem")]
fn im() -> Mem {
return vec![0; MAX_MEMORY_SLOTS];
}
#[cfg(not(feature = "dynamic_mem"))]
fn im() -> Mem {
return [0; MAX_MEMORY_SLOTS];
}
impl Memory {
pub fn initialze_memory() -> Memory {
let mut mem = Memory {
head: 1,
mem: im(),
mark_bits: [0; MARK_BITS],
roots: Vec::new(),
gc_count: 0,
allocates: 0,
lastgc_live_mem: 0,
lastgc_free_mem: 0,
last_gc_ms: 0,
total_gc_ms: 0,
show_gc: false,
show_allocates: false,
show_heap_map: false,
show_free_list: false,
};
mem.set_size(0, MAX_MEMORY_SLOTS); // magic memory at zero is heap_size
mem.set_size(mem.head, MAX_MEMORY_SLOTS - 2); // set initial object size as all heap
mem.set_fl_next(mem.head, 0);
mem
}
// objects API
// allocate_object (size) --- size is number of indexable slots
    // add/remove_root () --- add to or remove from gc root set.
    // element_size() - number of indexable slots, i.e. get_size() - OBJECT_HEADER_SLOTS
// at_put - store into object slot at index
// at -- fetch object slot at index
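    //
    // A minimal usage sketch (the slot count and stored value below are arbitrary,
    // illustrative choices):
    //
    //     let mut mem = Memory::initialze_memory();
    //     let obj = mem.allocate_object(3);   // object with 3 indexable slots
    //     mem.add_root(obj);                  // keep it alive across gc()
    //     mem.at_put(obj, 0, 42);
    //     assert_eq!(mem.at(obj, 0), 42);
    //     mem.remove_root(obj);               // collectible at the next gc()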
pub fn allocate_object(&mut self, unrounded_size: usize) -> usize {
self.allocates += 1;
let mut result = self.allocate_object_nocompress(unrounded_size);
if result == 0 {
self.gc();
result = self.allocate_object_nocompress(unrounded_size);
if result == 0 {
self.print_freelist();
self.print_heap();
panic!("out of memory");
}
}
result
}
pub fn live_objects(&self) -> MemoryIntoIterator {
return self.into_iter();
}
pub fn add_root(&mut self, obj: usize) {
self.roots.push(obj);
}
pub fn remove_root(&mut self, obj: usize) {
for i in 0..self.roots.len() {
if obj == self.roots[i] {
self.roots.remove(i);
return;
}
}
}
pub fn at_put(&mut self, obj: usize, index: usize, value: usize) {
let slots = self.mem[obj];
let base = obj+OBJECT_HEADER_SLOTS;
        let object = &mut self.mem[base..base + slots];
object[index] = value;
}
pub fn at(&self, obj: usize, index: usize) -> usize {
let slots = self.mem[obj];
let base = obj+OBJECT_HEADER_SLOTS;
        let object = &self.mem[base..base + slots];
return object[index];
}
pub fn element_size(&self, obj: usize) -> usize {
return self.mem[obj] - OBJECT_HEADER_SLOTS;
}
pub fn enable_show_heap_map(&mut self, enabled: bool) {
self.show_heap_map = enabled;
}
pub fn enable_show_freelist(&mut self, enabled: bool) {
self.show_free_list = enabled;
}
pub fn enable_show_gc(&mut self, enabled: bool) {
self.show_gc = enabled;
}
pub fn enable_show_allocates(&mut self, enabled: bool) {
self.show_allocates = enabled;
}
fn rounded_size(unrounded_size: usize) -> usize {
        (unrounded_size + 1) & !(1) // round the requested size up to an even number of slots
}
fn get_size(&self, obj: usize) -> usize {
return self.mem[obj];
}
fn set_size(&mut self, obj: usize, size: usize) {
self.mem[obj] = size;
}
fn next_object_in_heap(&self, obj: usize) -> usize {
return obj + self.get_size(obj);
}
    // The free list's next pointer lives in each free block's first data slot.
fn get_fl_next(&self, obj: usize) -> usize {
return self.mem[obj + 1];
}
fn | (&mut self, obj: usize, next: usize) {
self.mem[obj + 1] = next;
}
fn mark_object(&mut self, obj: usize) {
self.mark_bits[obj / MARK_BITS_PER_SLOT] |= 1 << (obj % MARK_BITS_PER_SLOT);
}
fn unmark_object(&mut self, obj: usize) {
        self.mark_bits[obj / MARK_BITS_PER_SLOT] &= !(1 << (obj % MARK_BITS_PER_SLOT));
}
fn is_marked(&self, obj: usize) -> bool {
        (self.mark_bits[obj / MARK_BITS_PER_SLOT] & (1 << (obj % MARK_BITS_PER_SLOT))) != 0
}
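    // First-fit allocation: walk the free list and carve the new object off the
    // tail of the first block large enough to hold it; returns 0 if nothing fits.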
fn allocate_object_nocompress(&mut self, unrounded_size: usize) -> usize {
let size = Memory::rounded_size(unrounded_size + OBJECT_HEADER_SLOTS);
let mut free = self.head;
        while free != 0 {
let avail = self.get_size(free);
if avail > size {
let newsize = avail - size;
if newsize < 2 {
panic!("remaining size is less than 2");
}
// shrink current free to smaller size
self.set_size(free, newsize);
// new object is on the end of current free object
let new_object = free + newsize;
self.set_size(new_object, size);
for index in 0..self.element_size(new_object) {
self.at_put(new_object, index, 0);
}
if self.show_allocates {
println!(
"Success: allocate_object returning -> {} size {}",
new_object, size
);
}
if self.head!= free {
if self.show_allocates {
println!("Reset head past intermediate free blocks \n");
let mut show = self.head;
while show!= free {
println!("Abandon {} size {}\n", show, self.get_size(show));
show = self.get_fl_next(show);
}
}
self.head = free;
}
return new_object;
}
free = self.get_fl_next(free);
}
0
}
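    // Mark-and-sweep collection: mark everything reachable from the registered
    // roots, then sweep the whole heap, rebuilding the free list and coalescing
    // adjacent free blocks.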
pub fn gc(&mut self) {
let start = SystemTime::now();
for i in 0..self.roots.len() {
self.mark_and_scan(self.roots[i]);
}
self.sweep();
self.gc_count += 1;
if self.show_gc {
self.print_gc_stats();
}
match start.elapsed() {
Ok(elapsed) => {
self.last_gc_ms = elapsed.as_millis();
self.total_gc_ms += self.last_gc_ms;
}
Err(e) => {
println!("Error: {:?}", e);
}
}
}
fn sweep(&mut self) {
let mut scan = 1;
self.head = 0;
let mut tail = self.head;
self.lastgc_free_mem = 0;
self.lastgc_live_mem = 0;
while scan < MAX_MEMORY_SLOTS - 1 {
if self.is_marked(scan) {
self.unmark_object(scan);
self.lastgc_live_mem += self.get_size(scan);
} else {
self.lastgc_free_mem += self.get_size(scan);
if tail == 0 {
self.head = scan;
self.set_fl_next(scan, 0);
tail = scan;
} else {
if self.next_object_in_heap(tail) == scan {
self.set_size(tail, self.get_size(tail) + self.get_size(scan));
} else {
self.set_fl_next(tail, scan);
self.set_fl_next(scan, 0);
tail = scan;
}
}
}
scan = self.next_object_in_heap(scan);
}
if self.show_free_list {
self.print_freelist();
}
if self.show_heap_map {
self.print_heap();
}
}
fn mark_and_scan(&mut self, object: usize) {
if object == 0 || self.is_marked(object) {
return;
}
let slots = self.get_size(object);
self.mark_object(object);
for i in OBJECT_HEADER_SLOTS..slots {
self.mark_and_scan(self.mem[object + i]);
}
}
pub fn print_gc_stats(&self) {
println!(
"{} gcs, {} object allocates, Last GC: Live {} Dead {} in {} ms, Lifetime GC {} ms\n",
self.gc_count,
self.allocates,
self.lastgc_live_mem,
self.lastgc_free_mem,
self.last_gc_ms,
self.total_gc_ms,
);
}
fn print_heap(&mut self) {
print!("\x1B[{};{}H", 1, 1);
let mut scan = 1;
let mut count = 0;
let mut free = self.head;
while scan < MAX_MEMORY_SLOTS - 1 {
            // free blocks print as 'x', live objects as '.'
let mut num_chars_to_print = 0;
let mut char_to_print = '?';
if scan == free {
while scan == free {
char_to_print = 'x';
num_chars_to_print += self.get_size(scan);
scan = self.next_object_in_heap(free);
free = self.get_fl_next(free);
}
} else {
char_to_print = '.';
num_chars_to_print += self.get_size(scan);
scan = self.next_object_in_heap(scan);
}
for _i in 1..num_chars_to_print / 2 {
print!("{}", char_to_print);
count += 1;
if count % 120 == 0 {
print!("\n");
}
}
}
self.print_gc_stats();
}
pub fn print_freelist(&mut self) {
println!("\nprint_freelist: Head = {}", self.head);
let mut free = self.head;
let mut count = 0;
let mut total_free = 0;
        while free != 0 {
let size = self.get_size(free);
let next = self.get_fl_next(free);
total_free += self.get_size(free);
println!("{}: Free = {} {} slots next = {}", count, free, size, next);
free = next;
count += 1;
if count > MAX_MEMORY_SLOTS {
panic!()
}
}
println!(
"print_freelist {} elements, total free = {}\n",
count, total_free
);
}
}
| set_fl_next | identifier_name |